From 8465aa958cc4559109f70a675e72d72381b5115b Mon Sep 17 00:00:00 2001 From: vigneshrajsb Date: Sat, 17 Jan 2026 14:19:08 -0800 Subject: [PATCH 1/6] init cleanup --- .env.example | 3 - .github/workflows/deploy.yml | 2 - Dockerfile | 2 - README.md | 17 -- bun.lock | 1 + package.json | 5 +- scripts/generateBlogroll.ts | 126 --------------- scripts/generateJsonSchema.ts | 164 -------------------- scripts/syncDocDates.ts | 156 ------------------- src/components/home/index.tsx | 3 +- src/components/home/latest/index.tsx | 46 ------ src/components/home/latest/latestposts.tsx | 59 ------- src/components/home/latest/types.ts | 26 ---- src/lib/static/blogcontent/blogcontent.json | 18 +-- src/lib/static/blogcontent/blogcontent.ts | 18 +-- src/pages/articles/introduction.mdx | 1 - src/pages/docs/what-is-lifecycle.mdx | 1 - src/pages/index.mdx | 9 +- 18 files changed, 22 insertions(+), 635 deletions(-) delete mode 100755 scripts/generateBlogroll.ts delete mode 100755 scripts/generateJsonSchema.ts delete mode 100644 scripts/syncDocDates.ts delete mode 100644 src/components/home/latest/index.tsx delete mode 100644 src/components/home/latest/latestposts.tsx delete mode 100644 src/components/home/latest/types.ts diff --git a/.env.example b/.env.example index 9011070..2775ed7 100644 --- a/.env.example +++ b/.env.example @@ -1,7 +1,4 @@ -SYNC_LIFECYCLE_DOCS= -GITHUB_TOKEN= UI_BRANCH=main CORE_BRANCH=main -LC_DOCS_PUBLISH= NEXT_PUBLIC_DEV_ENV=local DEV_ENV=local diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index e7375b4..b1f625e 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -39,8 +39,6 @@ jobs: - name: Deploy app run: bun run deploy env: - GITHUB_TOKEN: ${{ secrets.LC_DOCS_PUBLISH }} - SYNC_LIFECYCLE_DOCS: ${{ secrets.LC_DOCS_PUBLISH }} UI_BRANCH: main CORE_BRANCH: main diff --git a/Dockerfile b/Dockerfile index d011690..cd3031c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,8 +19,6 @@ WORKDIR /app # Copy dependency files 
COPY package*.json bun.lock* ./ RUN bun install -ARG SYNC_LIFECYCLE_DOCS -ENV SYNC_LIFECYCLE_DOCS=${SYNC_LIFECYCLE_DOCS} COPY . . diff --git a/README.md b/README.md index d97be86..c3230a9 100644 --- a/README.md +++ b/README.md @@ -92,23 +92,6 @@ Install the dependencies bun install ``` -Create a PAT with with the permissions below (`"code"` is `"Read and Write content"` ). -Add it to your `.env` file by copying `.env.example` and adding your PAT to the `SYNC_LIFECYCLE_DOCS` and `GITHUB_TOKEN`. - -### PAT: Personal Access Token, create a fine-grained access token with the following permissions - -| rule | access level | -| --- | --- | -| members | `read` | -| metadata | `read` | -| actions | `read/write` | -| contents | `read/write` | -| pull requests | `read/write` | -| workflows | `read/write` | - -\*The following permissiones allow you to update content located in scripts - - Run the development server ```bash diff --git a/bun.lock b/bun.lock index ae82a48..4382d3e 100644 --- a/bun.lock +++ b/bun.lock @@ -1,5 +1,6 @@ { "lockfileVersion": 1, + "configVersion": 0, "workspaces": { "": { "name": "lifecycle-docs", diff --git a/package.json b/package.json index 1f55ba2..a80daa2 100644 --- a/package.json +++ b/package.json @@ -9,11 +9,8 @@ "build:meta": "bun run ./scripts/generateMeta.ts", "build:tags": "bun run ./scripts/generateTagPages.ts", "build:sectiondata": "bun run ./scripts/generateSectionData.ts", - "build:blogroll": "bun run ./scripts/generateBlogroll.ts --debug", - "build:prep": "bun run clean && bun run build:remote:schema && bun run build:tags && bun run build:meta && bun run build:sectiondata && bun run sync:doc:dates && bun run build:blogroll && bun run sync:content", - "sync:doc:dates": "bun run ./scripts/syncDocDates.ts src/pages/articles/**/*.mdx src/pages/docs/*.mdx", + "build:prep": "bun run clean && bun run build:tags && bun run build:meta && bun run build:sectiondata && bun run sync:content", "sync:content": "bun run 
./scripts/generateAllContent.ts", - "build:remote:schema": "bun run ./scripts/generateJsonSchema.ts", "clean": "rimraf src/pages/schema src/pages/tags src/lib/data", "dev": "bun run build:prep && next dev -p 3333", "deploy": "bun run build && touch out/.nojekyll", diff --git a/scripts/generateBlogroll.ts b/scripts/generateBlogroll.ts deleted file mode 100755 index 36287b6..0000000 --- a/scripts/generateBlogroll.ts +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import { readFileSync, existsSync, mkdirSync, writeFileSync } from "node:fs"; -import { dirname, relative } from "node:path"; -import matter from "gray-matter"; -import { sync } from "fast-glob"; -import { Command } from "commander"; - -export const findMdxFiles = ( - dir: string, - ignorePatterns: string[], -): string[] => { - const globPattern = `${dir}/**/*.mdx`; - return sync(globPattern, { ignore: ignorePatterns }); -}; - -export const extractFrontmatter = ( - filePath: string, - baseDir: string, -): Record => { - const content = readFileSync(filePath, "utf8"); - const { - data: { title = null, description = null, date = null }, - } = matter(content); - - const path = relative(baseDir, filePath).replace(/\.mdx$/, ""); - - return { - title, - description, - date, - path, - }; -}; - -export const ensureDirectoryExists = (dir: string) => { - if (!existsSync(dir)) mkdirSync(dir, { recursive: true }); -}; - -export const organizeFrontmatter = ( - inputDir: string, - outputFilePath: string, - ignore: string[], - debug: boolean, -) => { - if (debug) console.log(`Scanning directory: ${inputDir}`); - const mdxFiles = findMdxFiles(inputDir, ignore).filter( - (filePath) => !filePath.includes("/tags/"), - ); - - if (debug) - console.log( - `Found ${mdxFiles.length} MDX files (after applying ignore and exclude patterns)`, - ); - - const frontmatterData = mdxFiles.map((file) => - extractFrontmatter(file, inputDir), - ); - - if (debug) - console.log(`Extracted frontmatter for ${frontmatterData.length} files`); - - const outputDir = dirname(outputFilePath); - ensureDirectoryExists(outputDir); - - const tsObject = `export const blogRoll = ${JSON.stringify(frontmatterData, null, 2)};\n`; - - writeFileSync(outputFilePath, tsObject, "utf8"); - if (debug) console.log(`Frontmatter data saved to ${outputFilePath}`); -}; - -export const actionGenerateBlogroll = (options) => { - const { input, output, ignore, debug } = options; - - if (!input || !output) { - 
console.error("Error: Both --input and --output options are required."); - process.exit(1); - } - - try { - organizeFrontmatter(input, output, ignore, debug); - } catch (error) { - console.error("Error:", error.message); - process.exit(1); - } -}; - -const program = new Command(); - -program - .name("extract-frontmatter") - .description("Extract frontmatter from MDX files and save it to a JSON file.") - .option( - "-i, --input ", - "Input directory to scan for MDX files", - "src/pages", - ) - .option( - "-o, --output ", - "Output file path (JSON)", - "src/lib/data/blogroll/blogroll.ts", - ) - .option( - "--ignore ", - "Comma-separated list of glob patterns to ignore", - (val) => val.split(","), - ["**/index.mdx"], - ) - .option("-d, --debug", "Enable debug logging", false) - .action(actionGenerateBlogroll); - -program.parse(process.argv); diff --git a/scripts/generateJsonSchema.ts b/scripts/generateJsonSchema.ts deleted file mode 100755 index 831e01f..0000000 --- a/scripts/generateJsonSchema.ts +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import { mkdir, writeFile } from "node:fs/promises"; -import { join, dirname } from "node:path"; -import { Octokit } from "@octokit/core"; -import { Command } from "commander"; -import yaml from "js-yaml"; -import dotenv from "dotenv"; - -dotenv.config(); - -const auth = process.env.SYNC_LIFECYCLE_DOCS; - -if (!auth) { - console.error("SYNC_LIFECYCLE_DOCS not found in .env file"); - process.exit(1); -} - -const octokit = new Octokit({ auth }); - -export const fetchFileFromRepo = async ({ - owner = "goodrxoss", - repo = "lifecycle", - path = "docs/schema/yaml/1.0.0.yaml", - branch = "main", - debug = false, -}: FetchFileOptions): Promise => { - try { - const sanitizedPath = path.startsWith("/") ? path.slice(1) : path; - - if (debug) { - console.log(`Fetching file: ${owner}/${repo}/${sanitizedPath}@${branch}`); - } - - const response = await octokit.request( - "GET /repos/{owner}/{repo}/contents/{path}", - { - owner, - repo, - path: sanitizedPath, - ref: branch, - }, - ); - - if (!("content" in response.data)) { - throw new Error("File content not found in response"); - } - - const content = Buffer.from(response.data.content, "base64").toString( - "utf-8", - ); - if (debug) console.log("File fetched successfully"); - return content; - } catch (error) { - console.error("Error fetching file:", error); - throw error; - } -}; - -export const convertYamlToJson = >( - yamlContent: string, -): T => { - try { - return yaml.load(yamlContent) as T; - } catch (error) { - console.error("Failed to parse YAML:", error); - throw error; - } -}; - -export const syncYamlFile = async (options: SyncOptions) => { - const { - owner = "goodrxoss", - repo = "lifecycle", - // docs/schema/yaml/2.3.0.yaml - path = "docs/schema/yaml/1.0.0.yaml", - dest = "src/lib/data/lifecycle-schema", - name = "lifecycle", - debug = false, - branch = "main", - } = options; - - try { - const yamlContent = await fetchFileFromRepo({ - owner, - repo, - path, - branch, - debug, - }); - - const yamlFileName 
= `${name}.yaml.ts`; - const jsonFileName = `${name}.json.ts`; - const yamlFilePath = join(dest, yamlFileName); - const jsonFilePath = join(dest, jsonFileName); - - const dir = dirname(yamlFilePath); - await mkdir(dir, { recursive: true }); - - const yamlExport = `export const yamlContent = \`${yamlContent.replace(/`/g, "\\`")}\`;\n`; - await writeFile(yamlFilePath, yamlExport, "utf-8"); - console.log(`YAML exported as TypeScript string at ${yamlFilePath}`); - const parsedJson = convertYamlToJson(yamlContent); - const jsonExport = `export const jsonContent = ${JSON.stringify(parsedJson, null, 2)};\n`; - await writeFile(jsonFilePath, jsonExport, "utf-8"); - console.log(`JSON exported as TypeScript object at ${jsonFilePath}`); - } catch (error) { - console.error("Error processing YAML file:", error); - } -}; - -const program = new Command(); - -program - .option("-r, --repo ", "Source repository name", "lifecycle") - .option("-p, --path ", "Path to YAML file in source repo") - .option( - "-d, --dest ", - "Path to store YAML and JSON files locally", - "src/lib/data/lifecycle-schema", - ) - .option("-n, --name ", "Base name for output files", "lifecycle") // New option for output file naming - .option("-o, --owner ", "GitHub owner or organization", "goodrxoss") - .option("-b, --branch ", "GitHub branch to fetch from", "main") - .option("--debug", "Enable debug logging", false); - -program.parse(process.argv); -const options = program.opts(); - -(async () => { - await syncYamlFile(options as SyncOptions); -})(); - -export type SyncOptions = { - owner?: string; - repo?: string; - path?: string; - dest?: string; - name?: string; - debug?: boolean; - branch?: string; -}; - -export type FetchFileOptions = { - owner?: string; - repo?: string; - path?: string; - branch?: string; - debug?: boolean; -}; diff --git a/scripts/syncDocDates.ts b/scripts/syncDocDates.ts deleted file mode 100644 index ee66669..0000000 --- a/scripts/syncDocDates.ts +++ /dev/null @@ -1,156 +0,0 @@ -/** 
- * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import { Command } from "commander"; -import { Octokit } from "@octokit/rest"; -import fg from "fast-glob"; -import fs from "node:fs/promises"; -import path from "node:path"; -import matter from "gray-matter"; -import pLimit from "p-limit"; - -import dotenv from "dotenv"; - -dotenv.config(); - -const auth = process.env.SYNC_LIFECYCLE_DOCS; -const octokit = new Octokit({ auth }); - -export const formatDate = (dateString: string): string => { - const date = new Date(dateString); - return date.toISOString().split("T")[0]; -}; - -export const updateFrontmatter = async ( - filePath: string, - commitDate: string, -): Promise => { - try { - const fileContent = await fs.readFile(filePath, "utf-8"); - const { data: frontmatter, content } = matter(fileContent); - if (frontmatter.date) return; - frontmatter.date = commitDate; - const updatedContent = matter.stringify(content, frontmatter); - await fs.writeFile(filePath, updatedContent, "utf-8"); - console.log(`Updated date for ${filePath}: ${commitDate}`); - } catch (error) { - console.error(`Error updating frontmatter for ${filePath}:`, error); - } -}; - -export const getLatestCommitsForFiles = async ({ - files, - owner = "goodrxoss", - repo = "lifecycle-docs", -}): Promise => { - const limit = pLimit(10); - try { - const commitRequests = files.map((file) => - limit(async () => { - const relativePath = path.relative(process.cwd(), 
file); - const resp = await octokit.repos.listCommits({ - owner, - repo, - path: relativePath, - per_page: 1, - }); - const commits = resp?.data; - const lastCommit = commits?.[0]?.commit; - const rawCommitDate = - lastCommit?.author?.date || new Date().toISOString(); - const commitDate = formatDate(rawCommitDate); - const commitMessage = lastCommit?.message || "No commit message"; - - await updateFrontmatter(file, commitDate); - - return { - fileName: path.basename(file), - filePath: relativePath, - commitDate, - commitMessage, - }; - }), - ); - - const fileCommits = await Promise.all(commitRequests); - return fileCommits.sort( - (a, b) => - new Date(b.commitDate).getTime() - new Date(a.commitDate).getTime(), - ); - } catch (error) { - console.info("Error fetching commits:", error); - return []; - } -}; - -export const syncDocDatesAction = async ({ - files, - owner = "goodrxoss", - repo = "lifecycle-docs", -}: SyncDocDatesActionOptions): Promise => { - const resolvedFiles = await fg(files); - if (resolvedFiles.length === 0) { - console.error(`No matching files found for patterns: ${files.join(", ")}`); - return; - } - console.log(`Found ${resolvedFiles.length} files. 
Processing...`); - const latestDocs = await getLatestCommitsForFiles({ - owner, - repo, - files: resolvedFiles, - }); - console.log("Latest Docs:"); - latestDocs.forEach((doc) => - console.log(`- ${doc.fileName}: ${doc.commitDate} (${doc.commitMessage})`), - ); -}; - -const program = new Command(); - -program - .description("Update .mdx frontmatter with the latest Git commit date.") - .option("-o, --owner ", "GitHub repository owner", "goodrxoss") - .option("-r, --repo ", "GitHub repository name", "lifecycle-docs") - .arguments("") - .action((files: string[], options: ActionOptions) => - syncDocDatesAction({ ...options, files }), - ); - -program.parseAsync(process.argv); - -export type FileCommit = { - fileName: string; - filePath: string; - commitDate: string; - commitMessage: string; -}; - -export type GetLatestCommitsForFilesOptions = { - owner?: string; - repo?: string; - filePath: string; -}; - -export type ActionOptions = { - owner?: string; - repo?: string; -}; - -export type SyncDocDatesActionOptions = { - owner?: string; - repo?: string; - files: string[]; -}; diff --git a/src/components/home/index.tsx b/src/components/home/index.tsx index 4e37fff..74b5dbd 100644 --- a/src/components/home/index.tsx +++ b/src/components/home/index.tsx @@ -16,6 +16,5 @@ import { Bg } from "@/components/home/bg"; import { ServicesFlow as Services, Static } from "@/components/home/flows"; -import LatestPosts from "@/components/home/latest"; export { Main } from "@/components/home/main"; -export { Bg, Services, Static, LatestPosts }; +export { Bg, Services, Static }; diff --git a/src/components/home/latest/index.tsx b/src/components/home/latest/index.tsx deleted file mode 100644 index a4e582e..0000000 --- a/src/components/home/latest/index.tsx +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -"use client"; - -import dynamic from "next/dynamic"; -import { Loader } from "@/components"; -import { LatestPosts } from "@/components/home/latest/latestposts"; -import { Separator } from "@/components/ui/separator"; -import { blogRoll } from "@/lib/data/blogroll/blogroll"; -import { Post } from "@/components/home/latest/types"; - -export const LatestPostsSection = () => { - return ( -
- -
-

- Lifecycle is continually improving. The team is working to keep our - current product working optimally for our customers while implementing - new features consistently. View Lifecycle's latest articles and - documentation! -

-
- -
- ); -}; - -export default dynamic(() => Promise.resolve(LatestPostsSection), { - loading: () => , - ssr: false, -}); diff --git a/src/components/home/latest/latestposts.tsx b/src/components/home/latest/latestposts.tsx deleted file mode 100644 index 40f918c..0000000 --- a/src/components/home/latest/latestposts.tsx +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import Link from "next/link"; -import { - Card, - CardContent, - CardDescription, - CardHeader, - CardTitle, -} from "@/components/ui/card"; -import { LatestPostsProps } from "@/components/home/latest/types"; - -export const LatestPosts = ({ blogRoll }: LatestPostsProps) => { - const latestPosts = blogRoll - .sort((a, b) => +new Date(b.date) - +new Date(a.date)) - .slice(0, 9); - - return ( -
- {latestPosts.map(({ title, description, path }) => ( - - - {title} - - - - {description} - - - Read more → - - - - ))} -
- ); -}; - -export default LatestPosts; diff --git a/src/components/home/latest/types.ts b/src/components/home/latest/types.ts deleted file mode 100644 index 9c8e0e5..0000000 --- a/src/components/home/latest/types.ts +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -export type Post = { - title: string; - description: string; - path: string; - date: string; -}; - -export type LatestPostsProps = { - blogRoll: Post[]; -}; diff --git a/src/lib/static/blogcontent/blogcontent.json b/src/lib/static/blogcontent/blogcontent.json index f88a8a1..1f37d98 100644 --- a/src/lib/static/blogcontent/blogcontent.json +++ b/src/lib/static/blogcontent/blogcontent.json @@ -2,35 +2,35 @@ { "title": "Introducing Lifecycle", "description": null, - "date": "2025-05-23", + "date": null, "path": "articles/introduction", - "body": "We started building **Lifecycle** at GoodRx in 2019 because managing our lower environments like staging, development, QA had become a daily headache. As our architecture shifted from a monolith to microservices, our internal channels were flooded with messages like \"Is anyone using staging?\" \"Staging is broken again,\" and \"Who just overwrote my changes?\" Waiting in line for hours (sometimes days) to test code in a real-world-like environment was the norm.\n\nWe simply couldn't scale with our engineering growth. 
So, as a proof of concept, we spun up **Lifecycle**: a tool that lets you create on-demand, ephemeral environments off of github pull request.\n\nAt first, only a handful of services were onboarded, but our engineers immediately saw the difference, no more static staging servers, no more pipeline gymnastics, and no more accidental overwrites. They wanted Lifecycle wherever they touched code, so we built a simple lifecycle.yaml configuration, replaced our manual database entries, and baked Lifecycle support into every new service template.\n\nAfter ironing out early scaling kinks, we realized Lifecycle had become more than an internal convenience, it was a game-changer for us.\n\nToday (June 5, 2025), we're thrilled to open-source five years of collective effort under the Apache 2.0 license. This project represents countless late-night brainstorming sessions, pull requests, and \"aha\" moments, and we can't wait to see how you'll make it your own: adding integrations, optimizing performance, or finding novel workflows we never imagined.\n\nBy sharing Lifecycle, we hope to help teams stuck in the same limited environment limbo we once were and build a community of passionate likeminded developers who'll shape the the future of Lifecycle.\n\nWe look forward to learning from you, growing together, and making shipping high-quality software faster and more enjoyable for everyone!\n\nJoin our Discord server [here](https://discord.gg/TEtKgCs8T8) to connect!!" + "body": "We started building **Lifecycle** at GoodRx in 2019 because managing our lower environments like staging, development, QA had become a daily headache. 
As our architecture shifted from a monolith to microservices, our internal channels were flooded with messages like \"Is anyone using staging?\" \"Staging is broken again,\" and \"Who just overwrote my changes?\" Waiting in line for hours (sometimes days) to test code in a real-world-like environment was the norm.\n\nWe simply couldn't scale with our engineering growth. So, as a proof of concept, we spun up **Lifecycle**: a tool that lets you create on-demand, ephemeral environments off of github pull request.\n\nAt first, only a handful of services were onboarded, but our engineers immediately saw the difference, no more static staging servers, no more pipeline gymnastics, and no more accidental overwrites. They wanted Lifecycle wherever they touched code, so we built a simple lifecycle.yaml configuration, replaced our manual database entries, and baked Lifecycle support into every new service template.\n\nAfter ironing out early scaling kinks, we realized Lifecycle had become more than an internal convenience, it was a game-changer for us.\n\nToday (June 5, 2025), we're thrilled to open-source five years of collective effort under the Apache 2.0 license. This project represents countless late-night brainstorming sessions, pull requests, and \"aha\" moments, and we can't wait to see how you'll make it your own: adding integrations, optimizing performance, or finding novel workflows we never imagined.\n\nBy sharing Lifecycle, we hope to help teams stuck in the same limited environment limbo we once were and build a community of passionate likeminded developers who'll shape the the future of Lifecycle.\n\nWe look forward to learning from you, growing together, and making shipping high-quality software faster and more enjoyable for everyone!\n\nJoin our Discord server [here](https://discord.gg/M5fhHJuEX8) to connect!!" 
}, { "title": "What is Lifecycle?", "description": "Lifecycle is your effortless way to test and create ephemeral environments", - "date": "2025-03-12", + "date": null, "path": "docs/what-is-lifecycle", "body": "Lifecycle is an **ephemeral** _(/əˈfem(ə)rəl/, lasting for a very short time)_ environment orchestrator that transforms your GitHub pull requests into fully functional development environments. It enables developers to test, validate, and collaborate on features without the hassle of managing infrastructure manually.\n\n> With **Lifecycle**, every pull request gets its own connected playground—ensuring that changes can be previewed, integrated, and verified before merging into its main branch.\n\n## A Developer’s Story\n\nImagine working in an organization that develops multiple services. Managing and testing changes across these services can be challenging, especially when multiple developers are working on different features simultaneously.\n\nMeet **Nick Holiday 👨‍💻**, an engineer who needs to update a database schema and modify the corresponding API in a backend service. Additionally, his change requires frontend service updates to display the new data correctly.\n\n### Traditional Workflow Challenges\n\n- **Shared environments** – Nick deploys his backend service changes to a shared dev or staging environment, but another engineer is testing unrelated changes at the same time.\n- **Conflicting updates** – The frontend engineers working on the UI might face issues if their code depends on a stable backend service that keeps changing.\n- **Environment management** – Setting up and maintaining an isolated environment for testing requires significant effort.\n\n### Enter Lifecycle\n\nWith Lifecycle, as soon as Nick opens a pull request, the system automatically:\n\n1. 🏗️ **Creates an isolated development environment** – This environment includes Nick’s updated backend service along with the necessary frontend services.\n2. 
🚀 **Deploys the application** – Everything is set up exactly as it would be in production, ensuring a reliable test scenario.\n3. 🔗 **Generates a shareable URL** – Nick and his teammates can interact with the new features without setting up anything locally.\n4. 🧹 **Cleans up automatically** – Once the PR is merged or closed, Lifecycle removes the environment, keeping things tidy.\n\n## Watch a Quick Demo\n\n\n\n## How It Works\n\n\n\n## Why Use Lifecycle?\n\n- **Faster Feedback Loops** - Get instant previews of your changes without waiting for staging deployments.\n- **Isolation** - Each PR runs in its own sandbox, preventing conflicts.\n- **Seamless Collaboration** - Share URLs with stakeholders, designers, or QA engineers.\n- **Automatic Cleanup** - No more stale test environments; Lifecycle manages cleanup for you.\n- **Works with Your Stack** - Supports containerized applications and integrates with Kubernetes." }, { "title": "Auto Deploy & Labels", "description": "How to setup auto deploy for pull requests and control envionment with labels", - "date": "2025-01-29", + "date": null, "path": "docs/features/auto-deployment", "body": "## Auto-Deploy Configuration\n\nTo enable **automatic deployment** when a PR is created, set the `autoDeploy` attribute in your repository's `lifecycle.yaml` file:\n\n```yaml {2} filename=\"lifecycle.yaml\"\nenvironment:\n autoDeploy: true\n defaultServices:\n```\n\n- Lifecycle will **automatically create** the environment as soon as a PR is opened.\n- A `lifecycle-deploy!` label will be added to the PR to indicate that the environment has been deployed.\n\n---\n\n## Managing Deployments with Labels\n\nIf **auto-deploy is not enabled**, you can manually control the environment using PR labels.\n\n### Deploy an Environment\n\nTo create an ephemeral environment for a PR, **add** the `lifecycle-deploy!` label.\n\n### Tear Down an Environment\n\nTo **delete** an active environment, use either of these labels:\n\n- **Remove** 
`lifecycle-deploy!`\n- **Add** `lifecycle-disabled!`\n\n---\n\n## Automatic Cleanup on PR Closure\n\nWhen a PR is **closed**, Lifecycle will:\n\n1. **Tear down** the active environment.\n2. **Remove** the `lifecycle-deploy!` label from the PR.\n\nThis ensures that unused environments do not persist after the PR lifecycle is complete.\n\n---\n\n## Summary\n\n| Feature | Behavior |\n| ---------------------------- | ----------------------------------------------- |\n| `autoDeploy: true` in config | PR environments are **automatically** deployed. |\n| `lifecycle-deploy!` | **Manually deploy** an environment. |\n| Remove `lifecycle-deploy!` | **Tear down** the environment. |\n| Add `lifecycle-disabled!` | **Tear down** the environment manually. |\n| PR closed | **Environment is deleted automatically**. |\n\nUsing these configurations and labels, teams can efficiently manage **ephemeral environments** in their development workflow." }, { "title": "Webhooks", "description": null, - "date": "2025-02-16", + "date": null, "path": "docs/features/webhooks", "body": "Lifecycle can invoke **third-party services** when a build state changes.\n\nWebhooks allow users to automate external processes such as running tests, performing cleanup tasks, or sending notifications based on environment build states.\n\n## Supported Types\n\nLifecycle supports three types of webhooks:\n\n1. **`codefresh`** - Trigger Codefresh pipelines\n2. **`docker`** - Execute Docker images as Kubernetes jobs\n3. 
**`command`** - Run shell commands in a specified Docker image\n\n## Common Use Cases\n\n- When a build status is `deployed`, trigger **end-to-end tests**.\n- When a build status is `error`, trigger **infrastructure cleanup** or alert the team.\n- Run **security scans** on built containers.\n- Execute **database migrations** after deployment.\n- Send **notifications** to Slack, Discord, or other communication channels.\n- Perform **smoke tests** using custom test containers.\n\n## Configuration\n\nWebhooks are defined in the `lifecycle.yaml` under the `environment.webhooks` section.\n\nBelow is an example configuration for triggering end-to-end tests when the `deployed` state is reached.\n\n## Examples\n\n### `codefresh`\n\nThe `codefresh` type triggers existing Codefresh pipelines when build states change.\n\n```yaml\n# Trigger End-to-End Tests on Deployment\nenvironment:\n # ...\n defaultServices:\n - name: \"frontend\"\n optionalServices:\n - name: \"backend\"\n repository: \"lifecycle/backend\"\n branch: \"main\"\n webhooks:\n - state: deployed\n type: codefresh\n name: \"End to End Tests\"\n pipelineId: 64598362453cc650c0c9cd4d\n trigger: tests\n env:\n branch: \"{{frontend_branchName}}\"\n TEST_URL: \"https://{{frontend_publicUrl}}\"\n # ...\n```\n\n- **`state: deployed`** → Triggers the webhook when the build reaches the `deployed` state.\n- **`type: codefresh`** → Specifies that this webhook triggers a **Codefresh pipeline**.\n- **`name`** → A human-readable name for the webhook.\n- **`pipelineId`** → The unique Codefresh pipeline ID.\n- **`trigger`** → Codefresh pipeline's trigger to execute.\n- **`env`** → Passes relevant environment variables (e.g., `branch` and `TEST_URL`).\n\n---\n\n```yaml\n# Trigger Cleanup on Build Error\nenvironment:\n # ...\n webhooks:\n - state: error\n type: codefresh\n name: \"Cleanup Failed Deployment\"\n pipelineId: 74283905723ff650c0d9ab7e\n trigger: cleanup\n env:\n branch: \"{{frontend_branchName}}\"\n CLEANUP_TARGET: 
\"frontend\"\n  # ...\n```\n\n- **`state: error`** → Triggers the webhook when the build fails.\n- **`type: codefresh`** → Invokes a Codefresh cleanup pipeline.\n- **`trigger: cleanup`** → Codefresh pipeline's trigger to execute.\n- **`env`** → Includes necessary variables, such as `branch` and `CLEANUP_TARGET`.\n\n### `docker`\n\nThe `docker` type allows you to execute any Docker image as a Kubernetes job when build states change.\n\n\n  Docker webhooks run as Kubernetes jobs in the same namespace as your build.\n  They have a default timeout of 30 minutes and resource limits of 200m CPU and\n  1Gi memory.\n\n\n```yaml\n# Run E2E Tests in Custom Container\nenvironment:\n  # ...\n  webhooks:\n    - name: \"E2E Test Suite\"\n      description: \"Execute comprehensive E2E tests\"\n      state: deployed\n      type: docker\n      docker:\n        image: \"myorg/e2e-tests:latest\"\n        command: [\"npm\", \"run\", \"e2e\"]\n        timeout: 1200 # 20 minutes (optional, default: 1800 seconds)\n      env:\n        BASE_URL: \"https://{{frontend_publicUrl}}\"\n        ENVIRONMENT: \"ephemeral\"\n```\n\n- **`docker.image`** → Docker image to execute (required)\n- **`docker.command`** → Override the default entrypoint (optional)\n- **`docker.args`** → Arguments to pass to the command (optional)\n- **`docker.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n### `command`\n\nThe `command` type is a simplified version of Docker webhooks, ideal for running shell scripts or simple commands.\n\n```yaml\n# Slack Notification Example\nenvironment:\n  # ...\n  webhooks:\n    - name: \"Deployment Notification\"\n      description: \"Notify team of successful deployment\"\n      state: deployed\n      type: command\n      command:\n        image: \"alpine:latest\"\n        script: |\n          apk add --no-cache curl\n          curl -X POST \"$WEBHOOK_URL\" \\\n            -H \"Content-Type: application/json\" \\\n            -d \"{\\\"text\\\":\\\"🚀 Deployed $SERVICE_NAME to $DEPLOY_URL\\\"}\"\n        timeout: 300 # 5 minutes (optional)\n      env:\n        WEBHOOK_URL: \"https://hooks.slack.com/services/XXX/YYY/ZZZ\"\n        
SERVICE_NAME: \"{{frontend_internalHostname}}\"\n DEPLOY_URL: \"https://{{frontend_publicUrl}}\"\n```\n\n\n Make sure to replace placeholder values like webhook URLs and pipeline IDs\n with your actual values.\n\n\n- **`command.image`** → Docker image to run the script in (required)\n- **`command.script`** → Shell script to execute (required)\n- **`command.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n## Trigger states\n\nWebhooks can be triggered on the following build states:\n\n- **`deployed`** → Service successfully deployed and running\n- **`error`** → Build or deployment failed\n- **`torn_down`** → Environment has been destroyed\n\n## Note\n\n- All webhooks for the same state are executed **serially** in the order defined.\n- Webhook failures do not affect the build status.\n- Webhook invocations can be viewed at `/builds/[uuid]/webhooks` page(latest 20 invocations). Use the API to view all invocations.\n- `docker` and `command` type's logs are not streamed when the job is still in progress and are available only after the job completes." }, { "title": "Native Helm Deployment", "description": "Deploy services using Helm directly in Kubernetes without external CI/CD dependencies", - "date": "2025-01-29", + "date": null, "path": "docs/features/native-helm-deployment", "body": "This feature is still in alpha and might change with breaking changes.\n\n\n**Native Helm** is an alternative deployment method that runs Helm deployments directly within Kubernetes jobs, eliminating the need for external CI/CD systems. 
This provides a more self-contained and portable deployment solution.\n\n\n Native Helm deployment is an opt-in feature that can be enabled globally or\n per-service.\n\n\n## Overview\n\nWhen enabled, Native Helm:\n\n- Creates Kubernetes jobs to execute Helm deployments\n- Runs in ephemeral namespaces with proper RBAC\n- Provides real-time deployment logs via WebSocket\n- Handles concurrent deployments automatically\n- Supports all standard Helm chart types\n\n## Quickstart\n\nWant to try native Helm deployment? Here's the fastest way to get started:\n\n```yaml filename=\"lifecycle.yaml\" {5}\nservices:\n - name: my-api\n defaultUUID: \"dev-0\"\n helm:\n deploymentMethod: \"native\" # That's it!\n chart:\n name: \"local\"\n valueFiles:\n - \"./helm/values.yaml\"\n```\n\nThis configuration:\n\n1. Enables native Helm for the `my-api` service\n2. Uses a local Helm chart from your repository\n3. Applies values from `./helm/values.yaml`\n4. Runs deployment as a Kubernetes job\n\n\n To enable native Helm for all services at once, see [Global\n Configuration](#enabling-native-helm).\n\n\n## Configuration\n\n### Enabling Native Helm\n\nThere are two ways to enable native Helm deployment:\n\n#### Per Service Configuration\n\nEnable native Helm for individual services:\n\n```yaml {4} filename=\"lifecycle.yaml\"\nservices:\n - name: my-service\n helm:\n deploymentMethod: \"native\" # Enable for this service only\n chart:\n name: my-chart\n```\n\n#### Global Configuration\n\nEnable native Helm for all services:\n\n```yaml {3} filename=\"lifecycle.yaml\"\nhelm:\n nativeHelm:\n enabled: true # Enable for all services\n```\n\n### Configuration Precedence\n\nLifecycle uses a hierarchical configuration system with three levels of precedence:\n\n1. **helmDefaults** - Base defaults for all deployments (database: `global_config` table)\n2. **Chart-specific config** - Per-chart defaults (database: `global_config` table)\n3. 
**Service YAML config** - Service-specific overrides (highest priority)\n\n\n Service-level configuration always takes precedence over global defaults.\n\n\n### Global Configuration (Database)\n\nGlobal configurations are stored in the `global_config` table in the database. Each configuration is stored as a row with:\n\n- **key**: The configuration name (e.g., 'helmDefaults', 'postgresql', 'redis')\n- **config**: JSON object containing the configuration\n\n#### helmDefaults Configuration\n\nStored in database with key `helmDefaults`:\n\n```json\n{\n \"nativeHelm\": {\n \"enabled\": true,\n \"defaultArgs\": \"--wait --timeout 30m\",\n \"defaultHelmVersion\": \"3.12.0\"\n }\n}\n```\n\n**Field Descriptions**:\n\n- `enabled`: When `true`, enables native Helm deployment for all services unless they explicitly set `deploymentMethod: \"ci\"`\n- `defaultArgs`: Arguments automatically appended to every Helm command (appears before service-specific args)\n- `defaultHelmVersion`: The Helm version to use when not specified at the service or chart level\n\n#### Chart-specific Configuration\n\nExample: PostgreSQL configuration stored with key `postgresql`:\n\n```json\n{\n \"version\": \"3.13.0\",\n \"args\": \"--force --timeout 60m0s --wait\",\n \"chart\": {\n \"name\": \"postgresql\",\n \"repoUrl\": \"https://charts.bitnami.com/bitnami\",\n \"version\": \"12.9.0\",\n \"values\": [\"auth.username=postgres_user\", \"auth.database=postgres_db\"]\n }\n}\n```\n\n\n These global configurations are managed by administrators and stored in the\n database. They provide consistent defaults across all environments and can be\n overridden at the service level.\n\n\n## Usage Examples\n\n### Quick Experiment: Deploy Jenkins!\n\nWant to see native Helm in action? Let's deploy everyone's favorite CI/CD tool - Jenkins! 
This example shows how easy it is to deploy popular applications using native Helm.\n\n```yaml filename=\"lifecycle.yaml\"\nenvironment:\n defaultServices:\n - name: \"my-app\"\n - name: \"jenkins\" # Add Jenkins to your default services\n\nservices:\n - name: \"jenkins\"\n helm:\n chart:\n name: \"jenkins\"\n repoUrl: \"https://charts.bitnami.com/bitnami\"\n version: \"13.6.8\"\n values:\n - \"service.type=ClusterIP\"\n - \"ingress.enabled=true\"\n - \"ingress.hostname={{jenkins_publicUrl}}\"\n - \"ingress.ingressClassName=nginx\"\n```\n\n\n 🎉 That's it! With just a few lines of configuration, you'll have Jenkins\n running in your Kubernetes cluster.\n\n\nTo access your Jenkins instance:\n\n1. Check the deployment status in your PR comment\n2. Click the **Deploy Logs** link to monitor the deployment\n3. Once deployed, Jenkins will be available at the internal hostname\n\n\n For more Jenkins configuration options and values, check out the [Bitnami\n Jenkins chart\n documentation](https://github.com/bitnami/charts/tree/main/bitnami/jenkins).\n This same pattern works for any Bitnami chart (PostgreSQL, Redis, MongoDB) or\n any other public Helm chart!\n\n\n### Basic Service Deployment\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: web-api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: web-app\n version: \"1.2.0\"\n```\n\n### PostgreSQL with Overrides\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: database\n helm:\n deploymentMethod: \"native\"\n version: \"3.14.0\" # Override Helm version\n args: \"--atomic\" # Override deployment args\n chart:\n name: postgresql\n values: # Additional values merged with defaults\n - \"persistence.size=20Gi\"\n - \"replicaCount=2\"\n```\n\n### Custom Environment Variables\n\nLifecycle supports flexible environment variable formatting through the `envMapping` configuration. 
This feature allows you to control how environment variables from your service configuration are passed to your Helm chart.\n\n\n **Why envMapping?** Different Helm charts expect environment variables in\n different formats. Some expect an array of objects with `name` and `value`\n fields (Kubernetes standard), while others expect a simple key-value map. The\n `envMapping` feature lets you adapt to your chart's requirements.\n\n\n#### Default envMapping Configuration\n\nYou can define default `envMapping` configurations in the `global_config` database table. These defaults apply to all services using that chart unless overridden at the service level.\n\n**Example: Setting defaults for your organization's chart**\n\n```json\n// In global_config table, key: \"myorg-web-app\"\n{\n \"chart\": {\n \"name\": \"myorg-web-app\",\n \"repoUrl\": \"https://charts.myorg.com\"\n },\n \"envMapping\": {\n \"app\": {\n \"format\": \"array\",\n \"path\": \"deployment.containers[0].env\"\n }\n }\n}\n```\n\nWith this configuration, any service using the `myorg-web-app` chart will automatically use array format for environment variables:\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: \"myorg-web-app\" # Inherits envMapping from global_config\n docker:\n app:\n env:\n API_KEY: \"secret\"\n # These will be formatted as array automatically\n```\n\n\n Setting `envMapping` in global_config is particularly useful when: - You have\n a standard organizational chart used by many services - You want consistent\n environment variable handling across services - You're migrating multiple\n services and want to reduce configuration duplication\n\n\n#### Array Format\n\nBest for charts that expect Kubernetes-style env arrays.\n\n```yaml {7-9} filename=\"lifecycle.yaml\"\nservices:\n - name: api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: local\n envMapping:\n app:\n format: \"array\"\n path: \"env\"\n docker:\n 
app:\n env:\n DATABASE_URL: \"postgres://localhost:5432/mydb\"\n API_KEY: \"secret-key-123\"\n NODE_ENV: \"production\"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set env[0].name=DATABASE_URL\n--set env[0].value=postgres://localhost:5432/mydb\n--set env[1].name=API_KEY\n--set env[1].value=secret-key-123\n--set env[2].name=NODE_ENV\n--set env[2].value=production\n```\n\n**Your chart's values.yaml would use it like:**\n\n```yaml\nenv:\n - name: DATABASE_URL\n value: postgres://localhost:5432/mydb\n - name: API_KEY\n value: secret-key-123\n - name: NODE_ENV\n value: production\n```\n\n#### Map Format\n\nBest for charts that expect a simple key-value object.\n\n```yaml {7-9} filename=\"lifecycle.yaml\"\nservices:\n - name: api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: local\n envMapping:\n app:\n format: \"map\"\n path: \"envVars\"\n docker:\n app:\n env:\n DATABASE_URL: \"postgres://localhost:5432/mydb\"\n API_KEY: \"secret-key-123\"\n NODE_ENV: \"production\"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set envVars.DATABASE__URL=postgres://localhost:5432/mydb\n--set envVars.API__KEY=secret-key-123\n--set envVars.NODE__ENV=production\n```\n\n\n Note: Underscores in environment variable names are converted to double\n underscores (`__`) in map format to avoid Helm parsing issues.\n\n\n**Your chart's values.yaml would use it like:**\n\n```yaml\nenvVars:\n DATABASE__URL: postgres://localhost:5432/mydb\n API__KEY: secret-key-123\n NODE__ENV: production\n```\n\n#### Complete Example with Multiple Services\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n # Service using array format (common for standard Kubernetes deployments)\n - name: frontend\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/apps\"\n branchName: \"main\"\n envMapping:\n app:\n format: \"array\"\n path: \"deployment.env\"\n chart:\n name: \"./charts/web-app\"\n docker:\n app:\n dockerfilePath: \"frontend/Dockerfile\"\n env:\n 
REACT_APP_API_URL: \"https://api.example.com\"\n REACT_APP_VERSION: \"{{build.uuid}}\"\n\n # Service using map format (common for custom charts)\n - name: backend\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/apps\"\n branchName: \"main\"\n envMapping:\n app:\n format: \"map\"\n path: \"config.environment\"\n chart:\n name: \"./charts/api\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/backend.dockerfile\"\n ports:\n - 3000\n env:\n NODE_ENV: \"production\"\n SERVICE_NAME: \"backend\"\n\n - name: \"mysql-database\"\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n chart:\n name: \"mysql\" # Using public Helm chart\n version: \"9.14.1\"\n repoUrl: \"https://charts.bitnami.com/bitnami\"\n valueFiles:\n - \"deploy/helm/mysql-values.yaml\"\n```\n\n## Templated Variables\n\nLifecycle supports template variables in Helm values that are resolved at deployment time. These variables allow you to reference dynamic values like build UUIDs, docker tags, and internal hostnames.\n\n### Available Variables\n\nTemplate variables use the format `{{{variableName}}}` and are replaced with actual values during deployment:\n\n| Variable | Description | Example Value |\n| ------------------------------------ | ------------------------- | ---------------------------------------- |\n| `{{{serviceName_dockerTag}}}` | Docker tag for a service | `main-abc123` |\n| `{{{serviceName_dockerImage}}}` | Full docker image path | `registry.com/org/repo:main-abc123` |\n| `{{{serviceName_internalHostname}}}` | Internal service hostname | `api-service.env-uuid.svc.cluster.local` |\n| `{{{build.uuid}}}` | Build UUID | `env-12345` |\n| `{{{build.namespace}}}` | Kubernetes namespace | `env-12345` |\n\n### Usage in Values\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: web-api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: \"./charts/app\"\n values:\n - 
\"image.tag={{{web-api_dockerTag}}}\"\n - \"backend.url=http://{{{backend-service_internalHostname}}}:8080\"\n - \"env.BUILD_ID={{{build.uuid}}}\"\n```\n\n\n**Docker Image Mapping**: When using custom charts, you'll need to map `{{{serviceName_dockerImage}}}` or `{{{serviceName_dockerTag}}}` to your chart's expected value path. Common patterns include:\n- `image.repository` and `image.tag` (most common)\n- `deployment.image` (single image string)\n- `app.image` or `application.image`\n- Custom paths specific to your chart\n\nCheck your chart's `values.yaml` to determine the correct path.\n\n\n\n#### Image Mapping Examples\n\n```yaml filename=\"lifecycle.yaml\"\n# Example 1: Separate repository and tag (most common)\nservices:\n - name: web-api\n helm:\n chart:\n name: \"./charts/standard\"\n values:\n - \"image.repository=registry.com/org/web-api\" # Static repository\n - \"image.tag={{{web-api_dockerTag}}}\" # Dynamic tag only\n\n# Example 2: Combined image string\nservices:\n - name: worker\n helm:\n chart:\n name: \"./charts/custom\"\n values:\n - \"deployment.image={{{worker_dockerImage}}}\" # Full image with tag\n\n# Example 3: Nested structure\nservices:\n - name: backend\n helm:\n chart:\n name: \"./charts/microservice\"\n values:\n - \"app.container.image={{{backend_dockerImage}}}\" # Full image with tag\n```\n\n\n**Important**: Always use triple braces `{{{variable}}}` instead of double braces `{{variable}}` for Lifecycle template variables. This prevents Helm from trying to process them as Helm template functions and ensures they are passed through correctly for Lifecycle to resolve.\n\n\n### Template Resolution Order\n\n1. Lifecycle resolves `{{{variables}}}` before passing values to Helm\n2. The resolved values are then passed to Helm using `--set` flags\n3. 
Helm processes its own template functions (if any) after receiving the resolved values\n\n### Example with Service Dependencies\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: api-gateway\n helm:\n chart:\n name: \"./charts/gateway\"\n values:\n - \"config.authServiceUrl=http://{{{auth-service_internalHostname}}}:3000\"\n - \"config.userServiceUrl=http://{{{user-service_internalHostname}}}:3000\"\n - \"image.tag={{{api-gateway_dockerTag}}}\"\n\n - name: auth-service\n helm:\n chart:\n name: \"./charts/microservice\"\n values:\n - \"image.tag={{{auth-service_dockerTag}}}\"\n - \"database.host={{{postgres-db_internalHostname}}}\"\n```\n\n## Deployment Process\n\n\n 1. **Job Creation**: A Kubernetes job is created in the ephemeral namespace 2.\n **RBAC Setup**: Service account with namespace-scoped permissions is created\n 3. **Git Clone**: Init container clones the repository 4. **Helm Deploy**:\n Main container executes the Helm deployment 5. **Monitoring**: Logs are\n streamed in real-time via WebSocket\n\n\n### Concurrent Deployment Handling\n\nNative Helm automatically handles concurrent deployments by:\n\n- Detecting existing deployment jobs\n- Force-deleting the old job\n- Starting the new deployment\n\nThis ensures the newest deployment always takes precedence.\n\n## Monitoring Deployments\n\n### Deploy Logs Access\n\nFor services using native Helm deployment, you can access deployment logs through the Lifecycle PR comment:\n\n1. Add the `lifecycle-status-comments!` label to your PR\n2. In the status comment that appears, you'll see a **Deploy Logs** link for each service using native Helm\n3. 
Click the link to view real-time deployment logs\n\n### Log Contents\n\nThe deployment logs show:\n\n- Git repository cloning progress (`clone-repo` container)\n- Helm deployment execution (`helm-deploy` container)\n- Real-time streaming of all deployment output\n- Success or failure status\n\n## Chart Types\n\nLifecycle automatically detects and handles three chart types:\n\n| Type | Detection | Features |\n| ------------- | -------------------------------------------- | ---------------------------------------------- |\n| **ORG_CHART** | Matches `orgChartName` AND has `helm.docker` | Docker image injection, env var transformation |\n| **LOCAL** | Name is \"local\" or starts with \"./\" or \"../\" | Flexible `envMapping` support |\n| **PUBLIC** | Everything else | Standard labels and tolerations |\n\n\n The `orgChartName` is configured in the database's `global_config` table with\n key `orgChart`. This allows organizations to define their standard internal\n Helm chart.\n\n\n## Troubleshooting\n\n### Deployment Fails with \"Another Operation in Progress\"\n\n**Symptom**: Helm reports an existing operation is blocking deployment\n\n**Solution**: Native Helm automatically handles this by killing existing jobs. If the issue persists:\n\n```bash\n# Check for stuck jobs\nkubectl get jobs -n env-{uuid} -l service={serviceName}\n\n# Force delete if needed\nkubectl delete job {jobName} -n env-{uuid} --force --grace-period=0\n```\n\n### Environment Variables Not Working\n\n**Symptom**: Environment variables not passed to the deployment\n\n**Common Issues**:\n\n1. `envMapping` placed under `chart` instead of directly under `helm`\n2. Incorrect format specification (array vs map)\n3. 
Missing path configuration\n\n**Correct Configuration**:\n\n```yaml {4-7}\nhelm:\n deploymentMethod: \"native\"\n chart:\n name: local\n envMapping: # Correct: directly under helm\n app:\n format: \"array\"\n path: \"env\"\n```\n\n## Migration Example\n\nHere's a complete example showing how to migrate from GitHub-type services to Helm-type services:\n\n### Before: GitHub-type Services\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: \"api-gateway\"\n github:\n repository: \"myorg/api-services\"\n branchName: \"main\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/api.dockerfile\"\n env:\n BACKEND_URL: \"{{backend-service_internalHostname}}:3000\"\n LOG_LEVEL: \"info\"\n ENV_NAME: \"production\"\n ports:\n - 8080\n deployment:\n public: true\n resource:\n cpu:\n request: \"100m\"\n memory:\n request: \"256Mi\"\n readiness:\n tcpSocketPort: 8080\n hostnames:\n host: \"example.com\"\n defaultInternalHostname: \"api-gateway-prod\"\n defaultPublicUrl: \"api.example.com\"\n\n - name: \"backend-service\"\n github:\n repository: \"myorg/api-services\"\n branchName: \"main\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/backend.dockerfile\"\n ports:\n - 3000\n env:\n NODE_ENV: \"production\"\n SERVICE_NAME: \"backend\"\n deployment:\n public: false\n resource:\n cpu:\n request: \"50m\"\n memory:\n request: \"128Mi\"\n readiness:\n tcpSocketPort: 3000\n\n - name: \"mysql-database\"\n docker:\n dockerImage: \"mysql\"\n defaultTag: \"8.0-debian\"\n ports:\n - 3306\n env:\n MYSQL_ROOT_PASSWORD: \"strongpassword123\"\n MYSQL_DATABASE: \"app_database\"\n MYSQL_USER: \"app_user\"\n MYSQL_PASSWORD: \"apppassword456\"\n deployment:\n public: false\n resource:\n cpu:\n request: \"100m\"\n memory:\n request: \"512Mi\"\n readiness:\n tcpSocketPort: 3306\n serviceDisks:\n - name: \"mysql-data\"\n mountPath: \"/var/lib/mysql\"\n accessModes: \"ReadWriteOnce\"\n 
storageSize: \"10Gi\"\n```\n\n### After: Helm-type Services with Native Deployment\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: \"api-gateway\"\n helm:\n deploymentMethod: \"native\" # Enable native Helm\n version: \"3.14.0\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n args: \"--wait --timeout 10m\"\n envMapping:\n app:\n format: \"array\"\n path: \"containers.api.env\"\n chart:\n name: \"./charts/microservices\"\n values:\n - 'image.tag=\"{{{api-gateway_dockerTag}}}\"'\n - \"service.type=LoadBalancer\"\n - \"ingress.enabled=true\"\n valueFiles:\n - \"deploy/helm/base-values.yaml\"\n - \"deploy/helm/api-gateway-values.yaml\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/api.dockerfile\"\n env:\n BACKEND_URL: \"{{backend-service_internalHostname}}:3000\"\n LOG_LEVEL: \"info\"\n ENV_NAME: \"production\"\n ports:\n - 8080\n\n - name: \"backend-service\"\n helm:\n deploymentMethod: \"native\"\n version: \"3.14.0\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n envMapping:\n app:\n format: \"map\" # Using map format for this service\n path: \"env\"\n chart:\n name: \"./charts/microservices\"\n values:\n - 'image.tag=\"{{{backend-service_dockerTag}}}\"'\n - \"replicaCount=2\"\n valueFiles:\n - \"deploy/helm/base-values.yaml\"\n - \"deploy/helm/backend-values.yaml\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/backend.dockerfile\"\n ports:\n - 3000\n env:\n NODE_ENV: \"production\"\n SERVICE_NAME: \"backend\"\n\n - name: \"mysql-database\"\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n chart:\n name: \"mysql\" # Using public Helm chart\n version: \"9.14.1\"\n repoUrl: \"https://charts.bitnami.com/bitnami\"\n valueFiles:\n - \"deploy/helm/mysql-values.yaml\"\n```\n\n### Key Migration Points\n\n1. 
**Service Type Change**: Changed from `github:` to `helm:` configuration\n2. **Repository Location**: `repository` and `branchName` move from under `github:` to directly under `helm:`\n3. **Deployment Method**: Added `deploymentMethod: \"native\"` to enable native Helm\n4. **Chart Configuration**: Added `chart:` section with local or public charts\n5. **Environment Mapping**: Added `envMapping:` to control how environment variables are passed\n6. **Helm Arguments**: Added `args:` for Helm command customization\n7. **Docker Configuration**: Kept existing `docker:` config for build process\n\n\n Note that when converting from GitHub-type to Helm-type services, the\n `repository` and `branchName` fields move from being nested under `github:` to\n being directly under `helm:`.\n\n\n\n Many configuration options (like Helm version, args, and chart details) can be\n defined in the `global_config` database table, making the service YAML\n cleaner. Only override when needed." }, @@ -44,7 +44,7 @@ { "title": "Service Dependencies", "description": "Understand service dependencies, their impact, and configuration.", - "date": "2025-02-16", + "date": null, "path": "docs/features/service-dependencies", "body": "This document will cover `environment.{defaultServices,optionalServices}` and `service.requires`, their differences, impact scope, and usage.\n\n## `environment.{defaultServices,optionalServices}`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ❌ |\n| dev-0\\* | ❌ |\n\nThis represents the default environment that will be created by lifecycle when a pull request is opened in the service repo\\* and does not have any impact on outside repos, dev-0, or any other static environments that use this service.\n\n## `services.requires`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ✅ |\n| dev-0\\* | ✅ 
|\n\n`services.requires` has an impact across the board; hence, it is important to understand how it works and when we should use them.\n\n**Please read the info blocks below carefully.**\n\nYou can think of `services.requires` as a hard dependency definition. For example, if you have an API service and a database, the API service will have a hard dependency on the database.\nIn this scenario, the database should not be defined as the default service. Instead, we should make the dependency explicitly clear by adding the database to the API’s `requires` block.\nBy doing this, we ensure that any outside repo that wants to use our API service will get the database along with it but only needs to specify the API service in their `defaultServices` or `optionalServices`.\n\n\n Only services defined in `lifecycle.yaml` should be used in the `requires`\n array. If a service is defined in an outside repo, use\n `environment.defaultServices` instead.\n\n\nDo not use services in the `services.requires` if the service itself is not\ndefined in the same lifecycle.yaml.\n\n\n Services defined in the `requires` block will only be resolved 1 level down.\n\n\n**This is a very important nuance, which we get tripped by regularly.**\n\n---\n\n## Examples\n\nTo better illustrate the above statement, consider this example.\n\nRepository A `r-A` has 3 services `s-A`, `s-B`, and `s-C`.\n\n- `s-A` requires `s-B`.\n- `s-B` requires `s-C`.\n\nAs you can see, `s-A` has an indirect dependency on `s-C` through `s-B`.\n\n### Scenario 1: Pull Request in Service repo\\* ✅\n\nWhen we open a pull request in `r-A` repo, lifecycle will deploy 3 services: `s-A`, `s-B`, and `s-C`.\n\n#### Breakdown\n\n- Lifecycle deploys `s-A` and `s-B` because they are defined in `defaultServices`.\n- Services defined in the `requires` block will only be resolved **one level down**.\n- Only services defined in `lifecycle.yaml` should be used in the `requires` array. 
If a service is defined in an outside repo, use `environment.defaultServices` instead.\n\n```yaml\n# `r-A.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: \"s-A\"\n - name: \"s-B\"\n\nservices:\n - name: \"s-A\"\n requires:\n - name: \"s-B\"\n helm: ...\n\n - name: \"s-B\"\n requires:\n - name: \"s-C\"\n helm: ...\n\n - name: \"s-C\"\n helm: ...\n```\n\n### Scenario 2: ❌\n\nRepository B `r-B` has service `s-X` and also defines an outside repo `r-A` service `s-A` as `environment.defaultServices`.\n\n```yaml\n# `r-B.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: \"s-X\"\n - name: \"s-A\"\n repository: \"lifecycle/r-A\"\n branch: \"main\"\n\nservices:\n - name: \"s-X\"\n helm: ...\n```\n\n#### Breakdown\n\n1. Lifecycle deploys `s-X` and `s-A` because they are defined in `defaultServices`.\n2. Lifecycle deploys `s-B` because it is a 1st level dependency of a service (`s-A`) listed in `defaultServices`.\n3. Lifecycle **does not** deploy `s-C` since it is **not** a 1st level dependency of any service listed in `defaultServices` or `optionalServices`.\n\nThe way this scenario manifests is lifecycle will deploy `s-X`, `s-A`, and `s-B`, but the build will likely **fail** because `s-B` is missing a required dependency `s-C`.\n\n### Solutions\n\nThere are 2 ways to address this depending on your use case.\n\n#### Solution 1\n\nAdd `s-B` to `r-B`’s `environment.defaultServices` block in `r-B.lifecycle.yaml`. In effect, this will make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: \"s-X\"\n - name: \"s-A\"\n repository: \"lifecycle/r-A\"\n branch: \"main\"\n - name: \"s-B\"\n repository: \"lifecycle/r-A\"\n branch: \"main\"\n```\n\n#### Solution 2\n\nAdd `s-C` to the `services.requires` block of `r-A` in `r-A.lifecycle.yaml`. 
This will also make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: \"s-A\"\n - name: \"s-B\"\n\nservices:\n - name: \"s-A\"\n requires:\n - name: \"s-B\"\n - name: \"s-C\"\n helm: ...\n```\n\n### Choosing the Right Solution\n\nIn summary, the solution you should use depends on how you want your service to be consumed in an outside repo\\*.\n\n- If you want outside repos to explicitly include `s-A` and `s-B`, use **Solution 1**.\n- If you want outside repos to only include `s-A` and let dependencies resolve automatically, use **Solution 2**.\n\n---\n\n### Terminology\n\n- **Service repo**: The repository where `lifecycle.yaml` is defined.\n- **Outside repo**: Another repository referencing it.\n- **dev-0**: Default static environment." }, @@ -86,7 +86,7 @@ { "title": "Deploy Issues", "description": "Understand how to handle common deploy issues with environments", - "date": "2025-03-11", + "date": null, "path": "docs/troubleshooting/deploy-issues", "body": "TODO: This document will cover common deploy issues that you may encounter\n when working with Lifecycle environments." 
}, @@ -107,7 +107,7 @@ { "title": "Lifecycle Full Schema", "description": "Lifecycle Schema documentation; this page contains the full schema as defined in lifecycle core—all at once.", - "date": "2025-05-31", + "date": null, "path": "docs/schema/full", "body": "## Full Lifecycle Schema\n\nBelow is the full Lifecycle schema as defined in the `lifecycle.yaml` file with basic comments for each item.\n\n```yaml\n# @section environment\nenvironment:\n # @param environment.autoDeploy\n autoDeploy: false\n # @param environment.useGithubStatusComment\n useGithubStatusComment: false\n # @param environment.defaultServices\n defaultServices:\n # @param environment.defaultServices[]\n - # @param environment.defaultServices.name (required)\n name: \"\"\n # @param environment.defaultServices.repository\n repository: \"\"\n # @param environment.defaultServices.branch\n branch: \"\"\n # @param environment.optionalServices\n optionalServices:\n # @param environment.optionalServices[]\n - # @param environment.optionalServices.name (required)\n name: \"\"\n # @param environment.optionalServices.repository\n repository: \"\"\n # @param environment.optionalServices.branch\n branch: \"\"\n\n# @section services\nservices:\n # @param services[]\n - # @param services.name (required)\n name: \"\"\n # @param services.appShort\n appShort: \"\"\n # @param services.defaultUUID\n defaultUUID: \"\"\n # @param services.github\n github:\n # @param services.github.repository (required)\n repository: \"\"\n # @param services.github.branchName (required)\n branchName: \"\"\n # @param services.github.docker (required)\n docker:\n # @param services.github.docker.defaultTag (required)\n defaultTag: \"\"\n # @param services.github.docker.pipelineId\n pipelineId: \"\"\n # @param services.github.docker.ecr\n ecr: \"\"\n # @param services.github.docker.app (required)\n app:\n # @param services.github.docker.app.afterBuildPipelineConfig\n afterBuildPipelineConfig:\n # @param 
services.github.docker.app.afterBuildPipelineConfig.afterBuildPipelineId\n afterBuildPipelineId: \"\"\n # @param services.github.docker.app.afterBuildPipelineConfig.detatchAfterBuildPipeline\n detatchAfterBuildPipeline: false\n # @param services.github.docker.app.afterBuildPipelineConfig.description\n description: \"\"\n # @param services.github.docker.app.dockerfilePath (required)\n dockerfilePath: \"\"\n # @param services.github.docker.app.command\n command: \"\"\n # @param services.github.docker.app.arguments\n arguments: \"\"\n # @param services.github.docker.app.env\n env:\n\n # @param services.github.docker.app.ports\n ports:\n # @param services.github.docker.app.ports[]\n - \"\"\n # @param services.github.docker.init\n init:\n # @param services.github.docker.init.dockerfilePath (required)\n dockerfilePath: \"\"\n # @param services.github.docker.init.command\n command: \"\"\n # @param services.github.docker.init.arguments\n arguments: \"\"\n # @param services.github.docker.init.env\n env:\n\n # @param services.github.docker.builder\n builder:\n # @param services.github.docker.builder.engine\n engine: \"\"\n # @param services.github.deployment\n deployment:\n # @param services.github.deployment.helm\n helm:\n # @param services.github.deployment.helm.enabled\n enabled: false\n # @param services.github.deployment.helm.chartName\n chartName: \"\"\n # @param services.github.deployment.helm.chartRepoUrl\n chartRepoUrl: \"\"\n # @param services.github.deployment.helm.chartVersion\n chartVersion: \"\"\n # @param services.github.deployment.helm.cmdPs\n cmdPs: \"\"\n # @param services.github.deployment.helm.action\n action: \"\"\n # @param services.github.deployment.helm.customValues\n customValues:\n # @param services.github.deployment.helm.customValues[]\n - \"\"\n # @param services.github.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.github.deployment.helm.customValueFiles[]\n - \"\"\n # @param services.github.deployment.helm.helmVersion\n 
helmVersion: \"\"\n # @param services.github.deployment.helm.attachPvc\n attachPvc:\n # @param services.github.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.github.deployment.helm.attachPvc.mountPath\n mountPath: \"\"\n # @param services.github.deployment.public\n public: false\n # @param services.github.deployment.capacityType\n capacityType: \"\"\n # @param services.github.deployment.resource\n resource:\n # @param services.github.deployment.resource.cpu\n cpu:\n # @param services.github.deployment.resource.cpu.request\n request: \"\"\n # @param services.github.deployment.resource.cpu.limit\n limit: \"\"\n # @param services.github.deployment.resource.memory\n memory:\n # @param services.github.deployment.resource.memory.request\n request: \"\"\n # @param services.github.deployment.resource.memory.limit\n limit: \"\"\n # @param services.github.deployment.readiness\n readiness:\n # @param services.github.deployment.readiness.disabled\n disabled: false\n # @param services.github.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.github.deployment.readiness.httpGet\n httpGet:\n # @param services.github.deployment.readiness.httpGet.path\n path: \"\"\n # @param services.github.deployment.readiness.httpGet.port\n port: 0\n # @param services.github.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.github.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.github.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.github.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.github.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.github.deployment.hostnames\n hostnames:\n # @param services.github.deployment.hostnames.host\n host: \"\"\n # @param services.github.deployment.hostnames.acmARN\n acmARN: \"\"\n # @param services.github.deployment.hostnames.defaultInternalHostname\n 
defaultInternalHostname: \"\"\n # @param services.github.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: \"\"\n # @param services.github.deployment.network\n network:\n # @param services.github.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.github.deployment.network.ipWhitelist[]\n - \"\"\n # @param services.github.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param services.github.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.github.deployment.network.grpc\n grpc:\n # @param services.github.deployment.network.grpc.enable\n enable: false\n # @param services.github.deployment.network.grpc.host\n host: \"\"\n # @param services.github.deployment.network.grpc.defaultHost\n defaultHost: \"\"\n # @param services.github.deployment.serviceDisks\n serviceDisks:\n # @param services.github.deployment.serviceDisks[]\n - # @param services.github.deployment.serviceDisks.name (required)\n name: \"\"\n # @param services.github.deployment.serviceDisks.mountPath (required)\n mountPath: \"\"\n # @param services.github.deployment.serviceDisks.accessModes\n accessModes: \"\"\n # @param services.github.deployment.serviceDisks.storageSize (required)\n storageSize: \"\"\n # @param services.github.deployment.serviceDisks.medium\n medium: \"\"\n # @param services.docker\n docker:\n # @param services.docker.dockerImage (required)\n dockerImage: \"\"\n # @param services.docker.defaultTag (required)\n defaultTag: \"\"\n # @param services.docker.command\n command: \"\"\n # @param services.docker.arguments\n arguments: \"\"\n # @param services.docker.env\n env:\n\n # @param services.docker.ports\n ports:\n # @param services.docker.ports[]\n - \"\"\n # @param services.docker.deployment\n deployment:\n # @param services.docker.deployment.helm\n helm:\n # @param services.docker.deployment.helm.enabled\n enabled: false\n # @param services.docker.deployment.helm.chartName\n chartName: \"\"\n # @param 
services.docker.deployment.helm.chartRepoUrl\n chartRepoUrl: \"\"\n # @param services.docker.deployment.helm.chartVersion\n chartVersion: \"\"\n # @param services.docker.deployment.helm.cmdPs\n cmdPs: \"\"\n # @param services.docker.deployment.helm.action\n action: \"\"\n # @param services.docker.deployment.helm.customValues\n customValues:\n # @param services.docker.deployment.helm.customValues[]\n - \"\"\n # @param services.docker.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.docker.deployment.helm.customValueFiles[]\n - \"\"\n # @param services.docker.deployment.helm.helmVersion\n helmVersion: \"\"\n # @param services.docker.deployment.helm.attachPvc\n attachPvc:\n # @param services.docker.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.docker.deployment.helm.attachPvc.mountPath\n mountPath: \"\"\n # @param services.docker.deployment.public\n public: false\n # @param services.docker.deployment.capacityType\n capacityType: \"\"\n # @param services.docker.deployment.resource\n resource:\n # @param services.docker.deployment.resource.cpu\n cpu:\n # @param services.docker.deployment.resource.cpu.request\n request: \"\"\n # @param services.docker.deployment.resource.cpu.limit\n limit: \"\"\n # @param services.docker.deployment.resource.memory\n memory:\n # @param services.docker.deployment.resource.memory.request\n request: \"\"\n # @param services.docker.deployment.resource.memory.limit\n limit: \"\"\n # @param services.docker.deployment.readiness\n readiness:\n # @param services.docker.deployment.readiness.disabled\n disabled: false\n # @param services.docker.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.docker.deployment.readiness.httpGet\n httpGet:\n # @param services.docker.deployment.readiness.httpGet.path\n path: \"\"\n # @param services.docker.deployment.readiness.httpGet.port\n port: 0\n # @param services.docker.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # 
@param services.docker.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.docker.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.docker.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.docker.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.docker.deployment.hostnames\n hostnames:\n # @param services.docker.deployment.hostnames.host\n host: \"\"\n # @param services.docker.deployment.hostnames.acmARN\n acmARN: \"\"\n # @param services.docker.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: \"\"\n # @param services.docker.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: \"\"\n # @param services.docker.deployment.network\n network:\n # @param services.docker.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.docker.deployment.network.ipWhitelist[]\n - \"\"\n # @param services.docker.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param services.docker.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.docker.deployment.network.grpc\n grpc:\n # @param services.docker.deployment.network.grpc.enable\n enable: false\n # @param services.docker.deployment.network.grpc.host\n host: \"\"\n # @param services.docker.deployment.network.grpc.defaultHost\n defaultHost: \"\"\n # @param services.docker.deployment.serviceDisks\n serviceDisks:\n # @param services.docker.deployment.serviceDisks[]\n - # @param services.docker.deployment.serviceDisks.name (required)\n name: \"\"\n # @param services.docker.deployment.serviceDisks.mountPath (required)\n mountPath: \"\"\n # @param services.docker.deployment.serviceDisks.accessModes\n accessModes: \"\"\n # @param services.docker.deployment.serviceDisks.storageSize (required)\n storageSize: \"\"\n # @param services.docker.deployment.serviceDisks.medium\n medium: \"\"\n```" }, diff --git a/src/lib/static/blogcontent/blogcontent.ts 
b/src/lib/static/blogcontent/blogcontent.ts index c8b6b07..8421843 100644 --- a/src/lib/static/blogcontent/blogcontent.ts +++ b/src/lib/static/blogcontent/blogcontent.ts @@ -2,15 +2,15 @@ export const blogContent = [ { title: "Introducing Lifecycle", description: null, - date: "2025-05-23", + date: null, path: "articles/introduction", - body: 'We started building **Lifecycle** at GoodRx in 2019 because managing our lower environments like staging, development, QA had become a daily headache. As our architecture shifted from a monolith to microservices, our internal channels were flooded with messages like "Is anyone using staging?" "Staging is broken again," and "Who just overwrote my changes?" Waiting in line for hours (sometimes days) to test code in a real-world-like environment was the norm.\n\nWe simply couldn\'t scale with our engineering growth. So, as a proof of concept, we spun up **Lifecycle**: a tool that lets you create on-demand, ephemeral environments off of github pull request.\n\nAt first, only a handful of services were onboarded, but our engineers immediately saw the difference, no more static staging servers, no more pipeline gymnastics, and no more accidental overwrites. They wanted Lifecycle wherever they touched code, so we built a simple lifecycle.yaml configuration, replaced our manual database entries, and baked Lifecycle support into every new service template.\n\nAfter ironing out early scaling kinks, we realized Lifecycle had become more than an internal convenience, it was a game-changer for us.\n\nToday (June 5, 2025), we\'re thrilled to open-source five years of collective effort under the Apache 2.0 license. 
This project represents countless late-night brainstorming sessions, pull requests, and "aha" moments, and we can\'t wait to see how you\'ll make it your own: adding integrations, optimizing performance, or finding novel workflows we never imagined.\n\nBy sharing Lifecycle, we hope to help teams stuck in the same limited environment limbo we once were and build a community of passionate likeminded developers who\'ll shape the the future of Lifecycle.\n\nWe look forward to learning from you, growing together, and making shipping high-quality software faster and more enjoyable for everyone!\n\nJoin our Discord server [here](https://discord.gg/TEtKgCs8T8) to connect!!', + body: 'We started building **Lifecycle** at GoodRx in 2019 because managing our lower environments like staging, development, QA had become a daily headache. As our architecture shifted from a monolith to microservices, our internal channels were flooded with messages like "Is anyone using staging?" "Staging is broken again," and "Who just overwrote my changes?" Waiting in line for hours (sometimes days) to test code in a real-world-like environment was the norm.\n\nWe simply couldn\'t scale with our engineering growth. So, as a proof of concept, we spun up **Lifecycle**: a tool that lets you create on-demand, ephemeral environments off of github pull request.\n\nAt first, only a handful of services were onboarded, but our engineers immediately saw the difference, no more static staging servers, no more pipeline gymnastics, and no more accidental overwrites. 
They wanted Lifecycle wherever they touched code, so we built a simple lifecycle.yaml configuration, replaced our manual database entries, and baked Lifecycle support into every new service template.\n\nAfter ironing out early scaling kinks, we realized Lifecycle had become more than an internal convenience, it was a game-changer for us.\n\nToday (June 5, 2025), we\'re thrilled to open-source five years of collective effort under the Apache 2.0 license. This project represents countless late-night brainstorming sessions, pull requests, and "aha" moments, and we can\'t wait to see how you\'ll make it your own: adding integrations, optimizing performance, or finding novel workflows we never imagined.\n\nBy sharing Lifecycle, we hope to help teams stuck in the same limited environment limbo we once were and build a community of passionate likeminded developers who\'ll shape the the future of Lifecycle.\n\nWe look forward to learning from you, growing together, and making shipping high-quality software faster and more enjoyable for everyone!\n\nJoin our Discord server [here](https://discord.gg/M5fhHJuEX8) to connect!!', }, { title: "What is Lifecycle?", description: "Lifecycle is your effortless way to test and create ephemeral environments", - date: "2025-03-12", + date: null, path: "docs/what-is-lifecycle", body: "Lifecycle is an **ephemeral** _(/əˈfem(ə)rəl/, lasting for a very short time)_ environment orchestrator that transforms your GitHub pull requests into fully functional development environments. It enables developers to test, validate, and collaborate on features without the hassle of managing infrastructure manually.\n\n> With **Lifecycle**, every pull request gets its own connected playground—ensuring that changes can be previewed, integrated, and verified before merging into its main branch.\n\n## A Developer’s Story\n\nImagine working in an organization that develops multiple services. 
Managing and testing changes across these services can be challenging, especially when multiple developers are working on different features simultaneously.\n\nMeet **Nick Holiday 👨‍💻**, an engineer who needs to update a database schema and modify the corresponding API in a backend service. Additionally, his change requires frontend service updates to display the new data correctly.\n\n### Traditional Workflow Challenges\n\n- **Shared environments** – Nick deploys his backend service changes to a shared dev or staging environment, but another engineer is testing unrelated changes at the same time.\n- **Conflicting updates** – The frontend engineers working on the UI might face issues if their code depends on a stable backend service that keeps changing.\n- **Environment management** – Setting up and maintaining an isolated environment for testing requires significant effort.\n\n### Enter Lifecycle\n\nWith Lifecycle, as soon as Nick opens a pull request, the system automatically:\n\n1. 🏗️ **Creates an isolated development environment** – This environment includes Nick’s updated backend service along with the necessary frontend services.\n2. 🚀 **Deploys the application** – Everything is set up exactly as it would be in production, ensuring a reliable test scenario.\n3. 🔗 **Generates a shareable URL** – Nick and his teammates can interact with the new features without setting up anything locally.\n4. 
🧹 **Cleans up automatically** – Once the PR is merged or closed, Lifecycle removes the environment, keeping things tidy.\n\n## Watch a Quick Demo\n\n\n\n## How It Works\n\n\n\n## Why Use Lifecycle?\n\n- **Faster Feedback Loops** - Get instant previews of your changes without waiting for staging deployments.\n- **Isolation** - Each PR runs in its own sandbox, preventing conflicts.\n- **Seamless Collaboration** - Share URLs with stakeholders, designers, or QA engineers.\n- **Automatic Cleanup** - No more stale test environments; Lifecycle manages cleanup for you.\n- **Works with Your Stack** - Supports containerized applications and integrates with Kubernetes.", }, @@ -18,14 +18,14 @@ export const blogContent = [ title: "Auto Deploy & Labels", description: "How to setup auto deploy for pull requests and control envionment with labels", - date: "2025-01-29", + date: null, path: "docs/features/auto-deployment", body: '## Auto-Deploy Configuration\n\nTo enable **automatic deployment** when a PR is created, set the `autoDeploy` attribute in your repository\'s `lifecycle.yaml` file:\n\n```yaml {2} filename="lifecycle.yaml"\nenvironment:\n autoDeploy: true\n defaultServices:\n```\n\n- Lifecycle will **automatically create** the environment as soon as a PR is opened.\n- A `lifecycle-deploy!` label will be added to the PR to indicate that the environment has been deployed.\n\n---\n\n## Managing Deployments with Labels\n\nIf **auto-deploy is not enabled**, you can manually control the environment using PR labels.\n\n### Deploy an Environment\n\nTo create an ephemeral environment for a PR, **add** the `lifecycle-deploy!` label.\n\n### Tear Down an Environment\n\nTo **delete** an active environment, use either of these labels:\n\n- **Remove** `lifecycle-deploy!`\n- **Add** `lifecycle-disabled!`\n\n---\n\n## Automatic Cleanup on PR Closure\n\nWhen a PR is **closed**, Lifecycle will:\n\n1. **Tear down** the active environment.\n2. 
**Remove** the `lifecycle-deploy!` label from the PR.\n\nThis ensures that unused environments do not persist after the PR lifecycle is complete.\n\n---\n\n## Summary\n\n| Feature | Behavior |\n| ---------------------------- | ----------------------------------------------- |\n| `autoDeploy: true` in config | PR environments are **automatically** deployed. |\n| `lifecycle-deploy!` | **Manually deploy** an environment. |\n| Remove `lifecycle-deploy!` | **Tear down** the environment. |\n| Add `lifecycle-disabled!` | **Tear down** the environment manually. |\n| PR closed | **Environment is deleted automatically**. |\n\nUsing these configurations and labels, teams can efficiently manage **ephemeral environments** in their development workflow.', }, { title: "Webhooks", description: null, - date: "2025-02-16", + date: null, path: "docs/features/webhooks", body: 'Lifecycle can invoke **third-party services** when a build state changes.\n\nWebhooks allow users to automate external processes such as running tests, performing cleanup tasks, or sending notifications based on environment build states.\n\n## Supported Types\n\nLifecycle supports three types of webhooks:\n\n1. **`codefresh`** - Trigger Codefresh pipelines\n2. **`docker`** - Execute Docker images as Kubernetes jobs\n3. 
**`command`** - Run shell commands in a specified Docker image\n\n## Common Use Cases\n\n- When a build status is `deployed`, trigger **end-to-end tests**.\n- When a build status is `error`, trigger **infrastructure cleanup** or alert the team.\n- Run **security scans** on built containers.\n- Execute **database migrations** after deployment.\n- Send **notifications** to Slack, Discord, or other communication channels.\n- Perform **smoke tests** using custom test containers.\n\n## Configuration\n\nWebhooks are defined in the `lifecycle.yaml` under the `environment.webhooks` section.\n\nBelow is an example configuration for triggering end-to-end tests when the `deployed` state is reached.\n\n## Examples\n\n### `codefresh`\n\nThe `codefresh` type triggers existing Codefresh pipelines when build states change.\n\n```yaml\n# Trigger End-to-End Tests on Deployment\nenvironment:\n # ...\n defaultServices:\n - name: "frontend"\n optionalServices:\n - name: "backend"\n repository: "lifecycle/backend"\n branch: "main"\n webhooks:\n - state: deployed\n type: codefresh\n name: "End to End Tests"\n pipelineId: 64598362453cc650c0c9cd4d\n trigger: tests\n env:\n branch: "{{frontend_branchName}}"\n TEST_URL: "https://{{frontend_publicUrl}}"\n # ...\n```\n\n- **`state: deployed`** → Triggers the webhook when the build reaches the `deployed` state.\n- **`type: codefresh`** → Specifies that this webhook triggers a **Codefresh pipeline**.\n- **`name`** → A human-readable name for the webhook.\n- **`pipelineId`** → The unique Codefresh pipeline ID.\n- **`trigger`** → Codefresh pipeline\'s trigger to execute.\n- **`env`** → Passes relevant environment variables (e.g., `branch` and `TEST_URL`).\n\n---\n\n```yaml\n# Trigger Cleanup on Build Error\nenvironment:\n # ...\n webhooks:\n - state: error\n type: codefresh\n name: "Cleanup Failed Deployment"\n pipelineId: 74283905723ff650c0d9ab7e\n trigger: cleanup\n env:\n branch: "{{frontend_branchName}}"\n CLEANUP_TARGET: "frontend"\n # 
...\n```\n\n- **`state: error`** → Triggers the webhook when the build fails.\n- **`type: codefresh`** → Invokes a Codefresh cleanup pipeline.\n- **`trigger: cleanup`** → Codefresh pipeline\'s trigger to execute.\n- **`env`** → Includes necessary variables, such as `branch` and `CLEANUP_TARGET`.\n\n### `docker`\n\nThe `docker` type allows you to execute any Docker image as a Kubernetes job when build states change.\n\n\n Docker webhooks run as Kubernetes jobs in the same namespace as your build.\n They have a default timeout of 30 minutes and resource limits of 200m CPU and\n 1Gi memory.\n\n\n```yaml\n# Run E2E Tests in Custom Container\nenvironment:\n # ...\n webhooks:\n - name: "E2E Test Suite"\n description: "Execute comprehensive E2E tests"\n state: deployed\n type: docker\n docker:\n image: "myorg/e2e-tests:latest"\n command: ["npm", "run", "e2e"]\n timeout: 1200 # 1 hour (optional, default: 1800 seconds)\n env:\n BASE_URL: "https://{{frontend_publicUrl}}"\n ENVIRONMENT: "ephemeral"\n```\n\n- **`docker.image`** → Docker image to execute (required)\n- **`docker.command`** → Override the default entrypoint (optional)\n- **`docker.args`** → Arguments to pass to the command (optional)\n- **`docker.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n### `command`\n\nThe `command` type is a simplified version of Docker webhooks, ideal for running shell scripts or simple commands.\n\n```yaml\n# Slack Notification Example\nenvironment:\n # ...\n webhooks:\n - name: "Deployment Notification"\n description: "Notify team of successful deployment"\n state: deployed\n type: command\n command:\n image: "alpine:latest"\n script: |\n apk add --no-cache curl\n curl -X POST "$WEBHOOK_URL" \\\n -H "Content-Type: application/json" \\\n -d "{\\"text\\":\\"🚀 Deployed $SERVICE_NAME to $DEPLOY_URL\\"}"\n timeout: 300 # 5 minutes (optional)\n env:\n WEBHOOK_URL: "https://hooks.slack.com/services/XXX/YYY/ZZZ"\n SERVICE_NAME: "{{frontend_internalHostname}}"\n 
DEPLOY_URL: "https://{{frontend_publicUrl}}"\n```\n\n\n Make sure to replace placeholder values like webhook URLs and pipeline IDs\n with your actual values.\n\n\n- **`command.image`** → Docker image to run the script in (required)\n- **`command.script`** → Shell script to execute (required)\n- **`command.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n## Trigger states\n\nWebhooks can be triggered on the following build states:\n\n- **`deployed`** → Service successfully deployed and running\n- **`error`** → Build or deployment failed\n- **`torn_down`** → Environment has been destroyed\n\n## Note\n\n- All webhooks for the same state are executed **serially** in the order defined.\n- Webhook failures do not affect the build status.\n- Webhook invocations can be viewed at `/builds/[uuid]/webhooks` page(latest 20 invocations). Use the API to view all invocations.\n- `docker` and `command` type\'s logs are not streamed when the job is still in progress and are available only after the job completes.', }, @@ -33,7 +33,7 @@ export const blogContent = [ title: "Native Helm Deployment", description: "Deploy services using Helm directly in Kubernetes without external CI/CD dependencies", - date: "2025-01-29", + date: null, path: "docs/features/native-helm-deployment", body: 'This feature is still in alpha and might change with breaking changes.\n\n\n**Native Helm** is an alternative deployment method that runs Helm deployments directly within Kubernetes jobs, eliminating the need for external CI/CD systems. 
This provides a more self-contained and portable deployment solution.\n\n\n Native Helm deployment is an opt-in feature that can be enabled globally or\n per-service.\n\n\n## Overview\n\nWhen enabled, Native Helm:\n\n- Creates Kubernetes jobs to execute Helm deployments\n- Runs in ephemeral namespaces with proper RBAC\n- Provides real-time deployment logs via WebSocket\n- Handles concurrent deployments automatically\n- Supports all standard Helm chart types\n\n## Quickstart\n\nWant to try native Helm deployment? Here\'s the fastest way to get started:\n\n```yaml filename="lifecycle.yaml" {5}\nservices:\n - name: my-api\n defaultUUID: "dev-0"\n helm:\n deploymentMethod: "native" # That\'s it!\n chart:\n name: "local"\n valueFiles:\n - "./helm/values.yaml"\n```\n\nThis configuration:\n\n1. Enables native Helm for the `my-api` service\n2. Uses a local Helm chart from your repository\n3. Applies values from `./helm/values.yaml`\n4. Runs deployment as a Kubernetes job\n\n\n To enable native Helm for all services at once, see [Global\n Configuration](#enabling-native-helm).\n\n\n## Configuration\n\n### Enabling Native Helm\n\nThere are two ways to enable native Helm deployment:\n\n#### Per Service Configuration\n\nEnable native Helm for individual services:\n\n```yaml {4} filename="lifecycle.yaml"\nservices:\n - name: my-service\n helm:\n deploymentMethod: "native" # Enable for this service only\n chart:\n name: my-chart\n```\n\n#### Global Configuration\n\nEnable native Helm for all services:\n\n```yaml {3} filename="lifecycle.yaml"\nhelm:\n nativeHelm:\n enabled: true # Enable for all services\n```\n\n### Configuration Precedence\n\nLifecycle uses a hierarchical configuration system with three levels of precedence:\n\n1. **helmDefaults** - Base defaults for all deployments (database: `global_config` table)\n2. **Chart-specific config** - Per-chart defaults (database: `global_config` table)\n3. 
**Service YAML config** - Service-specific overrides (highest priority)\n\n\n Service-level configuration always takes precedence over global defaults.\n\n\n### Global Configuration (Database)\n\nGlobal configurations are stored in the `global_config` table in the database. Each configuration is stored as a row with:\n\n- **key**: The configuration name (e.g., \'helmDefaults\', \'postgresql\', \'redis\')\n- **config**: JSON object containing the configuration\n\n#### helmDefaults Configuration\n\nStored in database with key `helmDefaults`:\n\n```json\n{\n "nativeHelm": {\n "enabled": true,\n "defaultArgs": "--wait --timeout 30m",\n "defaultHelmVersion": "3.12.0"\n }\n}\n```\n\n**Field Descriptions**:\n\n- `enabled`: When `true`, enables native Helm deployment for all services unless they explicitly set `deploymentMethod: "ci"`\n- `defaultArgs`: Arguments automatically appended to every Helm command (appears before service-specific args)\n- `defaultHelmVersion`: The Helm version to use when not specified at the service or chart level\n\n#### Chart-specific Configuration\n\nExample: PostgreSQL configuration stored with key `postgresql`:\n\n```json\n{\n "version": "3.13.0",\n "args": "--force --timeout 60m0s --wait",\n "chart": {\n "name": "postgresql",\n "repoUrl": "https://charts.bitnami.com/bitnami",\n "version": "12.9.0",\n "values": ["auth.username=postgres_user", "auth.database=postgres_db"]\n }\n}\n```\n\n\n These global configurations are managed by administrators and stored in the\n database. They provide consistent defaults across all environments and can be\n overridden at the service level.\n\n\n## Usage Examples\n\n### Quick Experiment: Deploy Jenkins!\n\nWant to see native Helm in action? Let\'s deploy everyone\'s favorite CI/CD tool - Jenkins! 
This example shows how easy it is to deploy popular applications using native Helm.\n\n```yaml filename="lifecycle.yaml"\nenvironment:\n defaultServices:\n - name: "my-app"\n - name: "jenkins" # Add Jenkins to your default services\n\nservices:\n - name: "jenkins"\n helm:\n chart:\n name: "jenkins"\n repoUrl: "https://charts.bitnami.com/bitnami"\n version: "13.6.8"\n values:\n - "service.type=ClusterIP"\n - "ingress.enabled=true"\n - "ingress.hostname={{jenkins_publicUrl}}"\n - "ingress.ingressClassName=nginx"\n```\n\n\n 🎉 That\'s it! With just a few lines of configuration, you\'ll have Jenkins\n running in your Kubernetes cluster.\n\n\nTo access your Jenkins instance:\n\n1. Check the deployment status in your PR comment\n2. Click the **Deploy Logs** link to monitor the deployment\n3. Once deployed, Jenkins will be available at the internal hostname\n\n\n For more Jenkins configuration options and values, check out the [Bitnami\n Jenkins chart\n documentation](https://github.com/bitnami/charts/tree/main/bitnami/jenkins).\n This same pattern works for any Bitnami chart (PostgreSQL, Redis, MongoDB) or\n any other public Helm chart!\n\n\n### Basic Service Deployment\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: web-api\n helm:\n deploymentMethod: "native"\n chart:\n name: web-app\n version: "1.2.0"\n```\n\n### PostgreSQL with Overrides\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: database\n helm:\n deploymentMethod: "native"\n version: "3.14.0" # Override Helm version\n args: "--atomic" # Override deployment args\n chart:\n name: postgresql\n values: # Additional values merged with defaults\n - "persistence.size=20Gi"\n - "replicaCount=2"\n```\n\n### Custom Environment Variables\n\nLifecycle supports flexible environment variable formatting through the `envMapping` configuration. 
This feature allows you to control how environment variables from your service configuration are passed to your Helm chart.\n\n\n **Why envMapping?** Different Helm charts expect environment variables in\n different formats. Some expect an array of objects with `name` and `value`\n fields (Kubernetes standard), while others expect a simple key-value map. The\n `envMapping` feature lets you adapt to your chart\'s requirements.\n\n\n#### Default envMapping Configuration\n\nYou can define default `envMapping` configurations in the `global_config` database table. These defaults apply to all services using that chart unless overridden at the service level.\n\n**Example: Setting defaults for your organization\'s chart**\n\n```json\n// In global_config table, key: "myorg-web-app"\n{\n "chart": {\n "name": "myorg-web-app",\n "repoUrl": "https://charts.myorg.com"\n },\n "envMapping": {\n "app": {\n "format": "array",\n "path": "deployment.containers[0].env"\n }\n }\n}\n```\n\nWith this configuration, any service using the `myorg-web-app` chart will automatically use array format for environment variables:\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: api\n helm:\n deploymentMethod: "native"\n chart:\n name: "myorg-web-app" # Inherits envMapping from global_config\n docker:\n app:\n env:\n API_KEY: "secret"\n # These will be formatted as array automatically\n```\n\n\n Setting `envMapping` in global_config is particularly useful when: - You have\n a standard organizational chart used by many services - You want consistent\n environment variable handling across services - You\'re migrating multiple\n services and want to reduce configuration duplication\n\n\n#### Array Format\n\nBest for charts that expect Kubernetes-style env arrays.\n\n```yaml {7-9} filename="lifecycle.yaml"\nservices:\n - name: api\n helm:\n deploymentMethod: "native"\n chart:\n name: local\n envMapping:\n app:\n format: "array"\n path: "env"\n docker:\n app:\n env:\n DATABASE_URL: 
"postgres://localhost:5432/mydb"\n API_KEY: "secret-key-123"\n NODE_ENV: "production"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set env[0].name=DATABASE_URL\n--set env[0].value=postgres://localhost:5432/mydb\n--set env[1].name=API_KEY\n--set env[1].value=secret-key-123\n--set env[2].name=NODE_ENV\n--set env[2].value=production\n```\n\n**Your chart\'s values.yaml would use it like:**\n\n```yaml\nenv:\n - name: DATABASE_URL\n value: postgres://localhost:5432/mydb\n - name: API_KEY\n value: secret-key-123\n - name: NODE_ENV\n value: production\n```\n\n#### Map Format\n\nBest for charts that expect a simple key-value object.\n\n```yaml {7-9} filename="lifecycle.yaml"\nservices:\n - name: api\n helm:\n deploymentMethod: "native"\n chart:\n name: local\n envMapping:\n app:\n format: "map"\n path: "envVars"\n docker:\n app:\n env:\n DATABASE_URL: "postgres://localhost:5432/mydb"\n API_KEY: "secret-key-123"\n NODE_ENV: "production"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set envVars.DATABASE__URL=postgres://localhost:5432/mydb\n--set envVars.API__KEY=secret-key-123\n--set envVars.NODE__ENV=production\n```\n\n\n Note: Underscores in environment variable names are converted to double\n underscores (`__`) in map format to avoid Helm parsing issues.\n\n\n**Your chart\'s values.yaml would use it like:**\n\n```yaml\nenvVars:\n DATABASE__URL: postgres://localhost:5432/mydb\n API__KEY: secret-key-123\n NODE__ENV: production\n```\n\n#### Complete Example with Multiple Services\n\n```yaml filename="lifecycle.yaml"\nservices:\n # Service using array format (common for standard Kubernetes deployments)\n - name: frontend\n helm:\n deploymentMethod: "native"\n repository: "myorg/apps"\n branchName: "main"\n envMapping:\n app:\n format: "array"\n path: "deployment.env"\n chart:\n name: "./charts/web-app"\n docker:\n app:\n dockerfilePath: "frontend/Dockerfile"\n env:\n REACT_APP_API_URL: "https://api.example.com"\n REACT_APP_VERSION: 
"{{build.uuid}}"\n\n # Service using map format (common for custom charts)\n - name: backend\n helm:\n deploymentMethod: "native"\n repository: "myorg/apps"\n branchName: "main"\n envMapping:\n app:\n format: "map"\n path: "config.environment"\n chart:\n name: "./charts/api"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/backend.dockerfile"\n ports:\n - 3000\n env:\n NODE_ENV: "production"\n SERVICE_NAME: "backend"\n\n - name: "mysql-database"\n helm:\n deploymentMethod: "native"\n repository: "myorg/api-services"\n branchName: "main"\n chart:\n name: "mysql" # Using public Helm chart\n version: "9.14.1"\n repoUrl: "https://charts.bitnami.com/bitnami"\n valueFiles:\n - "deploy/helm/mysql-values.yaml"\n```\n\n## Templated Variables\n\nLifecycle supports template variables in Helm values that are resolved at deployment time. These variables allow you to reference dynamic values like build UUIDs, docker tags, and internal hostnames.\n\n### Available Variables\n\nTemplate variables use the format `{{{variableName}}}` and are replaced with actual values during deployment:\n\n| Variable | Description | Example Value |\n| ------------------------------------ | ------------------------- | ---------------------------------------- |\n| `{{{serviceName_dockerTag}}}` | Docker tag for a service | `main-abc123` |\n| `{{{serviceName_dockerImage}}}` | Full docker image path | `registry.com/org/repo:main-abc123` |\n| `{{{serviceName_internalHostname}}}` | Internal service hostname | `api-service.env-uuid.svc.cluster.local` |\n| `{{{build.uuid}}}` | Build UUID | `env-12345` |\n| `{{{build.namespace}}}` | Kubernetes namespace | `env-12345` |\n\n### Usage in Values\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: web-api\n helm:\n deploymentMethod: "native"\n chart:\n name: "./charts/app"\n values:\n - "image.tag={{{web-api_dockerTag}}}"\n - "backend.url=http://{{{backend-service_internalHostname}}}:8080"\n - 
"env.BUILD_ID={{{build.uuid}}}"\n```\n\n\n**Docker Image Mapping**: When using custom charts, you\'ll need to map `{{{serviceName_dockerImage}}}` or `{{{serviceName_dockerTag}}}` to your chart\'s expected value path. Common patterns include:\n- `image.repository` and `image.tag` (most common)\n- `deployment.image` (single image string)\n- `app.image` or `application.image`\n- Custom paths specific to your chart\n\nCheck your chart\'s `values.yaml` to determine the correct path.\n\n\n\n#### Image Mapping Examples\n\n```yaml filename="lifecycle.yaml"\n# Example 1: Separate repository and tag (most common)\nservices:\n - name: web-api\n helm:\n chart:\n name: "./charts/standard"\n values:\n - "image.repository=registry.com/org/web-api" # Static repository\n - "image.tag={{{web-api_dockerTag}}}" # Dynamic tag only\n\n# Example 2: Combined image string\nservices:\n - name: worker\n helm:\n chart:\n name: "./charts/custom"\n values:\n - "deployment.image={{{worker_dockerImage}}}" # Full image with tag\n\n# Example 3: Nested structure\nservices:\n - name: backend\n helm:\n chart:\n name: "./charts/microservice"\n values:\n - "app.container.image={{{backend_dockerImage}}}" # Full image with tag\n```\n\n\n**Important**: Always use triple braces `{{{variable}}}` instead of double braces `{{variable}}` for Lifecycle template variables. This prevents Helm from trying to process them as Helm template functions and ensures they are passed through correctly for Lifecycle to resolve.\n\n\n### Template Resolution Order\n\n1. Lifecycle resolves `{{{variables}}}` before passing values to Helm\n2. The resolved values are then passed to Helm using `--set` flags\n3. 
Helm processes its own template functions (if any) after receiving the resolved values\n\n### Example with Service Dependencies\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: api-gateway\n helm:\n chart:\n name: "./charts/gateway"\n values:\n - "config.authServiceUrl=http://{{{auth-service_internalHostname}}}:3000"\n - "config.userServiceUrl=http://{{{user-service_internalHostname}}}:3000"\n - "image.tag={{{api-gateway_dockerTag}}}"\n\n - name: auth-service\n helm:\n chart:\n name: "./charts/microservice"\n values:\n - "image.tag={{{auth-service_dockerTag}}}"\n - "database.host={{{postgres-db_internalHostname}}}"\n```\n\n## Deployment Process\n\n\n 1. **Job Creation**: A Kubernetes job is created in the ephemeral namespace 2.\n **RBAC Setup**: Service account with namespace-scoped permissions is created\n 3. **Git Clone**: Init container clones the repository 4. **Helm Deploy**:\n Main container executes the Helm deployment 5. **Monitoring**: Logs are\n streamed in real-time via WebSocket\n\n\n### Concurrent Deployment Handling\n\nNative Helm automatically handles concurrent deployments by:\n\n- Detecting existing deployment jobs\n- Force-deleting the old job\n- Starting the new deployment\n\nThis ensures the newest deployment always takes precedence.\n\n## Monitoring Deployments\n\n### Deploy Logs Access\n\nFor services using native Helm deployment, you can access deployment logs through the Lifecycle PR comment:\n\n1. Add the `lifecycle-status-comments!` label to your PR\n2. In the status comment that appears, you\'ll see a **Deploy Logs** link for each service using native Helm\n3. 
Click the link to view real-time deployment logs\n\n### Log Contents\n\nThe deployment logs show:\n\n- Git repository cloning progress (`clone-repo` container)\n- Helm deployment execution (`helm-deploy` container)\n- Real-time streaming of all deployment output\n- Success or failure status\n\n## Chart Types\n\nLifecycle automatically detects and handles three chart types:\n\n| Type | Detection | Features |\n| ------------- | -------------------------------------------- | ---------------------------------------------- |\n| **ORG_CHART** | Matches `orgChartName` AND has `helm.docker` | Docker image injection, env var transformation |\n| **LOCAL** | Name is "local" or starts with "./" or "../" | Flexible `envMapping` support |\n| **PUBLIC** | Everything else | Standard labels and tolerations |\n\n\n The `orgChartName` is configured in the database\'s `global_config` table with\n key `orgChart`. This allows organizations to define their standard internal\n Helm chart.\n\n\n## Troubleshooting\n\n### Deployment Fails with "Another Operation in Progress"\n\n**Symptom**: Helm reports an existing operation is blocking deployment\n\n**Solution**: Native Helm automatically handles this by killing existing jobs. If the issue persists:\n\n```bash\n# Check for stuck jobs\nkubectl get jobs -n env-{uuid} -l service={serviceName}\n\n# Force delete if needed\nkubectl delete job {jobName} -n env-{uuid} --force --grace-period=0\n```\n\n### Environment Variables Not Working\n\n**Symptom**: Environment variables not passed to the deployment\n\n**Common Issues**:\n\n1. `envMapping` placed under `chart` instead of directly under `helm`\n2. Incorrect format specification (array vs map)\n3. 
Missing path configuration\n\n**Correct Configuration**:\n\n```yaml {4-7}\nhelm:\n deploymentMethod: "native"\n chart:\n name: local\n envMapping: # Correct: directly under helm\n app:\n format: "array"\n path: "env"\n```\n\n## Migration Example\n\nHere\'s a complete example showing how to migrate from GitHub-type services to Helm-type services:\n\n### Before: GitHub-type Services\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: "api-gateway"\n github:\n repository: "myorg/api-services"\n branchName: "main"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/api.dockerfile"\n env:\n BACKEND_URL: "{{backend-service_internalHostname}}:3000"\n LOG_LEVEL: "info"\n ENV_NAME: "production"\n ports:\n - 8080\n deployment:\n public: true\n resource:\n cpu:\n request: "100m"\n memory:\n request: "256Mi"\n readiness:\n tcpSocketPort: 8080\n hostnames:\n host: "example.com"\n defaultInternalHostname: "api-gateway-prod"\n defaultPublicUrl: "api.example.com"\n\n - name: "backend-service"\n github:\n repository: "myorg/api-services"\n branchName: "main"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/backend.dockerfile"\n ports:\n - 3000\n env:\n NODE_ENV: "production"\n SERVICE_NAME: "backend"\n deployment:\n public: false\n resource:\n cpu:\n request: "50m"\n memory:\n request: "128Mi"\n readiness:\n tcpSocketPort: 3000\n\n - name: "mysql-database"\n docker:\n dockerImage: "mysql"\n defaultTag: "8.0-debian"\n ports:\n - 3306\n env:\n MYSQL_ROOT_PASSWORD: "strongpassword123"\n MYSQL_DATABASE: "app_database"\n MYSQL_USER: "app_user"\n MYSQL_PASSWORD: "apppassword456"\n deployment:\n public: false\n resource:\n cpu:\n request: "100m"\n memory:\n request: "512Mi"\n readiness:\n tcpSocketPort: 3306\n serviceDisks:\n - name: "mysql-data"\n mountPath: "/var/lib/mysql"\n accessModes: "ReadWriteOnce"\n storageSize: "10Gi"\n```\n\n### After: Helm-type Services with Native 
Deployment\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: "api-gateway"\n helm:\n deploymentMethod: "native" # Enable native Helm\n version: "3.14.0"\n repository: "myorg/api-services"\n branchName: "main"\n args: "--wait --timeout 10m"\n envMapping:\n app:\n format: "array"\n path: "containers.api.env"\n chart:\n name: "./charts/microservices"\n values:\n - \'image.tag="{{{api-gateway_dockerTag}}}"\'\n - "service.type=LoadBalancer"\n - "ingress.enabled=true"\n valueFiles:\n - "deploy/helm/base-values.yaml"\n - "deploy/helm/api-gateway-values.yaml"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/api.dockerfile"\n env:\n BACKEND_URL: "{{backend-service_internalHostname}}:3000"\n LOG_LEVEL: "info"\n ENV_NAME: "production"\n ports:\n - 8080\n\n - name: "backend-service"\n helm:\n deploymentMethod: "native"\n version: "3.14.0"\n repository: "myorg/api-services"\n branchName: "main"\n envMapping:\n app:\n format: "map" # Using map format for this service\n path: "env"\n chart:\n name: "./charts/microservices"\n values:\n - \'image.tag="{{{backend-service_dockerTag}}}"\'\n - "replicaCount=2"\n valueFiles:\n - "deploy/helm/base-values.yaml"\n - "deploy/helm/backend-values.yaml"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/backend.dockerfile"\n ports:\n - 3000\n env:\n NODE_ENV: "production"\n SERVICE_NAME: "backend"\n\n - name: "mysql-database"\n helm:\n deploymentMethod: "native"\n repository: "myorg/api-services"\n branchName: "main"\n chart:\n name: "mysql" # Using public Helm chart\n version: "9.14.1"\n repoUrl: "https://charts.bitnami.com/bitnami"\n valueFiles:\n - "deploy/helm/mysql-values.yaml"\n```\n\n### Key Migration Points\n\n1. **Service Type Change**: Changed from `github:` to `helm:` configuration\n2. **Repository Location**: `repository` and `branchName` move from under `github:` to directly under `helm:`\n3. 
**Deployment Method**: Added `deploymentMethod: "native"` to enable native Helm\n4. **Chart Configuration**: Added `chart:` section with local or public charts\n5. **Environment Mapping**: Added `envMapping:` to control how environment variables are passed\n6. **Helm Arguments**: Added `args:` for Helm command customization\n7. **Docker Configuration**: Kept existing `docker:` config for build process\n\n\n Note that when converting from GitHub-type to Helm-type services, the\n `repository` and `branchName` fields move from being nested under `github:` to\n being directly under `helm:`.\n\n\n\n Many configuration options (like Helm version, args, and chart details) can be\n defined in the `global_config` database table, making the service YAML\n cleaner. Only override when needed.', }, @@ -48,7 +48,7 @@ export const blogContent = [ title: "Service Dependencies", description: "Understand service dependencies, their impact, and configuration.", - date: "2025-02-16", + date: null, path: "docs/features/service-dependencies", body: 'This document will cover `environment.{defaultServices,optionalServices}` and `service.requires`, their differences, impact scope, and usage.\n\n## `environment.{defaultServices,optionalServices}`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ❌ |\n| dev-0\\* | ❌ |\n\nThis represents the default environment that will be created by lifecycle when a pull request is opened in the service repo\\* and does not have any impact on outside repos, dev-0, or any other static environments that use this service.\n\n## `services.requires`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ✅ |\n| dev-0\\* | ✅ |\n\n`services.requires` has an impact across the board; hence, it is important to understand how it works and when we should use them.\n\n**Please read the info blocks below carefully.**\n\nYou 
can think of `services.requires` as a hard dependency definition. For example, if you have an API service and a database, the API service will have a hard dependency on the database.\nIn this scenario, the database should not be defined as the default service. Instead, we should make the dependency explicitly clear by adding the database to the API’s `requires` block.\nBy doing this, we ensure that any outside repo that wants to use our API service will get the database along with it but only needs to specify the API service in their `defaultServices` or `optionalServices`.\n\n\n Only services defined in `lifecycle.yaml` should be used in the `requires`\n array. If a service is defined in an outside repo, use\n `environment.defaultServices` instead.\n\n\nDo not use services in the `services.requires` if the service itself is not\ndefined in the same lifecycle.yaml.\n\n\n Services defined in the `requires` block will only be resolved 1 level down.\n\n\n**This is a very important nuance, which we get tripped by regularly.**\n\n---\n\n## Examples\n\nTo better illustrate the above statement, consider this example.\n\nRepository A `r-A` has 3 services `s-A`, `s-B`, and `s-C`.\n\n- `s-A` requires `s-B`.\n- `s-B` requires `s-C`.\n\nAs you can see, `s-A` has an indirect dependency on `s-C` through `s-B`.\n\n### Scenario 1: Pull Request in Service repo\\* ✅\n\nWhen we open a pull request in `r-A` repo, lifecycle will deploy 3 services: `s-A`, `s-B`, and `s-C`.\n\n#### Breakdown\n\n- Lifecycle deploys `s-A` and `s-B` because they are defined in `defaultServices`.\n- Services defined in the `requires` block will only be resolved **one level down**.\n- Only services defined in `lifecycle.yaml` should be used in the `requires` array. 
If a service is defined in an outside repo, use `environment.defaultServices` instead.\n\n```yaml\n# `r-A.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: "s-A"\n - name: "s-B"\n\nservices:\n - name: "s-A"\n requires:\n - name: "s-B"\n helm: ...\n\n - name: "s-B"\n requires:\n - name: "s-C"\n helm: ...\n\n - name: "s-C"\n helm: ...\n```\n\n### Scenario 2: ❌\n\nRepository B `r-B` has service `s-X` and also defines an outside repo `r-A` service `s-A` as `environment.defaultServices`.\n\n```yaml\n# `r-B.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: "s-X"\n - name: "s-A"\n repository: "lifecycle/r-A"\n branch: "main"\n\nservices:\n - name: "s-X"\n helm: ...\n```\n\n#### Breakdown\n\n1. Lifecycle deploys `s-X` and `s-A` because they are defined in `defaultServices`.\n2. Lifecycle deploys `s-B` because it is a 1st level dependency of a service (`s-A`) listed in `defaultServices`.\n3. Lifecycle **does not** deploy `s-C` since it is **not** a 1st level dependency of any service listed in `defaultServices` or `optionalServices`.\n\nThe way this scenario manifests is lifecycle will deploy `s-X`, `s-A`, and `s-B`, but the build will likely **fail** because `s-B` is missing a required dependency `s-C`.\n\n### Solutions\n\nThere are 2 ways to address this depending on your use case.\n\n#### Solution 1\n\nAdd `s-B` to `r-B`’s `environment.defaultServices` block in `r-B.lifecycle.yaml`. In effect, this will make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: "s-X"\n - name: "s-A"\n repository: "lifecycle/r-A"\n branch: "main"\n - name: "s-B"\n repository: "lifecycle/r-A"\n branch: "main"\n```\n\n#### Solution 2\n\nAdd `s-C` to the `services.requires` block of `r-A` in `r-A.lifecycle.yaml`. 
This will also make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: "s-A"\n - name: "s-B"\n\nservices:\n - name: "s-A"\n requires:\n - name: "s-B"\n - name: "s-C"\n helm: ...\n```\n\n### Choosing the Right Solution\n\nIn summary, the solution you should use depends on how you want your service to be consumed in an outside repo\\*.\n\n- If you want outside repos to explicitly include `s-A` and `s-B`, use **Solution 1**.\n- If you want outside repos to only include `s-A` and let dependencies resolve automatically, use **Solution 2**.\n\n---\n\n### Terminology\n\n- **Service repo**: The repository where `lifecycle.yaml` is defined.\n- **Outside repo**: Another repository referencing it.\n- **dev-0**: Default static environment.', }, @@ -91,7 +91,7 @@ export const blogContent = [ title: "Deploy Issues", description: "Understand how to handle common deploy issues with environments", - date: "2025-03-11", + date: null, path: "docs/troubleshooting/deploy-issues", body: "TODO: This document will cover common deploy issues that you may encounter\n when working with Lifecycle environments.", }, @@ -114,7 +114,7 @@ export const blogContent = [ title: "Lifecycle Full Schema", description: "Lifecycle Schema documentation; this page contains the full schema as defined in lifecycle core—all at once.", - date: "2025-05-31", + date: null, path: "docs/schema/full", body: '## Full Lifecycle Schema\n\nBelow is the full Lifecycle schema as defined in the `lifecycle.yaml` file with basic comments for each item.\n\n```yaml\n# @section environment\nenvironment:\n # @param environment.autoDeploy\n autoDeploy: false\n # @param environment.useGithubStatusComment\n useGithubStatusComment: false\n # @param environment.defaultServices\n defaultServices:\n # @param environment.defaultServices[]\n - # @param environment.defaultServices.name (required)\n name: ""\n # @param environment.defaultServices.repository\n repository: ""\n # @param 
environment.defaultServices.branch\n branch: ""\n # @param environment.optionalServices\n optionalServices:\n # @param environment.optionalServices[]\n - # @param environment.optionalServices.name (required)\n name: ""\n # @param environment.optionalServices.repository\n repository: ""\n # @param environment.optionalServices.branch\n branch: ""\n\n# @section services\nservices:\n # @param services[]\n - # @param services.name (required)\n name: ""\n # @param services.appShort\n appShort: ""\n # @param services.defaultUUID\n defaultUUID: ""\n # @param services.github\n github:\n # @param services.github.repository (required)\n repository: ""\n # @param services.github.branchName (required)\n branchName: ""\n # @param services.github.docker (required)\n docker:\n # @param services.github.docker.defaultTag (required)\n defaultTag: ""\n # @param services.github.docker.pipelineId\n pipelineId: ""\n # @param services.github.docker.ecr\n ecr: ""\n # @param services.github.docker.app (required)\n app:\n # @param services.github.docker.app.afterBuildPipelineConfig\n afterBuildPipelineConfig:\n # @param services.github.docker.app.afterBuildPipelineConfig.afterBuildPipelineId\n afterBuildPipelineId: ""\n # @param services.github.docker.app.afterBuildPipelineConfig.detatchAfterBuildPipeline\n detatchAfterBuildPipeline: false\n # @param services.github.docker.app.afterBuildPipelineConfig.description\n description: ""\n # @param services.github.docker.app.dockerfilePath (required)\n dockerfilePath: ""\n # @param services.github.docker.app.command\n command: ""\n # @param services.github.docker.app.arguments\n arguments: ""\n # @param services.github.docker.app.env\n env:\n\n # @param services.github.docker.app.ports\n ports:\n # @param services.github.docker.app.ports[]\n - ""\n # @param services.github.docker.init\n init:\n # @param services.github.docker.init.dockerfilePath (required)\n dockerfilePath: ""\n # @param services.github.docker.init.command\n command: ""\n # @param 
services.github.docker.init.arguments\n arguments: ""\n # @param services.github.docker.init.env\n env:\n\n # @param services.github.docker.builder\n builder:\n # @param services.github.docker.builder.engine\n engine: ""\n # @param services.github.deployment\n deployment:\n # @param services.github.deployment.helm\n helm:\n # @param services.github.deployment.helm.enabled\n enabled: false\n # @param services.github.deployment.helm.chartName\n chartName: ""\n # @param services.github.deployment.helm.chartRepoUrl\n chartRepoUrl: ""\n # @param services.github.deployment.helm.chartVersion\n chartVersion: ""\n # @param services.github.deployment.helm.cmdPs\n cmdPs: ""\n # @param services.github.deployment.helm.action\n action: ""\n # @param services.github.deployment.helm.customValues\n customValues:\n # @param services.github.deployment.helm.customValues[]\n - ""\n # @param services.github.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.github.deployment.helm.customValueFiles[]\n - ""\n # @param services.github.deployment.helm.helmVersion\n helmVersion: ""\n # @param services.github.deployment.helm.attachPvc\n attachPvc:\n # @param services.github.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.github.deployment.helm.attachPvc.mountPath\n mountPath: ""\n # @param services.github.deployment.public\n public: false\n # @param services.github.deployment.capacityType\n capacityType: ""\n # @param services.github.deployment.resource\n resource:\n # @param services.github.deployment.resource.cpu\n cpu:\n # @param services.github.deployment.resource.cpu.request\n request: ""\n # @param services.github.deployment.resource.cpu.limit\n limit: ""\n # @param services.github.deployment.resource.memory\n memory:\n # @param services.github.deployment.resource.memory.request\n request: ""\n # @param services.github.deployment.resource.memory.limit\n limit: ""\n # @param services.github.deployment.readiness\n readiness:\n # @param 
services.github.deployment.readiness.disabled\n disabled: false\n # @param services.github.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.github.deployment.readiness.httpGet\n httpGet:\n # @param services.github.deployment.readiness.httpGet.path\n path: ""\n # @param services.github.deployment.readiness.httpGet.port\n port: 0\n # @param services.github.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.github.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.github.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.github.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.github.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.github.deployment.hostnames\n hostnames:\n # @param services.github.deployment.hostnames.host\n host: ""\n # @param services.github.deployment.hostnames.acmARN\n acmARN: ""\n # @param services.github.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: ""\n # @param services.github.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: ""\n # @param services.github.deployment.network\n network:\n # @param services.github.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.github.deployment.network.ipWhitelist[]\n - ""\n # @param services.github.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param services.github.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.github.deployment.network.grpc\n grpc:\n # @param services.github.deployment.network.grpc.enable\n enable: false\n # @param services.github.deployment.network.grpc.host\n host: ""\n # @param services.github.deployment.network.grpc.defaultHost\n defaultHost: ""\n # @param services.github.deployment.serviceDisks\n serviceDisks:\n # @param services.github.deployment.serviceDisks[]\n - # @param services.github.deployment.serviceDisks.name 
(required)\n name: ""\n # @param services.github.deployment.serviceDisks.mountPath (required)\n mountPath: ""\n # @param services.github.deployment.serviceDisks.accessModes\n accessModes: ""\n # @param services.github.deployment.serviceDisks.storageSize (required)\n storageSize: ""\n # @param services.github.deployment.serviceDisks.medium\n medium: ""\n # @param services.docker\n docker:\n # @param services.docker.dockerImage (required)\n dockerImage: ""\n # @param services.docker.defaultTag (required)\n defaultTag: ""\n # @param services.docker.command\n command: ""\n # @param services.docker.arguments\n arguments: ""\n # @param services.docker.env\n env:\n\n # @param services.docker.ports\n ports:\n # @param services.docker.ports[]\n - ""\n # @param services.docker.deployment\n deployment:\n # @param services.docker.deployment.helm\n helm:\n # @param services.docker.deployment.helm.enabled\n enabled: false\n # @param services.docker.deployment.helm.chartName\n chartName: ""\n # @param services.docker.deployment.helm.chartRepoUrl\n chartRepoUrl: ""\n # @param services.docker.deployment.helm.chartVersion\n chartVersion: ""\n # @param services.docker.deployment.helm.cmdPs\n cmdPs: ""\n # @param services.docker.deployment.helm.action\n action: ""\n # @param services.docker.deployment.helm.customValues\n customValues:\n # @param services.docker.deployment.helm.customValues[]\n - ""\n # @param services.docker.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.docker.deployment.helm.customValueFiles[]\n - ""\n # @param services.docker.deployment.helm.helmVersion\n helmVersion: ""\n # @param services.docker.deployment.helm.attachPvc\n attachPvc:\n # @param services.docker.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.docker.deployment.helm.attachPvc.mountPath\n mountPath: ""\n # @param services.docker.deployment.public\n public: false\n # @param services.docker.deployment.capacityType\n capacityType: ""\n # @param 
services.docker.deployment.resource\n resource:\n # @param services.docker.deployment.resource.cpu\n cpu:\n # @param services.docker.deployment.resource.cpu.request\n request: ""\n # @param services.docker.deployment.resource.cpu.limit\n limit: ""\n # @param services.docker.deployment.resource.memory\n memory:\n # @param services.docker.deployment.resource.memory.request\n request: ""\n # @param services.docker.deployment.resource.memory.limit\n limit: ""\n # @param services.docker.deployment.readiness\n readiness:\n # @param services.docker.deployment.readiness.disabled\n disabled: false\n # @param services.docker.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.docker.deployment.readiness.httpGet\n httpGet:\n # @param services.docker.deployment.readiness.httpGet.path\n path: ""\n # @param services.docker.deployment.readiness.httpGet.port\n port: 0\n # @param services.docker.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.docker.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.docker.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.docker.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.docker.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.docker.deployment.hostnames\n hostnames:\n # @param services.docker.deployment.hostnames.host\n host: ""\n # @param services.docker.deployment.hostnames.acmARN\n acmARN: ""\n # @param services.docker.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: ""\n # @param services.docker.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: ""\n # @param services.docker.deployment.network\n network:\n # @param services.docker.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.docker.deployment.network.ipWhitelist[]\n - ""\n # @param services.docker.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param 
services.docker.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.docker.deployment.network.grpc\n grpc:\n # @param services.docker.deployment.network.grpc.enable\n enable: false\n # @param services.docker.deployment.network.grpc.host\n host: ""\n # @param services.docker.deployment.network.grpc.defaultHost\n defaultHost: ""\n # @param services.docker.deployment.serviceDisks\n serviceDisks:\n # @param services.docker.deployment.serviceDisks[]\n - # @param services.docker.deployment.serviceDisks.name (required)\n name: ""\n # @param services.docker.deployment.serviceDisks.mountPath (required)\n mountPath: ""\n # @param services.docker.deployment.serviceDisks.accessModes\n accessModes: ""\n # @param services.docker.deployment.serviceDisks.storageSize (required)\n storageSize: ""\n # @param services.docker.deployment.serviceDisks.medium\n medium: ""\n```', }, diff --git a/src/pages/articles/introduction.mdx b/src/pages/articles/introduction.mdx index f59974e..9a75ebf 100644 --- a/src/pages/articles/introduction.mdx +++ b/src/pages/articles/introduction.mdx @@ -4,7 +4,6 @@ tags: - intro - lifecycle - core -date: "2025-05-23" --- import { Link } from "next/link"; diff --git a/src/pages/docs/what-is-lifecycle.mdx b/src/pages/docs/what-is-lifecycle.mdx index 48f5446..3cfe53d 100644 --- a/src/pages/docs/what-is-lifecycle.mdx +++ b/src/pages/docs/what-is-lifecycle.mdx @@ -5,7 +5,6 @@ tags: - core - lifecycle - intro -date: "2025-03-12" --- import { Image } from "@lifecycle-docs/components"; diff --git a/src/pages/index.mdx b/src/pages/index.mdx index 637607e..80973e0 100644 --- a/src/pages/index.mdx +++ b/src/pages/index.mdx @@ -9,13 +9,7 @@ import { Info, Sparkle } from "lucide-react"; import { buttonVariants } from "@lifecycle-docs/components/ui/button"; import { Iframe } from "@lifecycle-docs/components"; import { Separator } from "@lifecycle-docs/components/ui/separator"; -import { - Bg, - Services, - Static, - LatestPosts, - Main, -} from 
"@lifecycle-docs/components/home"; +import { Bg, Services, Static, Main } from "@lifecycle-docs/components/home";
@@ -27,4 +21,3 @@ import {
- From 6189978a4b899bfc0003b06134e413cbdfd48df1 Mon Sep 17 00:00:00 2001 From: vigneshrajsb Date: Sat, 17 Jan 2026 14:19:52 -0800 Subject: [PATCH 2/6] more cleanup --- src/pages/_meta.ts | 2 +- src/pages/articles/_meta.ts | 2 +- src/pages/docs/_meta.ts | 2 +- src/pages/docs/features/_meta.ts | 2 +- src/pages/docs/features/auto-deployment.mdx | 1 - src/pages/docs/features/native-helm-deployment.mdx | 1 - src/pages/docs/features/service-dependencies.mdx | 1 - src/pages/docs/features/webhooks.mdx | 1 - src/pages/docs/getting-started/_meta.ts | 2 +- src/pages/docs/schema/_meta.ts | 2 +- src/pages/docs/schema/full.mdx | 1 - src/pages/docs/schema/index.mdx | 1 - src/pages/docs/setup/_meta.ts | 2 +- src/pages/docs/tips/_meta.ts | 2 +- src/pages/docs/troubleshooting/_meta.ts | 2 +- src/pages/docs/troubleshooting/deploy-issues.mdx | 1 - 16 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/pages/_meta.ts b/src/pages/_meta.ts index f4b9861..f81fbda 100644 --- a/src/pages/_meta.ts +++ b/src/pages/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/articles/_meta.ts b/src/pages/articles/_meta.ts index fe54bb5..212ed3b 100644 --- a/src/pages/articles/_meta.ts +++ b/src/pages/articles/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/_meta.ts b/src/pages/docs/_meta.ts index 7bc67de..3a1ac69 100644 --- a/src/pages/docs/_meta.ts +++ b/src/pages/docs/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/features/_meta.ts b/src/pages/docs/features/_meta.ts index fe87c16..8de6f0f 100644 --- a/src/pages/docs/features/_meta.ts +++ b/src/pages/docs/features/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/features/auto-deployment.mdx b/src/pages/docs/features/auto-deployment.mdx index cf5c095..67160be 100644 --- a/src/pages/docs/features/auto-deployment.mdx +++ b/src/pages/docs/features/auto-deployment.mdx @@ -7,7 +7,6 @@ tags: - labels - lifecycle-deploy - lifecycle-disabled -date: "2025-01-29" --- ## Auto-Deploy Configuration diff --git a/src/pages/docs/features/native-helm-deployment.mdx b/src/pages/docs/features/native-helm-deployment.mdx index 9d13369..bcd5383 100644 --- a/src/pages/docs/features/native-helm-deployment.mdx +++ b/src/pages/docs/features/native-helm-deployment.mdx @@ -6,7 +6,6 @@ tags: - deployment - kubernetes - native -date: "2025-01-29" --- import { Callout, Steps } from "nextra/components"; diff --git a/src/pages/docs/features/service-dependencies.mdx b/src/pages/docs/features/service-dependencies.mdx index b50dd6c..1b0f7ff 100644 --- a/src/pages/docs/features/service-dependencies.mdx +++ b/src/pages/docs/features/service-dependencies.mdx @@ -7,7 +7,6 @@ tags: - configuration - defaultServices - optionalServices -date: "2025-02-16" --- import { Callout } from "nextra/components"; diff --git a/src/pages/docs/features/webhooks.mdx b/src/pages/docs/features/webhooks.mdx index 6825781..6edcb5a 100644 --- a/src/pages/docs/features/webhooks.mdx +++ b/src/pages/docs/features/webhooks.mdx @@ -8,7 +8,6 @@ tags: - lifecycle - docker - command -date: "2025-02-16" --- import { Callout } from 
"nextra/components"; diff --git a/src/pages/docs/getting-started/_meta.ts b/src/pages/docs/getting-started/_meta.ts index 5f80bfb..989b7bd 100644 --- a/src/pages/docs/getting-started/_meta.ts +++ b/src/pages/docs/getting-started/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/schema/_meta.ts b/src/pages/docs/schema/_meta.ts index 32bed56..9c0fd09 100644 --- a/src/pages/docs/schema/_meta.ts +++ b/src/pages/docs/schema/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/schema/full.mdx b/src/pages/docs/schema/full.mdx index 0a076db..921185e 100644 --- a/src/pages/docs/schema/full.mdx +++ b/src/pages/docs/schema/full.mdx @@ -5,7 +5,6 @@ navtext: All at once tags: - schema - lifecycle -date: "2025-05-31" --- ## Full Lifecycle Schema diff --git a/src/pages/docs/schema/index.mdx b/src/pages/docs/schema/index.mdx index 062b776..7637e53 100644 --- a/src/pages/docs/schema/index.mdx +++ b/src/pages/docs/schema/index.mdx @@ -5,7 +5,6 @@ navtext: Section by section tags: - schema - lifecycle -date: "2025-03-28" --- import Link from "next/link"; diff --git a/src/pages/docs/setup/_meta.ts b/src/pages/docs/setup/_meta.ts index 8856cc3..b8cfa19 100644 --- a/src/pages/docs/setup/_meta.ts +++ b/src/pages/docs/setup/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/src/pages/docs/tips/_meta.ts b/src/pages/docs/tips/_meta.ts index 126d7e7..6270149 100644 --- a/src/pages/docs/tips/_meta.ts +++ b/src/pages/docs/tips/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/troubleshooting/_meta.ts b/src/pages/docs/troubleshooting/_meta.ts index 4bf05a7..aa85da6 100644 --- a/src/pages/docs/troubleshooting/_meta.ts +++ b/src/pages/docs/troubleshooting/_meta.ts @@ -1,5 +1,5 @@ /** - * Copyright 2025 GoodRx, Inc. + * Copyright 2026 GoodRx, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src/pages/docs/troubleshooting/deploy-issues.mdx b/src/pages/docs/troubleshooting/deploy-issues.mdx index 5a964de..7d46066 100644 --- a/src/pages/docs/troubleshooting/deploy-issues.mdx +++ b/src/pages/docs/troubleshooting/deploy-issues.mdx @@ -7,7 +7,6 @@ tags: - error - todo - codefresh -date: "2025-03-11" --- import { Callout } from "nextra/components"; From 64d83c150bf020433e51108848c52292db54bc38 Mon Sep 17 00:00:00 2001 From: vigneshrajsb Date: Sat, 17 Jan 2026 14:45:54 -0800 Subject: [PATCH 3/6] cleanup schema docs and blog content --- .gitignore | 2 - .husky/pre-commit | 2 - package.json | 6 +- scripts/generateAllContent.ts | 131 --- scripts/generateSectionData.ts | 90 -- src/lib/static/blogcontent/blogcontent.json | 170 ---- src/lib/static/blogcontent/blogcontent.ts | 178 ---- src/pages/docs/_meta.ts | 6 - src/pages/docs/schema/_meta.ts | 24 - src/pages/docs/schema/full.mdx | 348 -------- src/pages/docs/schema/index.mdx | 926 -------------------- 11 files changed, 2 insertions(+), 1881 deletions(-) delete mode 100755 scripts/generateAllContent.ts delete mode 100755 scripts/generateSectionData.ts delete mode 100644 
src/lib/static/blogcontent/blogcontent.json delete mode 100644 src/lib/static/blogcontent/blogcontent.ts delete mode 100644 src/pages/docs/schema/_meta.ts delete mode 100644 src/pages/docs/schema/full.mdx delete mode 100644 src/pages/docs/schema/index.mdx diff --git a/.gitignore b/.gitignore index 502cd62..62b1a12 100644 --- a/.gitignore +++ b/.gitignore @@ -131,8 +131,6 @@ dist .yarn/install-state.gz .pnp.* -src/pages/docs/schema/yaml - src/lib/data src/pages/tags diff --git a/.husky/pre-commit b/.husky/pre-commit index 9537097..4687701 100644 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -5,8 +5,6 @@ bun run build:prep echo "Staging changes from build:prep..." # add all page changes after build:prep git add src/pages/articles/*.mdx src/pages/docs/*.mdx -# add all static changes after build:prep -git add src/lib/static echo "Running lint-staged..." lint-staged echo "Pre-commit hooks completed successfully. ✨" diff --git a/package.json b/package.json index a80daa2..71423e7 100644 --- a/package.json +++ b/package.json @@ -8,10 +8,8 @@ "build:styles": "tailwindcss -i ./src/styles/globals.css -o public/styles.css", "build:meta": "bun run ./scripts/generateMeta.ts", "build:tags": "bun run ./scripts/generateTagPages.ts", - "build:sectiondata": "bun run ./scripts/generateSectionData.ts", - "build:prep": "bun run clean && bun run build:tags && bun run build:meta && bun run build:sectiondata && bun run sync:content", - "sync:content": "bun run ./scripts/generateAllContent.ts", - "clean": "rimraf src/pages/schema src/pages/tags src/lib/data", + "build:prep": "bun run clean && bun run build:tags && bun run build:meta", + "clean": "rimraf src/pages/tags src/lib/data", "dev": "bun run build:prep && next dev -p 3333", "deploy": "bun run build && touch out/.nojekyll", "start": "next start", diff --git a/scripts/generateAllContent.ts b/scripts/generateAllContent.ts deleted file mode 100755 index f31b8fc..0000000 --- a/scripts/generateAllContent.ts +++ /dev/null @@ -1,131 
+0,0 @@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import { readFileSync, existsSync, mkdirSync, writeFileSync } from "node:fs"; -import { dirname, relative } from "node:path"; -import matter from "gray-matter"; -import { sync } from "fast-glob"; -import { Command } from "commander"; - -export const findMdxFiles = (dir, ignorePatterns) => { - const globPattern = `${dir}/**/*.mdx`; - return sync(globPattern, { ignore: ignorePatterns }); -}; - -export const extractBodyText = (content) => { - content = content.replace(/import\s.+from\s.+;\n?/g, ""); - content = content.replace(/export\s.+;/g, ""); - - content = content.replace(/<[^>]+>/g, ""); - return content.trim(); -}; - -export const extractFileContent = (filePath, baseDir) => { - const content = readFileSync(filePath, "utf8"); - const { - data: { title = null, description = null, date = null }, - content: rawBody, - } = matter(content); - - const body = extractBodyText(rawBody); - - const path = relative(baseDir, filePath).replace(/\.mdx$/, ""); - - return { - title, - description, - date, - path, - body, - }; -}; - -export const ensureDirectoryExists = (dir) => { - if (!existsSync(dir)) mkdirSync(dir, { recursive: true }); -}; - -export const organizeFileContent = ( - inputDir, - outputFilePath, - ignore, - debug, -) => { - if (debug) console.log(`Scanning directory: ${inputDir}`); - const mdxFiles = findMdxFiles(inputDir, ignore).filter( - (filePath) => 
!filePath.includes("/tags/"), - ); - - if (debug) - console.log( - `Found ${mdxFiles.length} MDX files (after applying ignore and exclude patterns)`, - ); - - const fileData = mdxFiles.map((file) => extractFileContent(file, inputDir)); - - if (debug) console.log(`Processed content for ${fileData.length} files`); - - const outputDir = dirname(outputFilePath); - ensureDirectoryExists(outputDir); - const content = JSON.stringify(fileData, null, 2); - const outputData = `export const blogContent = ${content};\n`; - writeFileSync(outputFilePath, outputData, "utf8"); - const outJSON = outputFilePath.replace(".ts", ".json"); - console.log(`File content saved to ${outJSON}`); - writeFileSync(outJSON, content, "utf8"); - if (debug) console.log(`File content saved to ${outputFilePath}`); -}; - -export const actionGenerateContent = (options) => { - const { input, output, ignore, debug } = options; - - if (!input || !output) { - console.error("Error: Both --input and --output options are required."); - process.exit(1); - } - - try { - organizeFileContent(input, output, ignore, debug); - } catch (error) { - console.error("Error:", error.message); - process.exit(1); - } -}; - -const program = new Command(); - -program - .name("extract-file-content") - .description("Extract body text from MDX files and save it as a JSON file.") - .option( - "-i, --input ", - "Input directory to scan for MDX files", - "src/pages", - ) - .option( - "-o, --output ", - "Output file path (JSON)", - "src/lib/static/blogcontent/blogcontent.ts", - ) - .option( - "--ignore ", - "Comma-separated list of glob patterns to ignore", - (val) => val.split(","), - ["**/index.mdx", "src/pages/tags/**"], - ) - .option("-d, --debug", "Enable debug logging", false) - .action(actionGenerateContent); - -program.parse(process.argv); diff --git a/scripts/generateSectionData.ts b/scripts/generateSectionData.ts deleted file mode 100755 index a9e4bee..0000000 --- a/scripts/generateSectionData.ts +++ /dev/null @@ -1,90 +0,0 
@@ -/** - * Copyright 2025 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import { - existsSync, - mkdirSync, - readdirSync, - readFileSync, - statSync, - writeFileSync, -} from "node:fs"; -import { join, relative } from "node:path"; -import matter from "gray-matter"; -import { Command } from "commander"; - -const program = new Command(); - -async function generateDataFiles( - directoryPath: string, - outDir: string, - isDebugging: boolean, -): Promise { - if (isDebugging) console.log(`Processing directory: ${directoryPath}`); - const files = readdirSync(directoryPath); - - const data: { name: string; title: string; description: string }[] = []; - - await Promise.all( - files.map(async (file) => { - const filePath = join(directoryPath, file); - const stats = statSync(filePath); - - if (stats.isDirectory()) { - if (isDebugging) console.log(`Processing directory: ${filePath}`); - await generateDataFiles(filePath, outDir, isDebugging); - } else if (file.endsWith(".mdx")) { - if (isDebugging) console.log(`Processing file: ${filePath}`); - const fileContent = readFileSync(filePath, "utf8"); - const { data: frontmatter } = matter(fileContent); - data.push({ - name: file.replace(".mdx", ""), - title: frontmatter.title, - description: frontmatter.description, - }); - } - }), - ); - - if (data.length > 0) { - const relativeDirPath = relative(program.opts().dir, directoryPath); - const outputDirPath = join(outDir, relativeDirPath); - if 
(!existsSync(outputDirPath)) { - mkdirSync(outputDirPath, { recursive: true }); - } - - const dataFilePath = join(outputDirPath, `section.data.ts`); - - const dataContent = `export default ${JSON.stringify(data, null, 2)};`; - writeFileSync(dataFilePath, dataContent); - - if (isDebugging) console.log(`Generated data file: ${dataFilePath}`); - } -} - -program - .option("-d, --dir ", "directory of pages", "./src/pages") - .option("-o, --out ", "output directory", "./src/lib/data") - .option("--debug", "enable debug logs", false); - -program.parse(process.argv); - -const { dir, debug, out } = program.opts(); - -(async () => { - await generateDataFiles(dir, out, debug); - console.log("Meta files generated successfully!"); -})(); diff --git a/src/lib/static/blogcontent/blogcontent.json b/src/lib/static/blogcontent/blogcontent.json deleted file mode 100644 index 1f37d98..0000000 --- a/src/lib/static/blogcontent/blogcontent.json +++ /dev/null @@ -1,170 +0,0 @@ -[ - { - "title": "Introducing Lifecycle", - "description": null, - "date": null, - "path": "articles/introduction", - "body": "We started building **Lifecycle** at GoodRx in 2019 because managing our lower environments like staging, development, QA had become a daily headache. As our architecture shifted from a monolith to microservices, our internal channels were flooded with messages like \"Is anyone using staging?\" \"Staging is broken again,\" and \"Who just overwrote my changes?\" Waiting in line for hours (sometimes days) to test code in a real-world-like environment was the norm.\n\nWe simply couldn't scale with our engineering growth. So, as a proof of concept, we spun up **Lifecycle**: a tool that lets you create on-demand, ephemeral environments off of github pull request.\n\nAt first, only a handful of services were onboarded, but our engineers immediately saw the difference, no more static staging servers, no more pipeline gymnastics, and no more accidental overwrites. 
They wanted Lifecycle wherever they touched code, so we built a simple lifecycle.yaml configuration, replaced our manual database entries, and baked Lifecycle support into every new service template.\n\nAfter ironing out early scaling kinks, we realized Lifecycle had become more than an internal convenience, it was a game-changer for us.\n\nToday (June 5, 2025), we're thrilled to open-source five years of collective effort under the Apache 2.0 license. This project represents countless late-night brainstorming sessions, pull requests, and \"aha\" moments, and we can't wait to see how you'll make it your own: adding integrations, optimizing performance, or finding novel workflows we never imagined.\n\nBy sharing Lifecycle, we hope to help teams stuck in the same limited environment limbo we once were and build a community of passionate likeminded developers who'll shape the the future of Lifecycle.\n\nWe look forward to learning from you, growing together, and making shipping high-quality software faster and more enjoyable for everyone!\n\nJoin our Discord server [here](https://discord.gg/M5fhHJuEX8) to connect!!" - }, - { - "title": "What is Lifecycle?", - "description": "Lifecycle is your effortless way to test and create ephemeral environments", - "date": null, - "path": "docs/what-is-lifecycle", - "body": "Lifecycle is an **ephemeral** _(/əˈfem(ə)rəl/, lasting for a very short time)_ environment orchestrator that transforms your GitHub pull requests into fully functional development environments. It enables developers to test, validate, and collaborate on features without the hassle of managing infrastructure manually.\n\n> With **Lifecycle**, every pull request gets its own connected playground—ensuring that changes can be previewed, integrated, and verified before merging into its main branch.\n\n## A Developer’s Story\n\nImagine working in an organization that develops multiple services. 
Managing and testing changes across these services can be challenging, especially when multiple developers are working on different features simultaneously.\n\nMeet **Nick Holiday 👨‍💻**, an engineer who needs to update a database schema and modify the corresponding API in a backend service. Additionally, his change requires frontend service updates to display the new data correctly.\n\n### Traditional Workflow Challenges\n\n- **Shared environments** – Nick deploys his backend service changes to a shared dev or staging environment, but another engineer is testing unrelated changes at the same time.\n- **Conflicting updates** – The frontend engineers working on the UI might face issues if their code depends on a stable backend service that keeps changing.\n- **Environment management** – Setting up and maintaining an isolated environment for testing requires significant effort.\n\n### Enter Lifecycle\n\nWith Lifecycle, as soon as Nick opens a pull request, the system automatically:\n\n1. 🏗️ **Creates an isolated development environment** – This environment includes Nick’s updated backend service along with the necessary frontend services.\n2. 🚀 **Deploys the application** – Everything is set up exactly as it would be in production, ensuring a reliable test scenario.\n3. 🔗 **Generates a shareable URL** – Nick and his teammates can interact with the new features without setting up anything locally.\n4. 
🧹 **Cleans up automatically** – Once the PR is merged or closed, Lifecycle removes the environment, keeping things tidy.\n\n## Watch a Quick Demo\n\n\n\n## How It Works\n\n\n\n## Why Use Lifecycle?\n\n- **Faster Feedback Loops** - Get instant previews of your changes without waiting for staging deployments.\n- **Isolation** - Each PR runs in its own sandbox, preventing conflicts.\n- **Seamless Collaboration** - Share URLs with stakeholders, designers, or QA engineers.\n- **Automatic Cleanup** - No more stale test environments; Lifecycle manages cleanup for you.\n- **Works with Your Stack** - Supports containerized applications and integrates with Kubernetes." - }, - { - "title": "Auto Deploy & Labels", - "description": "How to setup auto deploy for pull requests and control envionment with labels", - "date": null, - "path": "docs/features/auto-deployment", - "body": "## Auto-Deploy Configuration\n\nTo enable **automatic deployment** when a PR is created, set the `autoDeploy` attribute in your repository's `lifecycle.yaml` file:\n\n```yaml {2} filename=\"lifecycle.yaml\"\nenvironment:\n autoDeploy: true\n defaultServices:\n```\n\n- Lifecycle will **automatically create** the environment as soon as a PR is opened.\n- A `lifecycle-deploy!` label will be added to the PR to indicate that the environment has been deployed.\n\n---\n\n## Managing Deployments with Labels\n\nIf **auto-deploy is not enabled**, you can manually control the environment using PR labels.\n\n### Deploy an Environment\n\nTo create an ephemeral environment for a PR, **add** the `lifecycle-deploy!` label.\n\n### Tear Down an Environment\n\nTo **delete** an active environment, use either of these labels:\n\n- **Remove** `lifecycle-deploy!`\n- **Add** `lifecycle-disabled!`\n\n---\n\n## Automatic Cleanup on PR Closure\n\nWhen a PR is **closed**, Lifecycle will:\n\n1. **Tear down** the active environment.\n2. 
**Remove** the `lifecycle-deploy!` label from the PR.\n\nThis ensures that unused environments do not persist after the PR lifecycle is complete.\n\n---\n\n## Summary\n\n| Feature | Behavior |\n| ---------------------------- | ----------------------------------------------- |\n| `autoDeploy: true` in config | PR environments are **automatically** deployed. |\n| `lifecycle-deploy!` | **Manually deploy** an environment. |\n| Remove `lifecycle-deploy!` | **Tear down** the environment. |\n| Add `lifecycle-disabled!` | **Tear down** the environment manually. |\n| PR closed | **Environment is deleted automatically**. |\n\nUsing these configurations and labels, teams can efficiently manage **ephemeral environments** in their development workflow." - }, - { - "title": "Webhooks", - "description": null, - "date": null, - "path": "docs/features/webhooks", - "body": "Lifecycle can invoke **third-party services** when a build state changes.\n\nWebhooks allow users to automate external processes such as running tests, performing cleanup tasks, or sending notifications based on environment build states.\n\n## Supported Types\n\nLifecycle supports three types of webhooks:\n\n1. **`codefresh`** - Trigger Codefresh pipelines\n2. **`docker`** - Execute Docker images as Kubernetes jobs\n3. 
**`command`** - Run shell commands in a specified Docker image\n\n## Common Use Cases\n\n- When a build status is `deployed`, trigger **end-to-end tests**.\n- When a build status is `error`, trigger **infrastructure cleanup** or alert the team.\n- Run **security scans** on built containers.\n- Execute **database migrations** after deployment.\n- Send **notifications** to Slack, Discord, or other communication channels.\n- Perform **smoke tests** using custom test containers.\n\n## Configuration\n\nWebhooks are defined in the `lifecycle.yaml` under the `environment.webhooks` section.\n\nBelow is an example configuration for triggering end-to-end tests when the `deployed` state is reached.\n\n## Examples\n\n### `codefresh`\n\nThe `codefresh` type triggers existing Codefresh pipelines when build states change.\n\n```yaml\n# Trigger End-to-End Tests on Deployment\nenvironment:\n # ...\n defaultServices:\n - name: \"frontend\"\n optionalServices:\n - name: \"backend\"\n repository: \"lifecycle/backend\"\n branch: \"main\"\n webhooks:\n - state: deployed\n type: codefresh\n name: \"End to End Tests\"\n pipelineId: 64598362453cc650c0c9cd4d\n trigger: tests\n env:\n branch: \"{{frontend_branchName}}\"\n TEST_URL: \"https://{{frontend_publicUrl}}\"\n # ...\n```\n\n- **`state: deployed`** → Triggers the webhook when the build reaches the `deployed` state.\n- **`type: codefresh`** → Specifies that this webhook triggers a **Codefresh pipeline**.\n- **`name`** → A human-readable name for the webhook.\n- **`pipelineId`** → The unique Codefresh pipeline ID.\n- **`trigger`** → Codefresh pipeline's trigger to execute.\n- **`env`** → Passes relevant environment variables (e.g., `branch` and `TEST_URL`).\n\n---\n\n```yaml\n# Trigger Cleanup on Build Error\nenvironment:\n # ...\n webhooks:\n - state: error\n type: codefresh\n name: \"Cleanup Failed Deployment\"\n pipelineId: 74283905723ff650c0d9ab7e\n trigger: cleanup\n env:\n branch: \"{{frontend_branchName}}\"\n CLEANUP_TARGET: 
\"frontend\"\n # ...\n```\n\n- **`state: error`** → Triggers the webhook when the build fails.\n- **`type: codefresh`** → Invokes a Codefresh cleanup pipeline.\n- **`trigger: cleanup`** → Codefresh pipeline's trigger to execute.\n- **`env`** → Includes necessary variables, such as `branch` and `CLEANUP_TARGET`.\n\n### `docker`\n\nThe `docker` type allows you to execute any Docker image as a Kubernetes job when build states change.\n\n\n Docker webhooks run as Kubernetes jobs in the same namespace as your build.\n They have a default timeout of 30 minutes and resource limits of 200m CPU and\n 1Gi memory.\n\n\n```yaml\n# Run E2E Tests in Custom Container\nenvironment:\n # ...\n webhooks:\n - name: \"E2E Test Suite\"\n description: \"Execute comprehensive E2E tests\"\n state: deployed\n type: docker\n docker:\n image: \"myorg/e2e-tests:latest\"\n command: [\"npm\", \"run\", \"e2e\"]\n timeout: 1200 # 1 hour (optional, default: 1800 seconds)\n env:\n BASE_URL: \"https://{{frontend_publicUrl}}\"\n ENVIRONMENT: \"ephemeral\"\n```\n\n- **`docker.image`** → Docker image to execute (required)\n- **`docker.command`** → Override the default entrypoint (optional)\n- **`docker.args`** → Arguments to pass to the command (optional)\n- **`docker.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n### `command`\n\nThe `command` type is a simplified version of Docker webhooks, ideal for running shell scripts or simple commands.\n\n```yaml\n# Slack Notification Example\nenvironment:\n # ...\n webhooks:\n - name: \"Deployment Notification\"\n description: \"Notify team of successful deployment\"\n state: deployed\n type: command\n command:\n image: \"alpine:latest\"\n script: |\n apk add --no-cache curl\n curl -X POST \"$WEBHOOK_URL\" \\\n -H \"Content-Type: application/json\" \\\n -d \"{\\\"text\\\":\\\"🚀 Deployed $SERVICE_NAME to $DEPLOY_URL\\\"}\"\n timeout: 300 # 5 minutes (optional)\n env:\n WEBHOOK_URL: \"https://hooks.slack.com/services/XXX/YYY/ZZZ\"\n 
SERVICE_NAME: \"{{frontend_internalHostname}}\"\n DEPLOY_URL: \"https://{{frontend_publicUrl}}\"\n```\n\n\n Make sure to replace placeholder values like webhook URLs and pipeline IDs\n with your actual values.\n\n\n- **`command.image`** → Docker image to run the script in (required)\n- **`command.script`** → Shell script to execute (required)\n- **`command.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n## Trigger states\n\nWebhooks can be triggered on the following build states:\n\n- **`deployed`** → Service successfully deployed and running\n- **`error`** → Build or deployment failed\n- **`torn_down`** → Environment has been destroyed\n\n## Note\n\n- All webhooks for the same state are executed **serially** in the order defined.\n- Webhook failures do not affect the build status.\n- Webhook invocations can be viewed at `/builds/[uuid]/webhooks` page(latest 20 invocations). Use the API to view all invocations.\n- `docker` and `command` type's logs are not streamed when the job is still in progress and are available only after the job completes." - }, - { - "title": "Native Helm Deployment", - "description": "Deploy services using Helm directly in Kubernetes without external CI/CD dependencies", - "date": null, - "path": "docs/features/native-helm-deployment", - "body": "This feature is still in alpha and might change with breaking changes.\n\n\n**Native Helm** is an alternative deployment method that runs Helm deployments directly within Kubernetes jobs, eliminating the need for external CI/CD systems. 
This provides a more self-contained and portable deployment solution.\n\n\n Native Helm deployment is an opt-in feature that can be enabled globally or\n per-service.\n\n\n## Overview\n\nWhen enabled, Native Helm:\n\n- Creates Kubernetes jobs to execute Helm deployments\n- Runs in ephemeral namespaces with proper RBAC\n- Provides real-time deployment logs via WebSocket\n- Handles concurrent deployments automatically\n- Supports all standard Helm chart types\n\n## Quickstart\n\nWant to try native Helm deployment? Here's the fastest way to get started:\n\n```yaml filename=\"lifecycle.yaml\" {5}\nservices:\n - name: my-api\n defaultUUID: \"dev-0\"\n helm:\n deploymentMethod: \"native\" # That's it!\n chart:\n name: \"local\"\n valueFiles:\n - \"./helm/values.yaml\"\n```\n\nThis configuration:\n\n1. Enables native Helm for the `my-api` service\n2. Uses a local Helm chart from your repository\n3. Applies values from `./helm/values.yaml`\n4. Runs deployment as a Kubernetes job\n\n\n To enable native Helm for all services at once, see [Global\n Configuration](#enabling-native-helm).\n\n\n## Configuration\n\n### Enabling Native Helm\n\nThere are two ways to enable native Helm deployment:\n\n#### Per Service Configuration\n\nEnable native Helm for individual services:\n\n```yaml {4} filename=\"lifecycle.yaml\"\nservices:\n - name: my-service\n helm:\n deploymentMethod: \"native\" # Enable for this service only\n chart:\n name: my-chart\n```\n\n#### Global Configuration\n\nEnable native Helm for all services:\n\n```yaml {3} filename=\"lifecycle.yaml\"\nhelm:\n nativeHelm:\n enabled: true # Enable for all services\n```\n\n### Configuration Precedence\n\nLifecycle uses a hierarchical configuration system with three levels of precedence:\n\n1. **helmDefaults** - Base defaults for all deployments (database: `global_config` table)\n2. **Chart-specific config** - Per-chart defaults (database: `global_config` table)\n3. 
**Service YAML config** - Service-specific overrides (highest priority)\n\n\n Service-level configuration always takes precedence over global defaults.\n\n\n### Global Configuration (Database)\n\nGlobal configurations are stored in the `global_config` table in the database. Each configuration is stored as a row with:\n\n- **key**: The configuration name (e.g., 'helmDefaults', 'postgresql', 'redis')\n- **config**: JSON object containing the configuration\n\n#### helmDefaults Configuration\n\nStored in database with key `helmDefaults`:\n\n```json\n{\n \"nativeHelm\": {\n \"enabled\": true,\n \"defaultArgs\": \"--wait --timeout 30m\",\n \"defaultHelmVersion\": \"3.12.0\"\n }\n}\n```\n\n**Field Descriptions**:\n\n- `enabled`: When `true`, enables native Helm deployment for all services unless they explicitly set `deploymentMethod: \"ci\"`\n- `defaultArgs`: Arguments automatically appended to every Helm command (appears before service-specific args)\n- `defaultHelmVersion`: The Helm version to use when not specified at the service or chart level\n\n#### Chart-specific Configuration\n\nExample: PostgreSQL configuration stored with key `postgresql`:\n\n```json\n{\n \"version\": \"3.13.0\",\n \"args\": \"--force --timeout 60m0s --wait\",\n \"chart\": {\n \"name\": \"postgresql\",\n \"repoUrl\": \"https://charts.bitnami.com/bitnami\",\n \"version\": \"12.9.0\",\n \"values\": [\"auth.username=postgres_user\", \"auth.database=postgres_db\"]\n }\n}\n```\n\n\n These global configurations are managed by administrators and stored in the\n database. They provide consistent defaults across all environments and can be\n overridden at the service level.\n\n\n## Usage Examples\n\n### Quick Experiment: Deploy Jenkins!\n\nWant to see native Helm in action? Let's deploy everyone's favorite CI/CD tool - Jenkins! 
This example shows how easy it is to deploy popular applications using native Helm.\n\n```yaml filename=\"lifecycle.yaml\"\nenvironment:\n defaultServices:\n - name: \"my-app\"\n - name: \"jenkins\" # Add Jenkins to your default services\n\nservices:\n - name: \"jenkins\"\n helm:\n chart:\n name: \"jenkins\"\n repoUrl: \"https://charts.bitnami.com/bitnami\"\n version: \"13.6.8\"\n values:\n - \"service.type=ClusterIP\"\n - \"ingress.enabled=true\"\n - \"ingress.hostname={{jenkins_publicUrl}}\"\n - \"ingress.ingressClassName=nginx\"\n```\n\n\n 🎉 That's it! With just a few lines of configuration, you'll have Jenkins\n running in your Kubernetes cluster.\n\n\nTo access your Jenkins instance:\n\n1. Check the deployment status in your PR comment\n2. Click the **Deploy Logs** link to monitor the deployment\n3. Once deployed, Jenkins will be available at the internal hostname\n\n\n For more Jenkins configuration options and values, check out the [Bitnami\n Jenkins chart\n documentation](https://github.com/bitnami/charts/tree/main/bitnami/jenkins).\n This same pattern works for any Bitnami chart (PostgreSQL, Redis, MongoDB) or\n any other public Helm chart!\n\n\n### Basic Service Deployment\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: web-api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: web-app\n version: \"1.2.0\"\n```\n\n### PostgreSQL with Overrides\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: database\n helm:\n deploymentMethod: \"native\"\n version: \"3.14.0\" # Override Helm version\n args: \"--atomic\" # Override deployment args\n chart:\n name: postgresql\n values: # Additional values merged with defaults\n - \"persistence.size=20Gi\"\n - \"replicaCount=2\"\n```\n\n### Custom Environment Variables\n\nLifecycle supports flexible environment variable formatting through the `envMapping` configuration. 
This feature allows you to control how environment variables from your service configuration are passed to your Helm chart.\n\n\n **Why envMapping?** Different Helm charts expect environment variables in\n different formats. Some expect an array of objects with `name` and `value`\n fields (Kubernetes standard), while others expect a simple key-value map. The\n `envMapping` feature lets you adapt to your chart's requirements.\n\n\n#### Default envMapping Configuration\n\nYou can define default `envMapping` configurations in the `global_config` database table. These defaults apply to all services using that chart unless overridden at the service level.\n\n**Example: Setting defaults for your organization's chart**\n\n```json\n// In global_config table, key: \"myorg-web-app\"\n{\n \"chart\": {\n \"name\": \"myorg-web-app\",\n \"repoUrl\": \"https://charts.myorg.com\"\n },\n \"envMapping\": {\n \"app\": {\n \"format\": \"array\",\n \"path\": \"deployment.containers[0].env\"\n }\n }\n}\n```\n\nWith this configuration, any service using the `myorg-web-app` chart will automatically use array format for environment variables:\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: \"myorg-web-app\" # Inherits envMapping from global_config\n docker:\n app:\n env:\n API_KEY: \"secret\"\n # These will be formatted as array automatically\n```\n\n\n Setting `envMapping` in global_config is particularly useful when: - You have\n a standard organizational chart used by many services - You want consistent\n environment variable handling across services - You're migrating multiple\n services and want to reduce configuration duplication\n\n\n#### Array Format\n\nBest for charts that expect Kubernetes-style env arrays.\n\n```yaml {7-9} filename=\"lifecycle.yaml\"\nservices:\n - name: api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: local\n envMapping:\n app:\n format: \"array\"\n path: \"env\"\n docker:\n 
app:\n env:\n DATABASE_URL: \"postgres://localhost:5432/mydb\"\n API_KEY: \"secret-key-123\"\n NODE_ENV: \"production\"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set env[0].name=DATABASE_URL\n--set env[0].value=postgres://localhost:5432/mydb\n--set env[1].name=API_KEY\n--set env[1].value=secret-key-123\n--set env[2].name=NODE_ENV\n--set env[2].value=production\n```\n\n**Your chart's values.yaml would use it like:**\n\n```yaml\nenv:\n - name: DATABASE_URL\n value: postgres://localhost:5432/mydb\n - name: API_KEY\n value: secret-key-123\n - name: NODE_ENV\n value: production\n```\n\n#### Map Format\n\nBest for charts that expect a simple key-value object.\n\n```yaml {7-9} filename=\"lifecycle.yaml\"\nservices:\n - name: api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: local\n envMapping:\n app:\n format: \"map\"\n path: \"envVars\"\n docker:\n app:\n env:\n DATABASE_URL: \"postgres://localhost:5432/mydb\"\n API_KEY: \"secret-key-123\"\n NODE_ENV: \"production\"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set envVars.DATABASE__URL=postgres://localhost:5432/mydb\n--set envVars.API__KEY=secret-key-123\n--set envVars.NODE__ENV=production\n```\n\n\n Note: Underscores in environment variable names are converted to double\n underscores (`__`) in map format to avoid Helm parsing issues.\n\n\n**Your chart's values.yaml would use it like:**\n\n```yaml\nenvVars:\n DATABASE__URL: postgres://localhost:5432/mydb\n API__KEY: secret-key-123\n NODE__ENV: production\n```\n\n#### Complete Example with Multiple Services\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n # Service using array format (common for standard Kubernetes deployments)\n - name: frontend\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/apps\"\n branchName: \"main\"\n envMapping:\n app:\n format: \"array\"\n path: \"deployment.env\"\n chart:\n name: \"./charts/web-app\"\n docker:\n app:\n dockerfilePath: \"frontend/Dockerfile\"\n env:\n 
REACT_APP_API_URL: \"https://api.example.com\"\n REACT_APP_VERSION: \"{{build.uuid}}\"\n\n # Service using map format (common for custom charts)\n - name: backend\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/apps\"\n branchName: \"main\"\n envMapping:\n app:\n format: \"map\"\n path: \"config.environment\"\n chart:\n name: \"./charts/api\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/backend.dockerfile\"\n ports:\n - 3000\n env:\n NODE_ENV: \"production\"\n SERVICE_NAME: \"backend\"\n\n - name: \"mysql-database\"\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n chart:\n name: \"mysql\" # Using public Helm chart\n version: \"9.14.1\"\n repoUrl: \"https://charts.bitnami.com/bitnami\"\n valueFiles:\n - \"deploy/helm/mysql-values.yaml\"\n```\n\n## Templated Variables\n\nLifecycle supports template variables in Helm values that are resolved at deployment time. These variables allow you to reference dynamic values like build UUIDs, docker tags, and internal hostnames.\n\n### Available Variables\n\nTemplate variables use the format `{{{variableName}}}` and are replaced with actual values during deployment:\n\n| Variable | Description | Example Value |\n| ------------------------------------ | ------------------------- | ---------------------------------------- |\n| `{{{serviceName_dockerTag}}}` | Docker tag for a service | `main-abc123` |\n| `{{{serviceName_dockerImage}}}` | Full docker image path | `registry.com/org/repo:main-abc123` |\n| `{{{serviceName_internalHostname}}}` | Internal service hostname | `api-service.env-uuid.svc.cluster.local` |\n| `{{{build.uuid}}}` | Build UUID | `env-12345` |\n| `{{{build.namespace}}}` | Kubernetes namespace | `env-12345` |\n\n### Usage in Values\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: web-api\n helm:\n deploymentMethod: \"native\"\n chart:\n name: \"./charts/app\"\n values:\n - 
\"image.tag={{{web-api_dockerTag}}}\"\n - \"backend.url=http://{{{backend-service_internalHostname}}}:8080\"\n - \"env.BUILD_ID={{{build.uuid}}}\"\n```\n\n\n**Docker Image Mapping**: When using custom charts, you'll need to map `{{{serviceName_dockerImage}}}` or `{{{serviceName_dockerTag}}}` to your chart's expected value path. Common patterns include:\n- `image.repository` and `image.tag` (most common)\n- `deployment.image` (single image string)\n- `app.image` or `application.image`\n- Custom paths specific to your chart\n\nCheck your chart's `values.yaml` to determine the correct path.\n\n\n\n#### Image Mapping Examples\n\n```yaml filename=\"lifecycle.yaml\"\n# Example 1: Separate repository and tag (most common)\nservices:\n - name: web-api\n helm:\n chart:\n name: \"./charts/standard\"\n values:\n - \"image.repository=registry.com/org/web-api\" # Static repository\n - \"image.tag={{{web-api_dockerTag}}}\" # Dynamic tag only\n\n# Example 2: Combined image string\nservices:\n - name: worker\n helm:\n chart:\n name: \"./charts/custom\"\n values:\n - \"deployment.image={{{worker_dockerImage}}}\" # Full image with tag\n\n# Example 3: Nested structure\nservices:\n - name: backend\n helm:\n chart:\n name: \"./charts/microservice\"\n values:\n - \"app.container.image={{{backend_dockerImage}}}\" # Full image with tag\n```\n\n\n**Important**: Always use triple braces `{{{variable}}}` instead of double braces `{{variable}}` for Lifecycle template variables. This prevents Helm from trying to process them as Helm template functions and ensures they are passed through correctly for Lifecycle to resolve.\n\n\n### Template Resolution Order\n\n1. Lifecycle resolves `{{{variables}}}` before passing values to Helm\n2. The resolved values are then passed to Helm using `--set` flags\n3. 
Helm processes its own template functions (if any) after receiving the resolved values\n\n### Example with Service Dependencies\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: api-gateway\n helm:\n chart:\n name: \"./charts/gateway\"\n values:\n - \"config.authServiceUrl=http://{{{auth-service_internalHostname}}}:3000\"\n - \"config.userServiceUrl=http://{{{user-service_internalHostname}}}:3000\"\n - \"image.tag={{{api-gateway_dockerTag}}}\"\n\n - name: auth-service\n helm:\n chart:\n name: \"./charts/microservice\"\n values:\n - \"image.tag={{{auth-service_dockerTag}}}\"\n - \"database.host={{{postgres-db_internalHostname}}}\"\n```\n\n## Deployment Process\n\n\n 1. **Job Creation**: A Kubernetes job is created in the ephemeral namespace 2.\n **RBAC Setup**: Service account with namespace-scoped permissions is created\n 3. **Git Clone**: Init container clones the repository 4. **Helm Deploy**:\n Main container executes the Helm deployment 5. **Monitoring**: Logs are\n streamed in real-time via WebSocket\n\n\n### Concurrent Deployment Handling\n\nNative Helm automatically handles concurrent deployments by:\n\n- Detecting existing deployment jobs\n- Force-deleting the old job\n- Starting the new deployment\n\nThis ensures the newest deployment always takes precedence.\n\n## Monitoring Deployments\n\n### Deploy Logs Access\n\nFor services using native Helm deployment, you can access deployment logs through the Lifecycle PR comment:\n\n1. Add the `lifecycle-status-comments!` label to your PR\n2. In the status comment that appears, you'll see a **Deploy Logs** link for each service using native Helm\n3. 
Click the link to view real-time deployment logs\n\n### Log Contents\n\nThe deployment logs show:\n\n- Git repository cloning progress (`clone-repo` container)\n- Helm deployment execution (`helm-deploy` container)\n- Real-time streaming of all deployment output\n- Success or failure status\n\n## Chart Types\n\nLifecycle automatically detects and handles three chart types:\n\n| Type | Detection | Features |\n| ------------- | -------------------------------------------- | ---------------------------------------------- |\n| **ORG_CHART** | Matches `orgChartName` AND has `helm.docker` | Docker image injection, env var transformation |\n| **LOCAL** | Name is \"local\" or starts with \"./\" or \"../\" | Flexible `envMapping` support |\n| **PUBLIC** | Everything else | Standard labels and tolerations |\n\n\n The `orgChartName` is configured in the database's `global_config` table with\n key `orgChart`. This allows organizations to define their standard internal\n Helm chart.\n\n\n## Troubleshooting\n\n### Deployment Fails with \"Another Operation in Progress\"\n\n**Symptom**: Helm reports an existing operation is blocking deployment\n\n**Solution**: Native Helm automatically handles this by killing existing jobs. If the issue persists:\n\n```bash\n# Check for stuck jobs\nkubectl get jobs -n env-{uuid} -l service={serviceName}\n\n# Force delete if needed\nkubectl delete job {jobName} -n env-{uuid} --force --grace-period=0\n```\n\n### Environment Variables Not Working\n\n**Symptom**: Environment variables not passed to the deployment\n\n**Common Issues**:\n\n1. `envMapping` placed under `chart` instead of directly under `helm`\n2. Incorrect format specification (array vs map)\n3. 
Missing path configuration\n\n**Correct Configuration**:\n\n```yaml {4-7}\nhelm:\n deploymentMethod: \"native\"\n chart:\n name: local\n envMapping: # Correct: directly under helm\n app:\n format: \"array\"\n path: \"env\"\n```\n\n## Migration Example\n\nHere's a complete example showing how to migrate from GitHub-type services to Helm-type services:\n\n### Before: GitHub-type Services\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: \"api-gateway\"\n github:\n repository: \"myorg/api-services\"\n branchName: \"main\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/api.dockerfile\"\n env:\n BACKEND_URL: \"{{backend-service_internalHostname}}:3000\"\n LOG_LEVEL: \"info\"\n ENV_NAME: \"production\"\n ports:\n - 8080\n deployment:\n public: true\n resource:\n cpu:\n request: \"100m\"\n memory:\n request: \"256Mi\"\n readiness:\n tcpSocketPort: 8080\n hostnames:\n host: \"example.com\"\n defaultInternalHostname: \"api-gateway-prod\"\n defaultPublicUrl: \"api.example.com\"\n\n - name: \"backend-service\"\n github:\n repository: \"myorg/api-services\"\n branchName: \"main\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/backend.dockerfile\"\n ports:\n - 3000\n env:\n NODE_ENV: \"production\"\n SERVICE_NAME: \"backend\"\n deployment:\n public: false\n resource:\n cpu:\n request: \"50m\"\n memory:\n request: \"128Mi\"\n readiness:\n tcpSocketPort: 3000\n\n - name: \"mysql-database\"\n docker:\n dockerImage: \"mysql\"\n defaultTag: \"8.0-debian\"\n ports:\n - 3306\n env:\n MYSQL_ROOT_PASSWORD: \"strongpassword123\"\n MYSQL_DATABASE: \"app_database\"\n MYSQL_USER: \"app_user\"\n MYSQL_PASSWORD: \"apppassword456\"\n deployment:\n public: false\n resource:\n cpu:\n request: \"100m\"\n memory:\n request: \"512Mi\"\n readiness:\n tcpSocketPort: 3306\n serviceDisks:\n - name: \"mysql-data\"\n mountPath: \"/var/lib/mysql\"\n accessModes: \"ReadWriteOnce\"\n 
storageSize: \"10Gi\"\n```\n\n### After: Helm-type Services with Native Deployment\n\n```yaml filename=\"lifecycle.yaml\"\nservices:\n - name: \"api-gateway\"\n helm:\n deploymentMethod: \"native\" # Enable native Helm\n version: \"3.14.0\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n args: \"--wait --timeout 10m\"\n envMapping:\n app:\n format: \"array\"\n path: \"containers.api.env\"\n chart:\n name: \"./charts/microservices\"\n values:\n - 'image.tag=\"{{{api-gateway_dockerTag}}}\"'\n - \"service.type=LoadBalancer\"\n - \"ingress.enabled=true\"\n valueFiles:\n - \"deploy/helm/base-values.yaml\"\n - \"deploy/helm/api-gateway-values.yaml\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/api.dockerfile\"\n env:\n BACKEND_URL: \"{{backend-service_internalHostname}}:3000\"\n LOG_LEVEL: \"info\"\n ENV_NAME: \"production\"\n ports:\n - 8080\n\n - name: \"backend-service\"\n helm:\n deploymentMethod: \"native\"\n version: \"3.14.0\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n envMapping:\n app:\n format: \"map\" # Using map format for this service\n path: \"env\"\n chart:\n name: \"./charts/microservices\"\n values:\n - 'image.tag=\"{{{backend-service_dockerTag}}}\"'\n - \"replicaCount=2\"\n valueFiles:\n - \"deploy/helm/base-values.yaml\"\n - \"deploy/helm/backend-values.yaml\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"docker/backend.dockerfile\"\n ports:\n - 3000\n env:\n NODE_ENV: \"production\"\n SERVICE_NAME: \"backend\"\n\n - name: \"mysql-database\"\n helm:\n deploymentMethod: \"native\"\n repository: \"myorg/api-services\"\n branchName: \"main\"\n chart:\n name: \"mysql\" # Using public Helm chart\n version: \"9.14.1\"\n repoUrl: \"https://charts.bitnami.com/bitnami\"\n valueFiles:\n - \"deploy/helm/mysql-values.yaml\"\n```\n\n### Key Migration Points\n\n1. 
**Service Type Change**: Changed from `github:` to `helm:` configuration\n2. **Repository Location**: `repository` and `branchName` move from under `github:` to directly under `helm:`\n3. **Deployment Method**: Added `deploymentMethod: \"native\"` to enable native Helm\n4. **Chart Configuration**: Added `chart:` section with local or public charts\n5. **Environment Mapping**: Added `envMapping:` to control how environment variables are passed\n6. **Helm Arguments**: Added `args:` for Helm command customization\n7. **Docker Configuration**: Kept existing `docker:` config for build process\n\n\n Note that when converting from GitHub-type to Helm-type services, the\n `repository` and `branchName` fields move from being nested under `github:` to\n being directly under `helm:`.\n\n\n\n Many configuration options (like Helm version, args, and chart details) can be\n defined in the `global_config` database table, making the service YAML\n cleaner. Only override when needed." - }, - { - "title": "Template Variables", - "description": null, - "date": null, - "path": "docs/features/template-variables", - "body": "## Overview\n\nLifecycle uses [Mustache](https://github.com/janl/mustache.js) as the template rendering engine.\n\n## Available Template Variables\n\nThe following template variables are available for use within your configuration. Variables related to specific services should use the service name as a prefix.\n\n### General Variables\n\n- **`{{{buildUUID}}}`** - The unique identifier for the Lifecycle environment, e.g., `lively-down-881123`.\n- **`{{{namespace}}}`** - Namespace for the deployments, e.g., `env-lively-down-881123`.\n- **`{{{pullRequestNumber}}}`** - The GitHub pull request number associated with the environment.\n\n### Service-Specific Variables\n\nFor service-specific variables, replace `` with the actual service name.\n\n- **`{{{_internalHostname}}}`** - The internal hostname of the deployed service. 
If the service is optional and not deployed, it falls back to `defaultInternalHostname`.\n\n \n `service_internalHostname` will be substituted with local cluster full\n domain name like `service.namespace.svc.cluster.local` to be able to work\n with deployments across namespaces.\n \n\n- **`{{{_publicUrl}}}`** - The public URL of the deployed service. If optional and not deployed, it defaults to `defaultPublicUrl` under the `services` table.\n- **`{{{_sha}}}`** - The GitHub SHA that triggered the Lifecycle build.\n- **`{{{_branchName}}}`** - The branch name of the pull request that deployed the environment.\n- **`{{{_UUID}}}`** - The build UUID of the service. If listed under `optionalServices` or `defaultServices`, its value depends on whether the service is selected:\n - If selected, it is equal to `buildUUID`.\n - If not selected (or if service not part of deploys created), it defaults to **`dev-0`**.\n\n## Usage Example\n\n```yaml\nservices:\n frontend:\n # ...\n env:\n API_URL: \"{{{backend_publicUrl}}}\"\n UUID: \"{{{buildUUID}}}\"\n```\n\nThis ensures the `PUBLIC_URL` and `INTERNAL_HOST` variables are dynamically assigned based on the ephemeral environment deployment.\n\n \n- Undefined variables will result in an empty string unless handled explicitly.\n- Use triple curly braces (`{{{ }}}`) to prevent unwanted HTML escaping.\n- Ensure service names are correctly referenced in the template without any spaces.\n\n\nFor more details, refer to the [Mustache.js documentation](https://github.com/janl/mustache.js)." 
- }, - { - "title": "Service Dependencies", - "description": "Understand service dependencies, their impact, and configuration.", - "date": null, - "path": "docs/features/service-dependencies", - "body": "This document will cover `environment.{defaultServices,optionalServices}` and `service.requires`, their differences, impact scope, and usage.\n\n## `environment.{defaultServices,optionalServices}`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ❌ |\n| dev-0\\* | ❌ |\n\nThis represents the default environment that will be created by lifecycle when a pull request is opened in the service repo\\* and does not have any impact on outside repos, dev-0, or any other static environments that use this service.\n\n## `services.requires`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ✅ |\n| dev-0\\* | ✅ |\n\n`services.requires` has an impact across the board; hence, it is important to understand how it works and when we should use them.\n\n**Please read the info blocks below carefully.**\n\nYou can think of `services.requires` as a hard dependency definition. For example, if you have an API service and a database, the API service will have a hard dependency on the database.\nIn this scenario, the database should not be defined as the default service. Instead, we should make the dependency explicitly clear by adding the database to the API’s `requires` block.\nBy doing this, we ensure that any outside repo that wants to use our API service will get the database along with it but only needs to specify the API service in their `defaultServices` or `optionalServices`.\n\n\n Only services defined in `lifecycle.yaml` should be used in the `requires`\n array. 
If a service is defined in an outside repo, use\n `environment.defaultServices` instead.\n\n\nDo not use services in the `services.requires` if the service itself is not\ndefined in the same lifecycle.yaml.\n\n\n Services defined in the `requires` block will only be resolved 1 level down.\n\n\n**This is a very important nuance, which we get tripped by regularly.**\n\n---\n\n## Examples\n\nTo better illustrate the above statement, consider this example.\n\nRepository A `r-A` has 3 services `s-A`, `s-B`, and `s-C`.\n\n- `s-A` requires `s-B`.\n- `s-B` requires `s-C`.\n\nAs you can see, `s-A` has an indirect dependency on `s-C` through `s-B`.\n\n### Scenario 1: Pull Request in Service repo\\* ✅\n\nWhen we open a pull request in `r-A` repo, lifecycle will deploy 3 services: `s-A`, `s-B`, and `s-C`.\n\n#### Breakdown\n\n- Lifecycle deploys `s-A` and `s-B` because they are defined in `defaultServices`.\n- Services defined in the `requires` block will only be resolved **one level down**.\n- Only services defined in `lifecycle.yaml` should be used in the `requires` array. If a service is defined in an outside repo, use `environment.defaultServices` instead.\n\n```yaml\n# `r-A.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: \"s-A\"\n - name: \"s-B\"\n\nservices:\n - name: \"s-A\"\n requires:\n - name: \"s-B\"\n helm: ...\n\n - name: \"s-B\"\n requires:\n - name: \"s-C\"\n helm: ...\n\n - name: \"s-C\"\n helm: ...\n```\n\n### Scenario 2: ❌\n\nRepository B `r-B` has service `s-X` and also defines an outside repo `r-A` service `s-A` as `environment.defaultServices`.\n\n```yaml\n# `r-B.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: \"s-X\"\n - name: \"s-A\"\n repository: \"lifecycle/r-A\"\n branch: \"main\"\n\nservices:\n - name: \"s-X\"\n helm: ...\n```\n\n#### Breakdown\n\n1. Lifecycle deploys `s-X` and `s-A` because they are defined in `defaultServices`.\n2. 
Lifecycle deploys `s-B` because it is a 1st level dependency of a service (`s-A`) listed in `defaultServices`.\n3. Lifecycle **does not** deploy `s-C` since it is **not** a 1st level dependency of any service listed in `defaultServices` or `optionalServices`.\n\nThe way this scenario manifests is lifecycle will deploy `s-X`, `s-A`, and `s-B`, but the build will likely **fail** because `s-B` is missing a required dependency `s-C`.\n\n### Solutions\n\nThere are 2 ways to address this depending on your use case.\n\n#### Solution 1\n\nAdd `s-B` to `r-B`’s `environment.defaultServices` block in `r-B.lifecycle.yaml`. In effect, this will make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: \"s-X\"\n - name: \"s-A\"\n repository: \"lifecycle/r-A\"\n branch: \"main\"\n - name: \"s-B\"\n repository: \"lifecycle/r-A\"\n branch: \"main\"\n```\n\n#### Solution 2\n\nAdd `s-C` to the `services.requires` block of `r-A` in `r-A.lifecycle.yaml`. This will also make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: \"s-A\"\n - name: \"s-B\"\n\nservices:\n - name: \"s-A\"\n requires:\n - name: \"s-B\"\n - name: \"s-C\"\n helm: ...\n```\n\n### Choosing the Right Solution\n\nIn summary, the solution you should use depends on how you want your service to be consumed in an outside repo\\*.\n\n- If you want outside repos to explicitly include `s-A` and `s-B`, use **Solution 1**.\n- If you want outside repos to only include `s-A` and let dependencies resolve automatically, use **Solution 2**.\n\n---\n\n### Terminology\n\n- **Service repo**: The repository where `lifecycle.yaml` is defined.\n- **Outside repo**: Another repository referencing it.\n- **dev-0**: Default static environment." 
- }, - { - "title": "Install Lifecycle", - "description": null, - "date": null, - "path": "docs/setup/install-lifecycle", - "body": "Now that the infrastructure components are setup, let's install the lifecycle app and create a new Github app that will send events to the application to process and create ephemeral dev environments.\n\n\n Make sure you have updated the kube config to be able to `helm install` in the\n cluster you just created!\n\n\n- Follow installation steps in [lifecycle helm chart](https://github.com/GoodRxOSS/helm-charts/blob/main/charts/lifecycle/README.md)\n\n- Wait for the installation to complete and verify that the pods are running:\n\n```sh\nkubectl get pods -n lifecycle-app\n```\n\n- Once the pods are running, you can access the application at your configured domain (e.g. `https://app.0env.com`)\n\n\n\nJust like that, you have successfully installed Lifecycle and set up the necessary infrastructure to start creating ephemeral environments for your GitHub pull requests!\n\nIf you notice any secure certificate issues when accessing the application, you can check the status of your certificate using the following command:\n\n```sh\nkubectl get certificate -n lifecycle-app\n```\n\n\n\nMake sure the certificate is in the `Ready` state. If it is not, you may need to wait a bit longer for the certificate to be issued or troubleshoot any issues with your DNS settings.\n\nLet's move on to the next step where we will create a GitHub app to connect Lifecycle with your repositories." - }, - { - "title": "Prerequisites", - "description": null, - "date": null, - "path": "docs/setup/prerequisites", - "body": "Before we start with the setup, let's make sure the following prerequisites are in place:\n\n- **GitHub Account**: You'll need either a personal or an organization GitHub account. [Sign up for GitHub](https://github.com/join)\n\n- **Cloud Provider Account**: A Google Kubernetes Engine (GKE) or Amazon Web Services (AWS) Account. 
You'll need an active account with either platform to proceed.\n - [Sign up for Google Cloud](https://cloud.google.com) and create a project\n - [Sign up for AWS](https://aws.amazon.com/)\n\n\n We recommend using an isolated project or account in your cloud provider\n specifically for this setup to begin with. This helps to keep your resources\n organized and manageable as you experiment with Lifecycle.\n\n\n- **CLI Tools**\n\n - **[OpenTofu](https://opentofu.org/docs/intro/install/)** — Infrastructure as code tool (OpenTofu is a community-driven fork of Terraform).\n - **[kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)** — Command-line tool for interacting with Kubernetes clusters.\n - **[gcloud](https://formulae.brew.sh/cask/google-cloud-sdk)** or **[aws-cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)** — Command-line tools for managing Google Cloud or AWS resources, respectively.\n\n- **Custom Domain**: You will need a custom domain (e.g., `0env.com`) to route traffic to your application environments. This is particularly important for setting up:\n\n - Public callback and webhook URLs for the GitHub App\n - Ingress routing within the Kubernetes cluster\n - Secure (HTTPS) access via TLS certificates\n\n- **DNS Provider with Wildcard Support**: The domain must be managed by a DNS provider that supports wildcard DNS records (e.g., \\*.0env.com). 
This is necessary to dynamically route traffic from GitHub to the Lifecycle app and to ephemeral environments.\n\n Supported DNS providers that support wildcard for infrastructure setup include:\n\n\n\n \n **Manual Setup**:\n Setup a [public DNS zone in Google Cloud](https://cloud.google.com/dns) to manage your domain's DNS records.\n\n - Follow steps [here](https://cloud.google.com/dns/docs/zones#create-pub-zone) to setup a\n public DNS zone.\n\n - Wildcard DNS records will be created by the OpenTofu modules in the next steps.\n\n**CLI Setup**:\nUse the `gcloud` CLI to create a public DNS zone for your domain:\n\n```sh\ngcloud config set project \ngcloud auth application-default login\ngcloud services enable dns.googleapis.com --project=\ngcloud dns --project= managed-zones create --description=\"Lifecycle OSS starter DNS public zone\" --dns-name=\".\" --visibility=\"public\" --dnssec-state=\"off\"\n```\n\n_Update your domain's DNS records with NS records provided by Google Cloud DNS. You can find these in the Google Cloud Console under the DNS zone you created._\n\n \n\n\n **[AWS Route 53](https://aws.amazon.com/route53/)**: Amazon's scalable DNS web\n service designed to route end users to Internet applications.\n\n **Manual Setup**:\n\n - Authenticate with AWS CLI using the role/usr you desire.\n - Ensure you have [your domain provisioned to accept wildcards](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html); eg `*.lifecycle..com`\n\n **CLI Setup**:\n\n ```sh\n aws configure\n ```\n\n ```sh\n aws route53 change-resource-record-sets --hosted-zone-id --change-batch '{\n \"Comment\": \"CREATE wildcard for \",\n \"Changes\": [\n {\n \"Action\": \"CREATE\",\n \"ResourceRecordSet\": {\n \"Name\": \"..com\",\n \"Type\": \"A\",\n \"TTL\": 300,\n \"ResourceRecords\": [\n {\n \"Value\": \"*********\"\n }\n ]\n }\n }\n ]\n }'\n ```\n\n\n\n\n If you want to use Cloudflare as your primary DNS provider and manage your DNS records on 
Cloudflare, your domain should be using a full setup.\n This means that you are using Cloudflare for your authoritative DNS nameservers.\n Follow the steps [here](https://developers.cloudflare.com/dns/zone-setups/full-setup/setup/) to setup a public DNS zone in Cloudflare.\n\n\n\n\n---\n\n\n Ensure that your domain’s nameservers are pointing to your chosen DNS provider\n at your registrar, and that you have permission to create and manage DNS\n records programmatically. **This is crucial for the setup to work\n correctly and will take time to propagate.**\n\nUse https://dnschecker.org/#NS to verify that your domain's nameservers are correctly set up.\n\n\n\nOnce you have these prerequisites in place, you can proceed to the next steps in setting up the cluster and application." - }, - { - "title": "Setup your cluster", - "description": null, - "date": null, - "path": "docs/setup/setup-infra", - "body": "Based on the prerequisites you've set up, you're now ready to configure your Kubernetes cluster for Lifecycle. 
This setup will ensure that your cluster is properly configured to run Lifecycle and manage your application environments effectively.\n\n\n Note that the infra setup with the OpenTofu modules below will **open your\n cluster to the world.** \n 🛡️ Make sure to **shield** your cluster by implementing appropriate network policies\n and access controls after the initial setup.\n\n\n\nClick on the cloud provider you are using to set up your cluster:\n\n- [Google Cloud Platform (GCP)](#google-cloud-platform)\n- [Amazon Web Services (AWS)](#amazon-web-services)\n\n## Google Cloud Platform\n\n### Setup application credentials\n\n```sh\n# setup current project\ngcloud config set project \n# creates the application default credentials\ngcloud auth application-default login\n```\n\nEnable Kubernetes Engine and Cloud DNS APIs:\n\n```sh\ngcloud services enable container.googleapis.com --project=\ngcloud services enable dns.googleapis.com --project=\n```\n\n\n Note that you need to replace `` with your actual Google Cloud project ID not the project name.\n\n\n### Bootstrap infrastructure\n\n- Clone the infrastructure repository:\n\n```sh\ngit clone https://github.com/GoodRxOSS/lifecycle-opentofu/\ncd lifecycle-opentofu\n```\n\n- Follow steps in the [infrastructure repository](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#%EF%B8%8F-quick-start) to set up the necessary infrastructure components.\n\n```sh\ncp example.auto.tfvars secrets.auto.tfvars\n```\n\nExample `secrets.auto.tfvars` file:\n\n```hcl secrets.auto.tfvars\ngcp_project = \"\"\ngcp_region = \"\"\n# this is the default credentials file created by gcloud cli\ngcp_credentials_file = \"~/.config/gcloud/application_default_credentials.json\"\ncluster_provider = \"gke\"\ndns_provider = \"cloud-dns\" # [cloudflare|route53|cloud-dns]\napp_domain = \"\" # e.g. 
0env.com\n\ncluster_name = \"lifecycle-gke\" # change this to your preferred cluster name\napp_namespace = \"lifecycle-app\" # use default namespace\n```\n\n- Initialize and apply the Terraform configuration:\n\n```sh\ntofu init\ntofu plan\ntofu apply\n```\n\nThis will create the necessary infrastructure components, including the Kubernetes cluster, DNS records, database, redis and other resources required for Lifecycle to function properly.\n\nAfter the Terraform apply completes, you should have a fully functional Kubernetes cluster with the necessary resources set up.\n\nLet's test the public DNS setup by accessing the test application deployed called `kuard` and follow the rest of the setup instructions from the `tofu apply` output.\n\n```sh\ncurl -v https://kuard.0env.com # replace with your domain\n```\n\nRefer example output [here](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#4-initialize--apply) to setup kubeconfig and access the cluster using `kubectl`.\n\nNow that your cluster is set up, you can proceed to installing Lifecycle application to your cluster.\n\n\n}\n title=\"Install Lifecycle\"\n href=\"/docs/setup/install-lifecycle\"\n arrow\n/>\n\n---\n\n## Amazon Web Services\n\n```sh\n# setup current project\naws configure --profile lifecycle-oss-eks\nAWS Access Key ID [***]: \nAWS AWS Secret Access Key [***]: \nDefault Region name: \nDefault output format: \n```\n\n\\*This profile needs to have access a user with `AdministratorAccess` access.\n\n---\n\n### Bootstrap infrastructure\n\n- Clone the infrastructure repository:\n\n```sh\ngit clone https://github.com/GoodRxOSS/lifecycle-opentofu/\ncd lifecycle-opentofu\n```\n\n- Follow steps in the [infrastructure repository](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#%EF%B8%8F-quick-start) to set up the necessary infrastructure components.\n\n```sh\ncp example.auto.tfvars secrets.auto.tfvars\n```\n\nExample `secrets.auto.tfvars` file:\n\n```hcl 
secrets.auto.tfvars\n# gcp_project = \"\"\n$ gcp_region = \"\"\n# this is the default credentials file created by gcloud cli\n# gcp_credentials_file = \"~/.config/gcloud/application_default_credentials.json\"\ncluster_provider = \"aws\"\ndns_provider = \"route53\" # [cloudflare|route53|cloud-dns]\napp_domain = \"example.com\" # e.g. 0env.com\n\ncluster_name = \"lifecycle-eks\" # change this to your preferred cluster name\napp_namespace = \"lifecycle-app\" # use default namespace\n```\n\n- Initialize and apply the Terraform configuration:\n\n```sh\ntofu init\ntofu plan\ntofu apply\n```\n\nThis will create the necessary infrastructure components, including the Kubernetes cluster, DNS records, database, redis and other resources required for Lifecycle to function properly.\n\nAfter the Terraform apply completes, you should have a fully functional Kubernetes cluster with the necessary resources set up.\n\nLet's test the public DNS setup by accessing the test application deployed called `kuard` and follow the rest of the setup instructions from the `tofu apply` output.\n\n```sh\ncurl -v https://kuard.0env.com # replace with your domain\n```\n\nRefer example output [here](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#4-initialize--apply) to setup kubeconfig and access the cluster using `kubectl`.\n\nNow that your cluster is set up, you can proceed to installing Lifecycle application to your cluster.\n\n\n}\n title=\"Install Lifecycle\"\n href=\"/docs/setup/install-lifecycle\"\n arrow\n/>" - }, - { - "title": "Additional Configuration", - "description": null, - "date": null, - "path": "docs/setup/configure-lifecycle", - "body": "We are in the final step of the setup process.\n\n**This step is Optional but highly recommended to ensure the default IP Whitelist is set for the environments created by the Lifecycle app.** This will help in securing the environments and restricting access to only the specified IPs or CIDR blocks.\n\n## Set Default IP 
Whitelist\n\n- Connect to the `postgres` database using the `psql` command line tool or any other database client.\n\n \n\n Database password was auto generated during the infra setup and can be found\n retrieved from the `app-postgres` secret in the `lifecycle-app`\n namespace.\n\n \n\n- Retrieve the database password:\n\n```sh\n kubectl get secret app-postgres --namespace lifecycle-app \\\n -o jsonpath='{.data}' | jq 'with_entries(.value |= @base64d)'\n```\n\n- Run the following SQL commands to update the configuration:\n\n```sql\n-- provide a default IP whitelist for the environments, default is open to all IPs\nUPDATE public.global_config\nSET\n config = (\n config::jsonb ||\n '{\n \"defaultIPWhiteList\": \"{ 0.0.0.0/0 }\"\n }'::jsonb\n )::json,\n \"updatedAt\" = NOW()\nWHERE \"key\" = 'serviceDefaults';\n```\n\n\n Note that the infra setup with the OpenTofu modules below will **open your\n cluster to the world.** \n 🛡️ Make sure to **shield** your cluster by implementing appropriate network policies\n and access controls after the initial setup.\n\nReplace the `defaultIPWhiteList` under `global_config.serviceDefaults` with your actual IP whitelist or CIDR block to restrict access to the deployed environments.\n\n\n\n---\n\n## Refresh config cache\n\n```sh\ncurl -X PUT https://app./api/v1/config/cache\n```\n\nThis will refresh the configuration cache and apply the changes you made to the database for the Lifecycle app.\n\nWe are all set! 🎉 And ready to create our first PR based ephemeral environment." 
- }, - { - "title": "Configure Application", - "description": null, - "date": null, - "path": "docs/setup/create-github-app", - "body": "## Configure BuildKit Endpoint\n\nBefore creating the GitHub app, you need to configure the BuildKit endpoint in the database:\n\n\n Set the `HELM_RELEASE` environment variable to your actual Helm release name\n before running the commands below.\n\n\n\n The following commands will create the `buildkit` object and `endpoint`\n configuration if they don't exist, or update them if they do.\n\n\n### Option 1: Using kubectl exec with psql\n\nExecute the following commands to connect to the PostgreSQL pod and run the query:\n\n```bash\n# Set your Helm release name (replace with your actual release name)\nexport HELM_RELEASE=\n\n# Get the database password from the secret\nexport PGPASSWORD=$(kubectl get secret ${HELM_RELEASE}-postgres -n lifecycle-app -o jsonpath='{.data.POSTGRES_USER_PASSWORD}' | base64 -d)\n\n# Run the query\nkubectl exec -it ${HELM_RELEASE}-postgres-0 -n lifecycle-app -- env PGPASSWORD=$PGPASSWORD psql -U lifecycle -d lifecycle -c \"\nUPDATE global_config\nSET config = jsonb_set(\n jsonb_set(\n COALESCE(config::jsonb, '{}'::jsonb),\n '{buildkit}',\n COALESCE(config::jsonb->'buildkit', '{}'::jsonb),\n true\n ),\n '{buildkit,endpoint}',\n '\\\"tcp://${HELM_RELEASE}-buildkit.lifecycle-app.svc.cluster.local:1234\\\"'::jsonb,\n true\n),\n\\\"updatedAt\\\" = NOW()\nWHERE key = 'buildDefaults';\"\n```\n\n### Option 2: Direct SQL query\n\nIf you have direct database access, run the following SQL query (replace `` with your actual Helm release name):\n\n```sql\nUPDATE global_config\nSET config = jsonb_set(\n jsonb_set(\n COALESCE(config::jsonb, '{}'::jsonb),\n '{buildkit}',\n COALESCE(config::jsonb->'buildkit', '{}'::jsonb),\n true\n ),\n '{buildkit,endpoint}',\n '\"tcp://-buildkit.lifecycle-app.svc.cluster.local:1234\"'::jsonb,\n true\n),\n\"updatedAt\" = NOW()\nWHERE key = 'buildDefaults';\n```\n\n### Refresh Configuration 
Cache\n\nAfter running either option above, refresh the configuration cache:\n\n```bash\ncurl -X 'PUT' \\\n 'https://app./api/v1/config/cache' \\\n -H 'accept: application/json'\n```\n\nReplace `` with your actual domain (e.g., `0env.com`).\n\n## Create GitHub App\n\nTo create a Github app that will send events to the Lifecycle with necessary permissions, follow these steps:\n\n\n Make sure you have admin access to the Github organization or account where\n you want to create the app.\n\n\n- Navigate to your installed Lifecycle app at `https://app./setup` (replace `` with your actual domain. e.g. `https://app.0env.com/setup`).\n \n- Select `Personal` or `Organization` based on your needs.\n- Fill in the required fields:\n\n - **Github App Name**: A name for your app. (should be unique, use a prefix with your name or organization. Refer Github app naming convention [here](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/)\n - **Organization Name**: Github organization name where the app will be created. Required if you selected `Organization`.\n\n- Click `Create App`\n- On the Github app creation page, confirm the app name and click `Create`\n- Once the app is created, you will be redirected to the app installation page where you can choose one or more repositories to install the the newly minted app.\n\n \n Make sure to select the repositories you want the app to have access to. You\n can always change this later in the app settings but **adding atleast one\n repository is required to proceed with the setup**.\n \n\n \n\n- Voila! 🎉 Your Github app is now created and installed.\n\n\n\n- Click `Configure and Restart` to apply the changes and start using the app.\n\n\n The step above, sets up the global config values that Lifecycle app will use\n creating ephemeral environments and processing pull requests. 
And restarts the\n `deployment` for the github app secrets to take effect.\n\n\n---\n\nLet's move on the final step where we will configure the Lifecycle app config for processing pull requests and creating ephemeral environments." - }, - { - "title": "Deploy Issues", - "description": "Understand how to handle common deploy issues with environments", - "date": null, - "path": "docs/troubleshooting/deploy-issues", - "body": "TODO: This document will cover common deploy issues that you may encounter\n when working with Lifecycle environments." - }, - { - "title": "Missing PR comment", - "description": null, - "date": null, - "path": "docs/troubleshooting/github-app-webhooks", - "body": "Let's quickly validate that the app is able to send events to the Lifecycle app successfully.\n\n- Navigate to your Github app\n- Click `App Settings` link in the Github application page\n- Choose `Advanced` from the left sidebar\n- `Recent Deliveries` section should show a successful delivery of the `installation` event to the Lifecycle app.\n\n \n If you see an error or no deliveries, make sure the app is installed in the\n atleast one repository and that the webhook URL is set correctly by\n navigating to the `General` section from the left sidebar and checking the\n `Webhook URL` field.\n \n\n- If the delivery is successful, you should see a status code of `200 OK`\n\n## Failing deliveries\n\nIf you see a delivery failure, it could be due to various reasons. Here are some common issues and how to resolve them:\n\n### Github App secrets\n\n- Make sure that the Github App secrets are correctly set in the `lifecycle-app` namespace. 
You can verify this by running the following command:\n\n```sh\nkubectl get secret app-secrets --namespace lifecycle-app \\\n -o jsonpath='{.data}' | jq 'with_entries(.value |= @base64d)'\n```\n\n- The output should include all the `GITHUB_*` variables with the correct values.\n\n- If the secrets are present but the delivery is still failing, try restarting the following deployments.\n\n```sh\n kubectl rollout restart deployment lifecycle-web lifecycle-worker -n lifecycle-app\n```\n\n- Try triggering a new event (create a pull request) by making a change in the repository or by manually redelivering a failed delivery." - }, - { - "title": "Troubleshooting Build Issues", - "description": "Understand how to handle common build issues with environments", - "date": null, - "path": "docs/troubleshooting/build-issues", - "body": "TODO: This document will cover common build issues that you may encounter when\n working with Lifecycle environments." - }, - { - "title": "Lifecycle Full Schema", - "description": "Lifecycle Schema documentation; this page contains the full schema as defined in lifecycle core—all at once.", - "date": null, - "path": "docs/schema/full", - "body": "## Full Lifecycle Schema\n\nBelow is the full Lifecycle schema as defined in the `lifecycle.yaml` file with basic comments for each item.\n\n```yaml\n# @section environment\nenvironment:\n # @param environment.autoDeploy\n autoDeploy: false\n # @param environment.useGithubStatusComment\n useGithubStatusComment: false\n # @param environment.defaultServices\n defaultServices:\n # @param environment.defaultServices[]\n - # @param environment.defaultServices.name (required)\n name: \"\"\n # @param environment.defaultServices.repository\n repository: \"\"\n # @param environment.defaultServices.branch\n branch: \"\"\n # @param environment.optionalServices\n optionalServices:\n # @param environment.optionalServices[]\n - # @param environment.optionalServices.name (required)\n name: \"\"\n # @param 
environment.optionalServices.repository\n repository: \"\"\n # @param environment.optionalServices.branch\n branch: \"\"\n\n# @section services\nservices:\n # @param services[]\n - # @param services.name (required)\n name: \"\"\n # @param services.appShort\n appShort: \"\"\n # @param services.defaultUUID\n defaultUUID: \"\"\n # @param services.github\n github:\n # @param services.github.repository (required)\n repository: \"\"\n # @param services.github.branchName (required)\n branchName: \"\"\n # @param services.github.docker (required)\n docker:\n # @param services.github.docker.defaultTag (required)\n defaultTag: \"\"\n # @param services.github.docker.pipelineId\n pipelineId: \"\"\n # @param services.github.docker.ecr\n ecr: \"\"\n # @param services.github.docker.app (required)\n app:\n # @param services.github.docker.app.afterBuildPipelineConfig\n afterBuildPipelineConfig:\n # @param services.github.docker.app.afterBuildPipelineConfig.afterBuildPipelineId\n afterBuildPipelineId: \"\"\n # @param services.github.docker.app.afterBuildPipelineConfig.detatchAfterBuildPipeline\n detatchAfterBuildPipeline: false\n # @param services.github.docker.app.afterBuildPipelineConfig.description\n description: \"\"\n # @param services.github.docker.app.dockerfilePath (required)\n dockerfilePath: \"\"\n # @param services.github.docker.app.command\n command: \"\"\n # @param services.github.docker.app.arguments\n arguments: \"\"\n # @param services.github.docker.app.env\n env:\n\n # @param services.github.docker.app.ports\n ports:\n # @param services.github.docker.app.ports[]\n - \"\"\n # @param services.github.docker.init\n init:\n # @param services.github.docker.init.dockerfilePath (required)\n dockerfilePath: \"\"\n # @param services.github.docker.init.command\n command: \"\"\n # @param services.github.docker.init.arguments\n arguments: \"\"\n # @param services.github.docker.init.env\n env:\n\n # @param services.github.docker.builder\n builder:\n # @param 
services.github.docker.builder.engine\n engine: \"\"\n # @param services.github.deployment\n deployment:\n # @param services.github.deployment.helm\n helm:\n # @param services.github.deployment.helm.enabled\n enabled: false\n # @param services.github.deployment.helm.chartName\n chartName: \"\"\n # @param services.github.deployment.helm.chartRepoUrl\n chartRepoUrl: \"\"\n # @param services.github.deployment.helm.chartVersion\n chartVersion: \"\"\n # @param services.github.deployment.helm.cmdPs\n cmdPs: \"\"\n # @param services.github.deployment.helm.action\n action: \"\"\n # @param services.github.deployment.helm.customValues\n customValues:\n # @param services.github.deployment.helm.customValues[]\n - \"\"\n # @param services.github.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.github.deployment.helm.customValueFiles[]\n - \"\"\n # @param services.github.deployment.helm.helmVersion\n helmVersion: \"\"\n # @param services.github.deployment.helm.attachPvc\n attachPvc:\n # @param services.github.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.github.deployment.helm.attachPvc.mountPath\n mountPath: \"\"\n # @param services.github.deployment.public\n public: false\n # @param services.github.deployment.capacityType\n capacityType: \"\"\n # @param services.github.deployment.resource\n resource:\n # @param services.github.deployment.resource.cpu\n cpu:\n # @param services.github.deployment.resource.cpu.request\n request: \"\"\n # @param services.github.deployment.resource.cpu.limit\n limit: \"\"\n # @param services.github.deployment.resource.memory\n memory:\n # @param services.github.deployment.resource.memory.request\n request: \"\"\n # @param services.github.deployment.resource.memory.limit\n limit: \"\"\n # @param services.github.deployment.readiness\n readiness:\n # @param services.github.deployment.readiness.disabled\n disabled: false\n # @param services.github.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # 
@param services.github.deployment.readiness.httpGet\n httpGet:\n # @param services.github.deployment.readiness.httpGet.path\n path: \"\"\n # @param services.github.deployment.readiness.httpGet.port\n port: 0\n # @param services.github.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.github.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.github.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.github.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.github.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.github.deployment.hostnames\n hostnames:\n # @param services.github.deployment.hostnames.host\n host: \"\"\n # @param services.github.deployment.hostnames.acmARN\n acmARN: \"\"\n # @param services.github.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: \"\"\n # @param services.github.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: \"\"\n # @param services.github.deployment.network\n network:\n # @param services.github.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.github.deployment.network.ipWhitelist[]\n - \"\"\n # @param services.github.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param services.github.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.github.deployment.network.grpc\n grpc:\n # @param services.github.deployment.network.grpc.enable\n enable: false\n # @param services.github.deployment.network.grpc.host\n host: \"\"\n # @param services.github.deployment.network.grpc.defaultHost\n defaultHost: \"\"\n # @param services.github.deployment.serviceDisks\n serviceDisks:\n # @param services.github.deployment.serviceDisks[]\n - # @param services.github.deployment.serviceDisks.name (required)\n name: \"\"\n # @param services.github.deployment.serviceDisks.mountPath (required)\n mountPath: \"\"\n # @param 
services.github.deployment.serviceDisks.accessModes\n accessModes: \"\"\n # @param services.github.deployment.serviceDisks.storageSize (required)\n storageSize: \"\"\n # @param services.github.deployment.serviceDisks.medium\n medium: \"\"\n # @param services.docker\n docker:\n # @param services.docker.dockerImage (required)\n dockerImage: \"\"\n # @param services.docker.defaultTag (required)\n defaultTag: \"\"\n # @param services.docker.command\n command: \"\"\n # @param services.docker.arguments\n arguments: \"\"\n # @param services.docker.env\n env:\n\n # @param services.docker.ports\n ports:\n # @param services.docker.ports[]\n - \"\"\n # @param services.docker.deployment\n deployment:\n # @param services.docker.deployment.helm\n helm:\n # @param services.docker.deployment.helm.enabled\n enabled: false\n # @param services.docker.deployment.helm.chartName\n chartName: \"\"\n # @param services.docker.deployment.helm.chartRepoUrl\n chartRepoUrl: \"\"\n # @param services.docker.deployment.helm.chartVersion\n chartVersion: \"\"\n # @param services.docker.deployment.helm.cmdPs\n cmdPs: \"\"\n # @param services.docker.deployment.helm.action\n action: \"\"\n # @param services.docker.deployment.helm.customValues\n customValues:\n # @param services.docker.deployment.helm.customValues[]\n - \"\"\n # @param services.docker.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.docker.deployment.helm.customValueFiles[]\n - \"\"\n # @param services.docker.deployment.helm.helmVersion\n helmVersion: \"\"\n # @param services.docker.deployment.helm.attachPvc\n attachPvc:\n # @param services.docker.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.docker.deployment.helm.attachPvc.mountPath\n mountPath: \"\"\n # @param services.docker.deployment.public\n public: false\n # @param services.docker.deployment.capacityType\n capacityType: \"\"\n # @param services.docker.deployment.resource\n resource:\n # @param 
services.docker.deployment.resource.cpu\n cpu:\n # @param services.docker.deployment.resource.cpu.request\n request: \"\"\n # @param services.docker.deployment.resource.cpu.limit\n limit: \"\"\n # @param services.docker.deployment.resource.memory\n memory:\n # @param services.docker.deployment.resource.memory.request\n request: \"\"\n # @param services.docker.deployment.resource.memory.limit\n limit: \"\"\n # @param services.docker.deployment.readiness\n readiness:\n # @param services.docker.deployment.readiness.disabled\n disabled: false\n # @param services.docker.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.docker.deployment.readiness.httpGet\n httpGet:\n # @param services.docker.deployment.readiness.httpGet.path\n path: \"\"\n # @param services.docker.deployment.readiness.httpGet.port\n port: 0\n # @param services.docker.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.docker.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.docker.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.docker.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.docker.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.docker.deployment.hostnames\n hostnames:\n # @param services.docker.deployment.hostnames.host\n host: \"\"\n # @param services.docker.deployment.hostnames.acmARN\n acmARN: \"\"\n # @param services.docker.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: \"\"\n # @param services.docker.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: \"\"\n # @param services.docker.deployment.network\n network:\n # @param services.docker.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.docker.deployment.network.ipWhitelist[]\n - \"\"\n # @param services.docker.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param 
services.docker.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.docker.deployment.network.grpc\n grpc:\n # @param services.docker.deployment.network.grpc.enable\n enable: false\n # @param services.docker.deployment.network.grpc.host\n host: \"\"\n # @param services.docker.deployment.network.grpc.defaultHost\n defaultHost: \"\"\n # @param services.docker.deployment.serviceDisks\n serviceDisks:\n # @param services.docker.deployment.serviceDisks[]\n - # @param services.docker.deployment.serviceDisks.name (required)\n name: \"\"\n # @param services.docker.deployment.serviceDisks.mountPath (required)\n mountPath: \"\"\n # @param services.docker.deployment.serviceDisks.accessModes\n accessModes: \"\"\n # @param services.docker.deployment.serviceDisks.storageSize (required)\n storageSize: \"\"\n # @param services.docker.deployment.serviceDisks.medium\n medium: \"\"\n```" - }, - { - "title": "Terminology", - "description": null, - "date": null, - "path": "docs/getting-started/terminology", - "body": "This glossary provides an overview of key Lifecycle concepts and terminology. Let's see how they fit into the environment setup and deployment process.\n\n## Repository\n\nA **repository** refers to a GitHub repository. Each environment that is built **must** have a default repository and an associated pull request.\n\n## Service\n\nA **service** is a deployable artifact. It can be a Docker container, CI pipeline, RDS database, or Helm chart. 
A single repository can contain multiple services.\n\n**Example:** \n`frontend-service` and `frontend-cache` are two services required for the frontend application to function correctly.\n\n## Environment\n\nAn **environment** is a stack of services built and connected together.\n\n- **`defaultServices`** are built and deployed in an environment by default.\n- **`optionalServices`** can be built and deployed only when needed; otherwise, they fall back to the **default static environment**.\n\n## Static Environment\n\nA **static environment** is a long-lived environment based on a pull request. It tracks branches from configured services and updates automatically when new changes are merged.\n\n## Build\n\nA **build** is the actual instance of the process to build and deploy services within an environment.\n\n- Each build is uniquely identified by Lifecycle using a UUID (e.g., `arm-model-060825` or `dev-0`).\n- A build contains **one deploy per service** in the configuration.\n\n## Deploy\n\nA **deploy** manages the build and deployment execution of a service within an environment.\n\n**Example:**\nIn a frontend environment, `frontend-service` and `frontend-cache` are two deploys created for the environment, each mapped to a unique build UUID.\n\n## Webhook\n\nLifecycle can invoke third-party services when a build state changes. Currently, only **Codefresh triggers** are supported.\n\n### Example\n\n- When the build status is `deployed`, trigger end-to-end tests.\n- When the build status is `error`, trigger infrastructure cleanup." 
- }, - { - "title": "Explore static environment", - "description": "Create the first and default static environment", - "date": null, - "path": "docs/getting-started/explore-static-environment", - "body": "A **static environment** in Lifecycle is a persistent environment that serves as a fallback when dependent services do not need to be rebuilt.\n\nUnlike ephemeral environments that are built on short lived pull requests, static environments are built on top of long lived pull requests. These environments exist continuously and update automatically as changes are merged into the default branch of configured services.\n\n## What is `dev-0`\n\nThe **default static environment** is `dev-0`. This environment ensures that there is always a **stable and up-to-date version** of services available without needing to build every dependency manually.\n\n\n\nThe `dev-0` environment should be created for your installation.\n\nDuring the initial bootstrapping of Lifecycle, the `dev-0` build record is created automatically but this itself does not have any services built.\n\n\n\n## Create `dev-0`\n\n- Delete the dummy `dev-0` build record from `builds` table in the database\n\n```sql\nDELETE FROM builds WHERE uuid = 'dev-0';\n```\n\n- Create a repository named `lifecycle-static-env` in your GitHub account\n- Install the Lifecycle GitHub App in this repository\n- Create a pull request in this repository with branch `dev-0`\n- Add `lifecycle.yaml` file to the root of the repository with all the services you want to include in the `dev-0` environment\n\n **Example:**\n\n```yaml\nenvironment:\n defaultServices:\n - name: \"frontend\"\n repository: \"account/frontend-repo\"\n branch: \"main\"\n - name: \"grpc\"\n repository: \"account/backend-grpc\"\n branch: \"main\"\n```\n\n- Deploy the `dev-0` environment by adding `lifecycle-deploy!` label to the pull request\n- Update `uuid` for the environment to `dev-0` in the [mission control 
comment](/docs/tips/using-mission-control#override-uuid)\n- Finally, execute this query to track default branches of the services in the `dev-0` environment:\n\n```sql\nUPDATE builds\nSET\n \"trackDefaultBranches\" = true,\n \"isStatic\" = true\nWHERE\n uuid = 'dev-0';\n```\n\n## Key Features\n\n**🏗️ Fallback for Optional Services**\n\n- When optional services are not explicitly built in an ephemeral environment, Lifecycle defaults to using the latest build from `dev-0`.\n\n**💪 Based on a Persistent PR**\n\n- Similar to ephemeral environments, `dev-0` is based on a PR, but it remains open and continuously updates.\n\n**👣 Tracks Changes on Default Branch Merges**\n\n- Whenever a service has a new change merged to its `main` branch, `dev-0` will **automatically pull, build, and redeploy** the latest changes.\n- This ensures `dev-0` always contains **the freshest version** of all services." - }, - { - "title": "Configure environment", - "description": null, - "date": null, - "path": "docs/getting-started/configure-environment", - "body": "Now that we've created and deployed our first Lifecycle environment, let's learn how to customize it by configuring services and dependencies.\n\n## Understanding Configuration\n\nFirst, let's take a look at the `lifecycle.yaml` configuration file at the root dir of [lifecycle-examples](https://github.com/GoodRxOSS/lifecycle-examples/blob/main/lifecycle.yaml) repository:\n\n```yaml filename=\"lifecycle.yaml\"\nenvironment:\n autoDeploy: true\n defaultServices:\n - name: \"frontend\"\n - name: \"backend\"\n optionalServices:\n - name: \"cache\"\n\nservices:\n - name: \"frontend\"\n defaultUUID: \"dev-0\"\n github:\n repository: \"iceycake/lifecycle-examples\"\n branchName: \"main\"\n docker:\n builder:\n engine: \"buildkit\"\n defaultTag: \"main\"\n app:\n dockerfilePath: \"Dockerfile.frontend\"\n ports:\n - 3000\n env:\n COMPONENT: \"app\"\n ENV: \"lifecycle\"\n API_URL: \"https://{{{backend_publicUrl}}}\"\n CACHE_URL: 
\"{{{cache_internalHostname}}}\"\n WES_IS: \"GOAT\"\n - name: \"backend\"\n requires:\n - name: \"db\"\n defaultUUID: \"dev-0\"\n # ...\n - name: \"db\"\n defaultUUID: \"dev-0\"\n # ...\n - name: \"cache\"\n defaultUUID: \"dev-0\"\n # ...\n```\n\n### Default and Optional Services\n\nWe have our dependencies defined in **`defaultServices`** and **`optionalServices`**:\n\n- **`defaultServices`** – These services are always **built and deployed** with the environment. They form the core foundation of the environment and are required for it to function correctly.\n- **`optionalServices`** – These services **can be built on demand**, only when explicitly needed. If they are not selected during a PR, they default to using a **static environment** (e.g., `dev-0`).\n\n### Template Variables\n\nNotice how there are template variables defined in service named `frontend` > `github.docker.env`:\n\n```yaml\nAPI_URL: \"https://{{{backend_publicUrl}}}\"\nCACHE_URL: \"{{{cache_internalHostname}}}\"\n```\n\nThis `API_URL` and `CACHE_URL` variables are dynamically templated by Lifecycle and provided during the **build** and **deploy** steps for the frontend service.\n\n\n Read more about supported template variables\n [here](/docs/features/template-variables)\n\n\n## Static Environment as a Fallback\n\nSince `cache` is an **optional service**, this service defaulted to using a **static environment**(`dev-0`) as a fallback. This allows us to reuse existing environments instead of rebuilding everything from scratch when there are no changes.\n\n### Check Template Variables\n\nTo view how the fallback URL works,\n\n1. Open your **Tasks App**(frontend) from the deployed environment.\n2. Navigate to the `Variables` page.\n3. 
Search for `_URL` and check its value.\n - It should look like:\n ```\n API_URL: https://backend-.\n CACHE_URL: cache-dev-0.env-dev-0.svc.cluster.local\n ```\n - Notice how `CACHE_URL` defaults to the `dev-0`(static) environment for the optional cache.\n\n## Configuring Services\n\nNow, let's say you also want to the `cache` component to **test, build and deploy it in your environment**.\n\n### Enable Cache Deployment\n\n1. Navigate to the **Lifecycle PR comment** on GitHub.\n2. Select the `cache` checkbox in the comment. That's it!\n3. Lifecycle will now start **building and deploying the cache service** for your specific environment.\n4. Wait for the build to complete. You can monitor the progress in the **status comment**.\n\n### Confirm the New Cache URL\n\n5. Once the cache is deployed, go back to your **frontend app’s Variables page**.\n6. Check the `CACHE_URL` value.\n - It should now look like:\n ```\n cache-.env-.svc.cluster.local\n ```\n7. Now, you're running your cache **from your own environment** instead of an existing static deploy!\n8. 
Check the application’s **Tasks** page while you’re here and observe the completely different data, as this environment uses a freshly built and seeded database.\n\n## Build Flexible Environments\n\nWith this approach, you can:\n\n- Build **any combination** of frontend and backend services.\n- Use **custom branches** for different services.\n- Test **different versions** of your app.\n\n\n Check how to use Mission Control comments for configuring your environment\n [here](/docs/tips/using-mission-control)\n\n\nThis gives you a **custom, isolated testing environment** that mirrors your\nproduction setup while allowing flexibility in development and validation.\n\n## Summary\n\n- Services marked as **optional** in `lifecycle.yaml` will default to static environments unless explicitly built.\n- You can enable/disable any service directly from the **Lifecycle PR comment**.\n- Lifecycle automates dependency management, ensuring your services deploy in the correct order.\n\n**Now you're ready to customize your Lifecycle environments like a pro!** 👩‍💻" - }, - { - "title": "Explore environment", - "description": null, - "date": null, - "path": "docs/getting-started/explore-environment", - "body": "Now that we've deployed our first Lifecycle environment, let’s take a tour of the PR comments to understand how to interact with our ephemeral environment.\n\n## Test Your Application\n\nLet's navigate to the deployed `frontend` app from the PR comment.\n\n1. Click on the `frontend` link in the PR comment to navigate to your deployed application.\n2. Add a task and complete few tasks to update data in backend.\n3. Navigate to the `variables` page and checkout the variables in your application's container.\n4. Thats it! You have successfully deployed and tested the best todo app in the world! 
🎉\n\n## Mission Control Comment\n\nThe **Lifecycle PR comment** in your pull request serves as the **mission control** for your ephemeral environment.\n\n\n\n### What You Can Do in the PR Comment\n\n- **Editable Checkboxes**: Select or deselect services to include in your environment.\n- **Redeploy Checkbox**: Triggers a redeploy (useful for transient issues).\n- **Deployment Section**: Provides URLs to your **deployed services**.\n\n}>\n Read more about [Mission Control comment\n here](/docs/tips/using-mission-control.mdx)\n\n\n## Status Comment\n\nWhen we add the `lifecycle-status-comments!` label to our pull request, Lifecycle will automatically add a **status comment** to the PR.\n\nThis comment provides real-time updates on the status, links to your deployments including the build progress and service statuses.\n\n\n\nNotice the following while the environment is being built:\n\n- The status comment is **updated in real-time**.\n- The **status** of each service is displayed.\n- The **build logs** are available for each service.\n\n\n\n### Next Steps\n\nIn the next section, we will:\n\n⚙️ Customize our configuration\n☑️ Enable and build an optional service(`cache`) support your application\n\n**Ready to level up your ephemeral environment? Let\\'s go!** 🏃‍➡️" - }, - { - "title": "Create environment", - "description": null, - "date": null, - "path": "docs/getting-started/create-environment", - "body": "In this walk through, we will make a simple change to an example frontend repository and create our first ephemeral environment using Lifecycle.\n\n## 1. 
Fork the Repository\n\nFork the [`lifecycle-examples`](https://github.com/GoodRxOSS/lifecycle-examples) repository to your org or personal account and install your newly minted GitHub App to the forked repository.\n\n- Navigate to `https://github.com/settings/apps` (for personal accounts) or `https://github.com/organizations//settings/apps` (for org accounts).\n- Find the **Lifecycle GitHub App** and click on **Edit**.\n- Choose `Install App` from sidebar and click the Settings icon.\n- Select the forked repository from the list and select **Save**.\n\n## 2. Create a New Branch\n\nClone the repo and create a branch named `lfc-config`:\n\n```sh\ngit checkout -b lfc-config\n```\n\nor if you are using GitHub Desktop, you can create a new branch from the UI.\n\n## 3. Update Lifecycle Configuration\n\nOpen the `lifecycle.yaml` file in the root of the repository and update the `frontend` service's repository to your github username or org.\n\n**Before:**\n\n```yaml filename=\"lifecycle.yaml\"\ngithub:\n repository: \"GoodRxOSS/lifecycle-examples\"\n```\n\n**After:**\n\n```yaml filename=\"lifecycle.yaml\"\ngithub:\n repository: \"/lifecycle-examples\"\n```\n\n## 4. Commit & Push Your Changes\n\n```sh\ngit add .\ngit commit -m \"update config\"\ngit push origin lfc-config\n```\n\n## 5. Create a Pull Request\n\n1. Open a **Pull Request (PR)** from `lfc-config` to `main` in the forked repository.\n2. Submit the PR.\n\n## 6. Lifecycle PR Comment\n\nAfter submitting the PR, you’ll see a **GitHub comment from Lifecycle** on your pull request.\n\n🔹 This PR comment is the **mission control** for your ephemeral environment. 
It provides:\n\n- A **status update** of the build and deploy process.\n- A **list of services** configured for the environment.\n- A **link to the Lifecycle UI** where you can view logs, deployments, and environment details.\n\n\n If there is no comment from Lifecycle, it means the app is not configured\n correctly or the GitHub App is not installed in the repository. Please refer\n to the [Missing Comment](/docs/troubleshooting/github-app-webhooks) page for\n more information.\n\n\n## 7. Add `lifecycle-status-comments!` label\n\nThe additional label `lifecycle-status-comments!` provides more detailed information about the environment status and links to access the running application.\n\n🔹 The comments provides insights into:\n\n- **Build & Deploy Status**: Track when your environment is ready.\n- **Environment URLs**: Access the running frontend app.\n- **Telemetry Links**: Links to telemetry, build and deploy logs. (if enabled)\n\n## 8. Wait for Deployment\n\nWait for the **builds & deploys** to complete. Once the status updates to **`deployed`**, your environment is live! 🚀\n\nWhen a new commit is pushed to your pull request Lifecycle automatically builds and deploys again so you always have the latest version of the application.\n\n\n If there are any errors during the build or deploy process, the environment\n will not be created, and you will see an error message in the Lifecycle\n comment.\n \n \n You can check the logs from `lifecycle-worker` pods in your cluster to debug\n the issue: `kubectl logs deploy/lifecycle-worker -n lifecycle-app -f\n `\n \n\n\n## 9. Checkout the deployed application\n\nOnce the deployment is complete, you can access your environment at the URL provided in the Lifecycle comment on your pull request. 
Click on the `frontend` link to open your application in a new tab.\n\nThe application has two simple pages:\n\n- **`/tasks`** – A simple to-do list.\n- **`/variables`** – Displays all environment variables from the container.\n\n## Next Steps\n\nNow that your first ephemeral environment is ready, move on to the next section where we:\n\n🧪 Test the environment.\n🧭 Explore the comments and logs.\n⚙️ Customize the configuration." - }, - { - "title": "Delete environment", - "description": null, - "date": null, - "path": "docs/getting-started/delete-environment", - "body": "To **tear down** an environment, you can do one of the following:\n\n1. **Merge or close the pull request**: This will automatically clean up the environment.\n2. **Apply the `lifecycle-disabled!` label**: This will immediately trigger the environment deletion process.\n\n---\n\nThe **`lifecycle-disabled!`** label is useful in scenarios where:\n\n- The environment infrastructure is **experiencing issues**.\n- The data within the environment is **corrupt**.\n- You need to **restart or rebuild** the environment from scratch without waiting for a PR to be merged or closed.\n\nSimply apply the label to the **PR associated with the environment**, and Lifecycle will automatically tear it down.\n\n\n Read more about how pull request labels control auto deploy in repositories\n [here](/docs/features/auto-deployment)\n\n\n---\n\nUsing these methods, you can efficiently manage and clean up environments to ensure smooth development and testing workflows. 🧹" - }, - { - "title": "Telemetry", - "description": null, - "date": null, - "path": "docs/tips/telemetry", - "body": "Lifecycle comes with built-in support for Datadog telemetry. 
To collect logs and metrics from your cluster and deployed applications, install the Datadog Agent and Cluster Agent in your cluster.\n\nThe deployed applications are already configured with the necessary Datadog labels and environment variables for seamless integration:\n\n**Pod labels:**\n\n```yaml\ntags.datadoghq.com/env: lifecycle-binlab-zero-101010\ntags.datadoghq.com/service: frontend\ntags.datadoghq.com/version: binlab-zero-101010\n```\n\n**Environment variables:**\n\n```yaml\n- name: DD_ENV\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.labels['tags.datadoghq.com/env']\n- name: DD_SERVICE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.labels['tags.datadoghq.com/service']\n- name: DD_VERSION\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.labels['tags.datadoghq.com/version']\n```\n\nThis setup ensures that Datadog automatically detects the environment, service, and version for each application, enabling rich observability and correlation of logs and metrics in the Datadog platform." - }, - { - "title": "Mission Control comment", - "description": "Use the Mission Control PR Comment to modify and customize your environment directly from the pull request comment.", - "date": null, - "path": "docs/tips/using-mission-control", - "body": "Lifecycle uses **Mission Control PR Comments** to allow users to modify and customize their environments directly from the pull request comment. This enables easy **service selection**, **branch customization**, and **environment variable overrides** without modifying `lifecycle.yaml`.\n\n---\n\n## Selecting and Deselecting Services\n\nEach pull request environment includes **default services** and optional additional services. 
You can enable or disable services using the checkboxes.\n\n- **Enabled Services** are marked with `[x]`.\n- **Disabled Services** are marked with `[ ]`.\n\n**Example:**\n\n```md\n// Default Services\n\n- [x] frontend: dev-default\n- [x] fastly: main\n\n// Optional Additional Services\n\n- [ ] backend-service: main\n- [ ] backend-db: main\n- [ ] backend-cache: main\n```\n\nTo **enable** a service, change `[ ]` to `[x]`. To **disable** a service, change `[x]` to `[ ]`. As simple as that!\n\n\n If you need to make multiple selections or deselections at once, use the\n **Edit Comment** option instead of clicking checkboxes individually. This\n prevents multiple back-to-back builds, as each selection triggers an event in\n Lifecycle without deduplication.\n\n\n## Choosing a Branch\n\nTo deploy a specific branch for a service, modify the branch name after the service name.\n\n**Example:**\n\n```md\n- [x] frontend: feature-branch\n- [x] fastly: main\n```\n\nThis will deploy `frontend` using the `feature-branch` instead of the default branch.\n\n## Overriding Environment Variables\n\nTo set additional environment variables, use the **Override Environment Variables** section in the PR comment.\n\n**Example:**\n\n```md\n// **Override Environment Variables:** _ENV:[KEY]:[VALUE]_\nENV:API_URL:https://api.custom.dev.0env.com\nENV:CHIEF_INTERN:ICEYCAKE\n```\n\nThis sets `API_URL` and `CHIEF_INTERN` in the environment without modifying the service configuration.\n\n## Override UUID\n\nTo set a custom UUID (subdomain) for the environment, use the **Override UUID** section in the PR comment.\n\n```md\n// UUID (Pick your own custom subdomain)\nurl: wagon-builder-060825\n```\n\nReplace `wagon-builder-060825` with your desired subdomain. 
This allows you to customize the environment URL without changing the underlying service configuration.\n\n---\n\nUsing the **Mission Control PR Comment**, you can easily customize your environment **without modifying code**, making it a flexible way to test and deploy changes dynamically." - } -] \ No newline at end of file diff --git a/src/lib/static/blogcontent/blogcontent.ts b/src/lib/static/blogcontent/blogcontent.ts deleted file mode 100644 index 8421843..0000000 --- a/src/lib/static/blogcontent/blogcontent.ts +++ /dev/null @@ -1,178 +0,0 @@ -export const blogContent = [ - { - title: "Introducing Lifecycle", - description: null, - date: null, - path: "articles/introduction", - body: 'We started building **Lifecycle** at GoodRx in 2019 because managing our lower environments like staging, development, QA had become a daily headache. As our architecture shifted from a monolith to microservices, our internal channels were flooded with messages like "Is anyone using staging?" "Staging is broken again," and "Who just overwrote my changes?" Waiting in line for hours (sometimes days) to test code in a real-world-like environment was the norm.\n\nWe simply couldn\'t scale with our engineering growth. So, as a proof of concept, we spun up **Lifecycle**: a tool that lets you create on-demand, ephemeral environments off of github pull request.\n\nAt first, only a handful of services were onboarded, but our engineers immediately saw the difference, no more static staging servers, no more pipeline gymnastics, and no more accidental overwrites. 
They wanted Lifecycle wherever they touched code, so we built a simple lifecycle.yaml configuration, replaced our manual database entries, and baked Lifecycle support into every new service template.\n\nAfter ironing out early scaling kinks, we realized Lifecycle had become more than an internal convenience, it was a game-changer for us.\n\nToday (June 5, 2025), we\'re thrilled to open-source five years of collective effort under the Apache 2.0 license. This project represents countless late-night brainstorming sessions, pull requests, and "aha" moments, and we can\'t wait to see how you\'ll make it your own: adding integrations, optimizing performance, or finding novel workflows we never imagined.\n\nBy sharing Lifecycle, we hope to help teams stuck in the same limited environment limbo we once were and build a community of passionate likeminded developers who\'ll shape the the future of Lifecycle.\n\nWe look forward to learning from you, growing together, and making shipping high-quality software faster and more enjoyable for everyone!\n\nJoin our Discord server [here](https://discord.gg/M5fhHJuEX8) to connect!!', - }, - { - title: "What is Lifecycle?", - description: - "Lifecycle is your effortless way to test and create ephemeral environments", - date: null, - path: "docs/what-is-lifecycle", - body: "Lifecycle is an **ephemeral** _(/əˈfem(ə)rəl/, lasting for a very short time)_ environment orchestrator that transforms your GitHub pull requests into fully functional development environments. It enables developers to test, validate, and collaborate on features without the hassle of managing infrastructure manually.\n\n> With **Lifecycle**, every pull request gets its own connected playground—ensuring that changes can be previewed, integrated, and verified before merging into its main branch.\n\n## A Developer’s Story\n\nImagine working in an organization that develops multiple services. 
Managing and testing changes across these services can be challenging, especially when multiple developers are working on different features simultaneously.\n\nMeet **Nick Holiday 👨‍💻**, an engineer who needs to update a database schema and modify the corresponding API in a backend service. Additionally, his change requires frontend service updates to display the new data correctly.\n\n### Traditional Workflow Challenges\n\n- **Shared environments** – Nick deploys his backend service changes to a shared dev or staging environment, but another engineer is testing unrelated changes at the same time.\n- **Conflicting updates** – The frontend engineers working on the UI might face issues if their code depends on a stable backend service that keeps changing.\n- **Environment management** – Setting up and maintaining an isolated environment for testing requires significant effort.\n\n### Enter Lifecycle\n\nWith Lifecycle, as soon as Nick opens a pull request, the system automatically:\n\n1. 🏗️ **Creates an isolated development environment** – This environment includes Nick’s updated backend service along with the necessary frontend services.\n2. 🚀 **Deploys the application** – Everything is set up exactly as it would be in production, ensuring a reliable test scenario.\n3. 🔗 **Generates a shareable URL** – Nick and his teammates can interact with the new features without setting up anything locally.\n4. 
🧹 **Cleans up automatically** – Once the PR is merged or closed, Lifecycle removes the environment, keeping things tidy.\n\n## Watch a Quick Demo\n\n\n\n## How It Works\n\n\n\n## Why Use Lifecycle?\n\n- **Faster Feedback Loops** - Get instant previews of your changes without waiting for staging deployments.\n- **Isolation** - Each PR runs in its own sandbox, preventing conflicts.\n- **Seamless Collaboration** - Share URLs with stakeholders, designers, or QA engineers.\n- **Automatic Cleanup** - No more stale test environments; Lifecycle manages cleanup for you.\n- **Works with Your Stack** - Supports containerized applications and integrates with Kubernetes.", - }, - { - title: "Auto Deploy & Labels", - description: - "How to setup auto deploy for pull requests and control envionment with labels", - date: null, - path: "docs/features/auto-deployment", - body: '## Auto-Deploy Configuration\n\nTo enable **automatic deployment** when a PR is created, set the `autoDeploy` attribute in your repository\'s `lifecycle.yaml` file:\n\n```yaml {2} filename="lifecycle.yaml"\nenvironment:\n autoDeploy: true\n defaultServices:\n```\n\n- Lifecycle will **automatically create** the environment as soon as a PR is opened.\n- A `lifecycle-deploy!` label will be added to the PR to indicate that the environment has been deployed.\n\n---\n\n## Managing Deployments with Labels\n\nIf **auto-deploy is not enabled**, you can manually control the environment using PR labels.\n\n### Deploy an Environment\n\nTo create an ephemeral environment for a PR, **add** the `lifecycle-deploy!` label.\n\n### Tear Down an Environment\n\nTo **delete** an active environment, use either of these labels:\n\n- **Remove** `lifecycle-deploy!`\n- **Add** `lifecycle-disabled!`\n\n---\n\n## Automatic Cleanup on PR Closure\n\nWhen a PR is **closed**, Lifecycle will:\n\n1. **Tear down** the active environment.\n2. 
**Remove** the `lifecycle-deploy!` label from the PR.\n\nThis ensures that unused environments do not persist after the PR lifecycle is complete.\n\n---\n\n## Summary\n\n| Feature | Behavior |\n| ---------------------------- | ----------------------------------------------- |\n| `autoDeploy: true` in config | PR environments are **automatically** deployed. |\n| `lifecycle-deploy!` | **Manually deploy** an environment. |\n| Remove `lifecycle-deploy!` | **Tear down** the environment. |\n| Add `lifecycle-disabled!` | **Tear down** the environment manually. |\n| PR closed | **Environment is deleted automatically**. |\n\nUsing these configurations and labels, teams can efficiently manage **ephemeral environments** in their development workflow.', - }, - { - title: "Webhooks", - description: null, - date: null, - path: "docs/features/webhooks", - body: 'Lifecycle can invoke **third-party services** when a build state changes.\n\nWebhooks allow users to automate external processes such as running tests, performing cleanup tasks, or sending notifications based on environment build states.\n\n## Supported Types\n\nLifecycle supports three types of webhooks:\n\n1. **`codefresh`** - Trigger Codefresh pipelines\n2. **`docker`** - Execute Docker images as Kubernetes jobs\n3. 
**`command`** - Run shell commands in a specified Docker image\n\n## Common Use Cases\n\n- When a build status is `deployed`, trigger **end-to-end tests**.\n- When a build status is `error`, trigger **infrastructure cleanup** or alert the team.\n- Run **security scans** on built containers.\n- Execute **database migrations** after deployment.\n- Send **notifications** to Slack, Discord, or other communication channels.\n- Perform **smoke tests** using custom test containers.\n\n## Configuration\n\nWebhooks are defined in the `lifecycle.yaml` under the `environment.webhooks` section.\n\nBelow is an example configuration for triggering end-to-end tests when the `deployed` state is reached.\n\n## Examples\n\n### `codefresh`\n\nThe `codefresh` type triggers existing Codefresh pipelines when build states change.\n\n```yaml\n# Trigger End-to-End Tests on Deployment\nenvironment:\n # ...\n defaultServices:\n - name: "frontend"\n optionalServices:\n - name: "backend"\n repository: "lifecycle/backend"\n branch: "main"\n webhooks:\n - state: deployed\n type: codefresh\n name: "End to End Tests"\n pipelineId: 64598362453cc650c0c9cd4d\n trigger: tests\n env:\n branch: "{{frontend_branchName}}"\n TEST_URL: "https://{{frontend_publicUrl}}"\n # ...\n```\n\n- **`state: deployed`** → Triggers the webhook when the build reaches the `deployed` state.\n- **`type: codefresh`** → Specifies that this webhook triggers a **Codefresh pipeline**.\n- **`name`** → A human-readable name for the webhook.\n- **`pipelineId`** → The unique Codefresh pipeline ID.\n- **`trigger`** → Codefresh pipeline\'s trigger to execute.\n- **`env`** → Passes relevant environment variables (e.g., `branch` and `TEST_URL`).\n\n---\n\n```yaml\n# Trigger Cleanup on Build Error\nenvironment:\n # ...\n webhooks:\n - state: error\n type: codefresh\n name: "Cleanup Failed Deployment"\n pipelineId: 74283905723ff650c0d9ab7e\n trigger: cleanup\n env:\n branch: "{{frontend_branchName}}"\n CLEANUP_TARGET: "frontend"\n # 
...\n```\n\n- **`state: error`** → Triggers the webhook when the build fails.\n- **`type: codefresh`** → Invokes a Codefresh cleanup pipeline.\n- **`trigger: cleanup`** → Codefresh pipeline\'s trigger to execute.\n- **`env`** → Includes necessary variables, such as `branch` and `CLEANUP_TARGET`.\n\n### `docker`\n\nThe `docker` type allows you to execute any Docker image as a Kubernetes job when build states change.\n\n\n Docker webhooks run as Kubernetes jobs in the same namespace as your build.\n They have a default timeout of 30 minutes and resource limits of 200m CPU and\n 1Gi memory.\n\n\n```yaml\n# Run E2E Tests in Custom Container\nenvironment:\n # ...\n webhooks:\n - name: "E2E Test Suite"\n description: "Execute comprehensive E2E tests"\n state: deployed\n type: docker\n docker:\n image: "myorg/e2e-tests:latest"\n command: ["npm", "run", "e2e"]\n timeout: 1200 # 1 hour (optional, default: 1800 seconds)\n env:\n BASE_URL: "https://{{frontend_publicUrl}}"\n ENVIRONMENT: "ephemeral"\n```\n\n- **`docker.image`** → Docker image to execute (required)\n- **`docker.command`** → Override the default entrypoint (optional)\n- **`docker.args`** → Arguments to pass to the command (optional)\n- **`docker.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n### `command`\n\nThe `command` type is a simplified version of Docker webhooks, ideal for running shell scripts or simple commands.\n\n```yaml\n# Slack Notification Example\nenvironment:\n # ...\n webhooks:\n - name: "Deployment Notification"\n description: "Notify team of successful deployment"\n state: deployed\n type: command\n command:\n image: "alpine:latest"\n script: |\n apk add --no-cache curl\n curl -X POST "$WEBHOOK_URL" \\\n -H "Content-Type: application/json" \\\n -d "{\\"text\\":\\"🚀 Deployed $SERVICE_NAME to $DEPLOY_URL\\"}"\n timeout: 300 # 5 minutes (optional)\n env:\n WEBHOOK_URL: "https://hooks.slack.com/services/XXX/YYY/ZZZ"\n SERVICE_NAME: "{{frontend_internalHostname}}"\n 
DEPLOY_URL: "https://{{frontend_publicUrl}}"\n```\n\n\n Make sure to replace placeholder values like webhook URLs and pipeline IDs\n with your actual values.\n\n\n- **`command.image`** → Docker image to run the script in (required)\n- **`command.script`** → Shell script to execute (required)\n- **`command.timeout`** → Maximum execution time in seconds (optional, default: 1800)\n\n## Trigger states\n\nWebhooks can be triggered on the following build states:\n\n- **`deployed`** → Service successfully deployed and running\n- **`error`** → Build or deployment failed\n- **`torn_down`** → Environment has been destroyed\n\n## Note\n\n- All webhooks for the same state are executed **serially** in the order defined.\n- Webhook failures do not affect the build status.\n- Webhook invocations can be viewed at `/builds/[uuid]/webhooks` page(latest 20 invocations). Use the API to view all invocations.\n- `docker` and `command` type\'s logs are not streamed when the job is still in progress and are available only after the job completes.', - }, - { - title: "Native Helm Deployment", - description: - "Deploy services using Helm directly in Kubernetes without external CI/CD dependencies", - date: null, - path: "docs/features/native-helm-deployment", - body: 'This feature is still in alpha and might change with breaking changes.\n\n\n**Native Helm** is an alternative deployment method that runs Helm deployments directly within Kubernetes jobs, eliminating the need for external CI/CD systems. 
This provides a more self-contained and portable deployment solution.\n\n\n Native Helm deployment is an opt-in feature that can be enabled globally or\n per-service.\n\n\n## Overview\n\nWhen enabled, Native Helm:\n\n- Creates Kubernetes jobs to execute Helm deployments\n- Runs in ephemeral namespaces with proper RBAC\n- Provides real-time deployment logs via WebSocket\n- Handles concurrent deployments automatically\n- Supports all standard Helm chart types\n\n## Quickstart\n\nWant to try native Helm deployment? Here\'s the fastest way to get started:\n\n```yaml filename="lifecycle.yaml" {5}\nservices:\n - name: my-api\n defaultUUID: "dev-0"\n helm:\n deploymentMethod: "native" # That\'s it!\n chart:\n name: "local"\n valueFiles:\n - "./helm/values.yaml"\n```\n\nThis configuration:\n\n1. Enables native Helm for the `my-api` service\n2. Uses a local Helm chart from your repository\n3. Applies values from `./helm/values.yaml`\n4. Runs deployment as a Kubernetes job\n\n\n To enable native Helm for all services at once, see [Global\n Configuration](#enabling-native-helm).\n\n\n## Configuration\n\n### Enabling Native Helm\n\nThere are two ways to enable native Helm deployment:\n\n#### Per Service Configuration\n\nEnable native Helm for individual services:\n\n```yaml {4} filename="lifecycle.yaml"\nservices:\n - name: my-service\n helm:\n deploymentMethod: "native" # Enable for this service only\n chart:\n name: my-chart\n```\n\n#### Global Configuration\n\nEnable native Helm for all services:\n\n```yaml {3} filename="lifecycle.yaml"\nhelm:\n nativeHelm:\n enabled: true # Enable for all services\n```\n\n### Configuration Precedence\n\nLifecycle uses a hierarchical configuration system with three levels of precedence:\n\n1. **helmDefaults** - Base defaults for all deployments (database: `global_config` table)\n2. **Chart-specific config** - Per-chart defaults (database: `global_config` table)\n3. 
**Service YAML config** - Service-specific overrides (highest priority)\n\n\n Service-level configuration always takes precedence over global defaults.\n\n\n### Global Configuration (Database)\n\nGlobal configurations are stored in the `global_config` table in the database. Each configuration is stored as a row with:\n\n- **key**: The configuration name (e.g., \'helmDefaults\', \'postgresql\', \'redis\')\n- **config**: JSON object containing the configuration\n\n#### helmDefaults Configuration\n\nStored in database with key `helmDefaults`:\n\n```json\n{\n "nativeHelm": {\n "enabled": true,\n "defaultArgs": "--wait --timeout 30m",\n "defaultHelmVersion": "3.12.0"\n }\n}\n```\n\n**Field Descriptions**:\n\n- `enabled`: When `true`, enables native Helm deployment for all services unless they explicitly set `deploymentMethod: "ci"`\n- `defaultArgs`: Arguments automatically appended to every Helm command (appears before service-specific args)\n- `defaultHelmVersion`: The Helm version to use when not specified at the service or chart level\n\n#### Chart-specific Configuration\n\nExample: PostgreSQL configuration stored with key `postgresql`:\n\n```json\n{\n "version": "3.13.0",\n "args": "--force --timeout 60m0s --wait",\n "chart": {\n "name": "postgresql",\n "repoUrl": "https://charts.bitnami.com/bitnami",\n "version": "12.9.0",\n "values": ["auth.username=postgres_user", "auth.database=postgres_db"]\n }\n}\n```\n\n\n These global configurations are managed by administrators and stored in the\n database. They provide consistent defaults across all environments and can be\n overridden at the service level.\n\n\n## Usage Examples\n\n### Quick Experiment: Deploy Jenkins!\n\nWant to see native Helm in action? Let\'s deploy everyone\'s favorite CI/CD tool - Jenkins! 
This example shows how easy it is to deploy popular applications using native Helm.\n\n```yaml filename="lifecycle.yaml"\nenvironment:\n defaultServices:\n - name: "my-app"\n - name: "jenkins" # Add Jenkins to your default services\n\nservices:\n - name: "jenkins"\n helm:\n chart:\n name: "jenkins"\n repoUrl: "https://charts.bitnami.com/bitnami"\n version: "13.6.8"\n values:\n - "service.type=ClusterIP"\n - "ingress.enabled=true"\n - "ingress.hostname={{jenkins_publicUrl}}"\n - "ingress.ingressClassName=nginx"\n```\n\n\n 🎉 That\'s it! With just a few lines of configuration, you\'ll have Jenkins\n running in your Kubernetes cluster.\n\n\nTo access your Jenkins instance:\n\n1. Check the deployment status in your PR comment\n2. Click the **Deploy Logs** link to monitor the deployment\n3. Once deployed, Jenkins will be available at the internal hostname\n\n\n For more Jenkins configuration options and values, check out the [Bitnami\n Jenkins chart\n documentation](https://github.com/bitnami/charts/tree/main/bitnami/jenkins).\n This same pattern works for any Bitnami chart (PostgreSQL, Redis, MongoDB) or\n any other public Helm chart!\n\n\n### Basic Service Deployment\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: web-api\n helm:\n deploymentMethod: "native"\n chart:\n name: web-app\n version: "1.2.0"\n```\n\n### PostgreSQL with Overrides\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: database\n helm:\n deploymentMethod: "native"\n version: "3.14.0" # Override Helm version\n args: "--atomic" # Override deployment args\n chart:\n name: postgresql\n values: # Additional values merged with defaults\n - "persistence.size=20Gi"\n - "replicaCount=2"\n```\n\n### Custom Environment Variables\n\nLifecycle supports flexible environment variable formatting through the `envMapping` configuration. 
This feature allows you to control how environment variables from your service configuration are passed to your Helm chart.\n\n\n **Why envMapping?** Different Helm charts expect environment variables in\n different formats. Some expect an array of objects with `name` and `value`\n fields (Kubernetes standard), while others expect a simple key-value map. The\n `envMapping` feature lets you adapt to your chart\'s requirements.\n\n\n#### Default envMapping Configuration\n\nYou can define default `envMapping` configurations in the `global_config` database table. These defaults apply to all services using that chart unless overridden at the service level.\n\n**Example: Setting defaults for your organization\'s chart**\n\n```json\n// In global_config table, key: "myorg-web-app"\n{\n "chart": {\n "name": "myorg-web-app",\n "repoUrl": "https://charts.myorg.com"\n },\n "envMapping": {\n "app": {\n "format": "array",\n "path": "deployment.containers[0].env"\n }\n }\n}\n```\n\nWith this configuration, any service using the `myorg-web-app` chart will automatically use array format for environment variables:\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: api\n helm:\n deploymentMethod: "native"\n chart:\n name: "myorg-web-app" # Inherits envMapping from global_config\n docker:\n app:\n env:\n API_KEY: "secret"\n # These will be formatted as array automatically\n```\n\n\n Setting `envMapping` in global_config is particularly useful when: - You have\n a standard organizational chart used by many services - You want consistent\n environment variable handling across services - You\'re migrating multiple\n services and want to reduce configuration duplication\n\n\n#### Array Format\n\nBest for charts that expect Kubernetes-style env arrays.\n\n```yaml {7-9} filename="lifecycle.yaml"\nservices:\n - name: api\n helm:\n deploymentMethod: "native"\n chart:\n name: local\n envMapping:\n app:\n format: "array"\n path: "env"\n docker:\n app:\n env:\n DATABASE_URL: 
"postgres://localhost:5432/mydb"\n API_KEY: "secret-key-123"\n NODE_ENV: "production"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set env[0].name=DATABASE_URL\n--set env[0].value=postgres://localhost:5432/mydb\n--set env[1].name=API_KEY\n--set env[1].value=secret-key-123\n--set env[2].name=NODE_ENV\n--set env[2].value=production\n```\n\n**Your chart\'s values.yaml would use it like:**\n\n```yaml\nenv:\n - name: DATABASE_URL\n value: postgres://localhost:5432/mydb\n - name: API_KEY\n value: secret-key-123\n - name: NODE_ENV\n value: production\n```\n\n#### Map Format\n\nBest for charts that expect a simple key-value object.\n\n```yaml {7-9} filename="lifecycle.yaml"\nservices:\n - name: api\n helm:\n deploymentMethod: "native"\n chart:\n name: local\n envMapping:\n app:\n format: "map"\n path: "envVars"\n docker:\n app:\n env:\n DATABASE_URL: "postgres://localhost:5432/mydb"\n API_KEY: "secret-key-123"\n NODE_ENV: "production"\n```\n\n**This produces the following Helm values:**\n\n```bash\n--set envVars.DATABASE__URL=postgres://localhost:5432/mydb\n--set envVars.API__KEY=secret-key-123\n--set envVars.NODE__ENV=production\n```\n\n\n Note: Underscores in environment variable names are converted to double\n underscores (`__`) in map format to avoid Helm parsing issues.\n\n\n**Your chart\'s values.yaml would use it like:**\n\n```yaml\nenvVars:\n DATABASE__URL: postgres://localhost:5432/mydb\n API__KEY: secret-key-123\n NODE__ENV: production\n```\n\n#### Complete Example with Multiple Services\n\n```yaml filename="lifecycle.yaml"\nservices:\n # Service using array format (common for standard Kubernetes deployments)\n - name: frontend\n helm:\n deploymentMethod: "native"\n repository: "myorg/apps"\n branchName: "main"\n envMapping:\n app:\n format: "array"\n path: "deployment.env"\n chart:\n name: "./charts/web-app"\n docker:\n app:\n dockerfilePath: "frontend/Dockerfile"\n env:\n REACT_APP_API_URL: "https://api.example.com"\n REACT_APP_VERSION: 
"{{build.uuid}}"\n\n # Service using map format (common for custom charts)\n - name: backend\n helm:\n deploymentMethod: "native"\n repository: "myorg/apps"\n branchName: "main"\n envMapping:\n app:\n format: "map"\n path: "config.environment"\n chart:\n name: "./charts/api"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/backend.dockerfile"\n ports:\n - 3000\n env:\n NODE_ENV: "production"\n SERVICE_NAME: "backend"\n\n - name: "mysql-database"\n helm:\n deploymentMethod: "native"\n repository: "myorg/api-services"\n branchName: "main"\n chart:\n name: "mysql" # Using public Helm chart\n version: "9.14.1"\n repoUrl: "https://charts.bitnami.com/bitnami"\n valueFiles:\n - "deploy/helm/mysql-values.yaml"\n```\n\n## Templated Variables\n\nLifecycle supports template variables in Helm values that are resolved at deployment time. These variables allow you to reference dynamic values like build UUIDs, docker tags, and internal hostnames.\n\n### Available Variables\n\nTemplate variables use the format `{{{variableName}}}` and are replaced with actual values during deployment:\n\n| Variable | Description | Example Value |\n| ------------------------------------ | ------------------------- | ---------------------------------------- |\n| `{{{serviceName_dockerTag}}}` | Docker tag for a service | `main-abc123` |\n| `{{{serviceName_dockerImage}}}` | Full docker image path | `registry.com/org/repo:main-abc123` |\n| `{{{serviceName_internalHostname}}}` | Internal service hostname | `api-service.env-uuid.svc.cluster.local` |\n| `{{{build.uuid}}}` | Build UUID | `env-12345` |\n| `{{{build.namespace}}}` | Kubernetes namespace | `env-12345` |\n\n### Usage in Values\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: web-api\n helm:\n deploymentMethod: "native"\n chart:\n name: "./charts/app"\n values:\n - "image.tag={{{web-api_dockerTag}}}"\n - "backend.url=http://{{{backend-service_internalHostname}}}:8080"\n - 
"env.BUILD_ID={{{build.uuid}}}"\n```\n\n\n**Docker Image Mapping**: When using custom charts, you\'ll need to map `{{{serviceName_dockerImage}}}` or `{{{serviceName_dockerTag}}}` to your chart\'s expected value path. Common patterns include:\n- `image.repository` and `image.tag` (most common)\n- `deployment.image` (single image string)\n- `app.image` or `application.image`\n- Custom paths specific to your chart\n\nCheck your chart\'s `values.yaml` to determine the correct path.\n\n\n\n#### Image Mapping Examples\n\n```yaml filename="lifecycle.yaml"\n# Example 1: Separate repository and tag (most common)\nservices:\n - name: web-api\n helm:\n chart:\n name: "./charts/standard"\n values:\n - "image.repository=registry.com/org/web-api" # Static repository\n - "image.tag={{{web-api_dockerTag}}}" # Dynamic tag only\n\n# Example 2: Combined image string\nservices:\n - name: worker\n helm:\n chart:\n name: "./charts/custom"\n values:\n - "deployment.image={{{worker_dockerImage}}}" # Full image with tag\n\n# Example 3: Nested structure\nservices:\n - name: backend\n helm:\n chart:\n name: "./charts/microservice"\n values:\n - "app.container.image={{{backend_dockerImage}}}" # Full image with tag\n```\n\n\n**Important**: Always use triple braces `{{{variable}}}` instead of double braces `{{variable}}` for Lifecycle template variables. This prevents Helm from trying to process them as Helm template functions and ensures they are passed through correctly for Lifecycle to resolve.\n\n\n### Template Resolution Order\n\n1. Lifecycle resolves `{{{variables}}}` before passing values to Helm\n2. The resolved values are then passed to Helm using `--set` flags\n3. 
Helm processes its own template functions (if any) after receiving the resolved values\n\n### Example with Service Dependencies\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: api-gateway\n helm:\n chart:\n name: "./charts/gateway"\n values:\n - "config.authServiceUrl=http://{{{auth-service_internalHostname}}}:3000"\n - "config.userServiceUrl=http://{{{user-service_internalHostname}}}:3000"\n - "image.tag={{{api-gateway_dockerTag}}}"\n\n - name: auth-service\n helm:\n chart:\n name: "./charts/microservice"\n values:\n - "image.tag={{{auth-service_dockerTag}}}"\n - "database.host={{{postgres-db_internalHostname}}}"\n```\n\n## Deployment Process\n\n\n 1. **Job Creation**: A Kubernetes job is created in the ephemeral namespace 2.\n **RBAC Setup**: Service account with namespace-scoped permissions is created\n 3. **Git Clone**: Init container clones the repository 4. **Helm Deploy**:\n Main container executes the Helm deployment 5. **Monitoring**: Logs are\n streamed in real-time via WebSocket\n\n\n### Concurrent Deployment Handling\n\nNative Helm automatically handles concurrent deployments by:\n\n- Detecting existing deployment jobs\n- Force-deleting the old job\n- Starting the new deployment\n\nThis ensures the newest deployment always takes precedence.\n\n## Monitoring Deployments\n\n### Deploy Logs Access\n\nFor services using native Helm deployment, you can access deployment logs through the Lifecycle PR comment:\n\n1. Add the `lifecycle-status-comments!` label to your PR\n2. In the status comment that appears, you\'ll see a **Deploy Logs** link for each service using native Helm\n3. 
Click the link to view real-time deployment logs\n\n### Log Contents\n\nThe deployment logs show:\n\n- Git repository cloning progress (`clone-repo` container)\n- Helm deployment execution (`helm-deploy` container)\n- Real-time streaming of all deployment output\n- Success or failure status\n\n## Chart Types\n\nLifecycle automatically detects and handles three chart types:\n\n| Type | Detection | Features |\n| ------------- | -------------------------------------------- | ---------------------------------------------- |\n| **ORG_CHART** | Matches `orgChartName` AND has `helm.docker` | Docker image injection, env var transformation |\n| **LOCAL** | Name is "local" or starts with "./" or "../" | Flexible `envMapping` support |\n| **PUBLIC** | Everything else | Standard labels and tolerations |\n\n\n The `orgChartName` is configured in the database\'s `global_config` table with\n key `orgChart`. This allows organizations to define their standard internal\n Helm chart.\n\n\n## Troubleshooting\n\n### Deployment Fails with "Another Operation in Progress"\n\n**Symptom**: Helm reports an existing operation is blocking deployment\n\n**Solution**: Native Helm automatically handles this by killing existing jobs. If the issue persists:\n\n```bash\n# Check for stuck jobs\nkubectl get jobs -n env-{uuid} -l service={serviceName}\n\n# Force delete if needed\nkubectl delete job {jobName} -n env-{uuid} --force --grace-period=0\n```\n\n### Environment Variables Not Working\n\n**Symptom**: Environment variables not passed to the deployment\n\n**Common Issues**:\n\n1. `envMapping` placed under `chart` instead of directly under `helm`\n2. Incorrect format specification (array vs map)\n3. 
Missing path configuration\n\n**Correct Configuration**:\n\n```yaml {4-7}\nhelm:\n deploymentMethod: "native"\n chart:\n name: local\n envMapping: # Correct: directly under helm\n app:\n format: "array"\n path: "env"\n```\n\n## Migration Example\n\nHere\'s a complete example showing how to migrate from GitHub-type services to Helm-type services:\n\n### Before: GitHub-type Services\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: "api-gateway"\n github:\n repository: "myorg/api-services"\n branchName: "main"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/api.dockerfile"\n env:\n BACKEND_URL: "{{backend-service_internalHostname}}:3000"\n LOG_LEVEL: "info"\n ENV_NAME: "production"\n ports:\n - 8080\n deployment:\n public: true\n resource:\n cpu:\n request: "100m"\n memory:\n request: "256Mi"\n readiness:\n tcpSocketPort: 8080\n hostnames:\n host: "example.com"\n defaultInternalHostname: "api-gateway-prod"\n defaultPublicUrl: "api.example.com"\n\n - name: "backend-service"\n github:\n repository: "myorg/api-services"\n branchName: "main"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/backend.dockerfile"\n ports:\n - 3000\n env:\n NODE_ENV: "production"\n SERVICE_NAME: "backend"\n deployment:\n public: false\n resource:\n cpu:\n request: "50m"\n memory:\n request: "128Mi"\n readiness:\n tcpSocketPort: 3000\n\n - name: "mysql-database"\n docker:\n dockerImage: "mysql"\n defaultTag: "8.0-debian"\n ports:\n - 3306\n env:\n MYSQL_ROOT_PASSWORD: "strongpassword123"\n MYSQL_DATABASE: "app_database"\n MYSQL_USER: "app_user"\n MYSQL_PASSWORD: "apppassword456"\n deployment:\n public: false\n resource:\n cpu:\n request: "100m"\n memory:\n request: "512Mi"\n readiness:\n tcpSocketPort: 3306\n serviceDisks:\n - name: "mysql-data"\n mountPath: "/var/lib/mysql"\n accessModes: "ReadWriteOnce"\n storageSize: "10Gi"\n```\n\n### After: Helm-type Services with Native 
Deployment\n\n```yaml filename="lifecycle.yaml"\nservices:\n - name: "api-gateway"\n helm:\n deploymentMethod: "native" # Enable native Helm\n version: "3.14.0"\n repository: "myorg/api-services"\n branchName: "main"\n args: "--wait --timeout 10m"\n envMapping:\n app:\n format: "array"\n path: "containers.api.env"\n chart:\n name: "./charts/microservices"\n values:\n - \'image.tag="{{{api-gateway_dockerTag}}}"\'\n - "service.type=LoadBalancer"\n - "ingress.enabled=true"\n valueFiles:\n - "deploy/helm/base-values.yaml"\n - "deploy/helm/api-gateway-values.yaml"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/api.dockerfile"\n env:\n BACKEND_URL: "{{backend-service_internalHostname}}:3000"\n LOG_LEVEL: "info"\n ENV_NAME: "production"\n ports:\n - 8080\n\n - name: "backend-service"\n helm:\n deploymentMethod: "native"\n version: "3.14.0"\n repository: "myorg/api-services"\n branchName: "main"\n envMapping:\n app:\n format: "map" # Using map format for this service\n path: "env"\n chart:\n name: "./charts/microservices"\n values:\n - \'image.tag="{{{backend-service_dockerTag}}}"\'\n - "replicaCount=2"\n valueFiles:\n - "deploy/helm/base-values.yaml"\n - "deploy/helm/backend-values.yaml"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "docker/backend.dockerfile"\n ports:\n - 3000\n env:\n NODE_ENV: "production"\n SERVICE_NAME: "backend"\n\n - name: "mysql-database"\n helm:\n deploymentMethod: "native"\n repository: "myorg/api-services"\n branchName: "main"\n chart:\n name: "mysql" # Using public Helm chart\n version: "9.14.1"\n repoUrl: "https://charts.bitnami.com/bitnami"\n valueFiles:\n - "deploy/helm/mysql-values.yaml"\n```\n\n### Key Migration Points\n\n1. **Service Type Change**: Changed from `github:` to `helm:` configuration\n2. **Repository Location**: `repository` and `branchName` move from under `github:` to directly under `helm:`\n3. 
**Deployment Method**: Added `deploymentMethod: "native"` to enable native Helm\n4. **Chart Configuration**: Added `chart:` section with local or public charts\n5. **Environment Mapping**: Added `envMapping:` to control how environment variables are passed\n6. **Helm Arguments**: Added `args:` for Helm command customization\n7. **Docker Configuration**: Kept existing `docker:` config for build process\n\n\n Note that when converting from GitHub-type to Helm-type services, the\n `repository` and `branchName` fields move from being nested under `github:` to\n being directly under `helm:`.\n\n\n\n Many configuration options (like Helm version, args, and chart details) can be\n defined in the `global_config` database table, making the service YAML\n cleaner. Only override when needed.', - }, - { - title: "Template Variables", - description: null, - date: null, - path: "docs/features/template-variables", - body: '## Overview\n\nLifecycle uses [Mustache](https://github.com/janl/mustache.js) as the template rendering engine.\n\n## Available Template Variables\n\nThe following template variables are available for use within your configuration. Variables related to specific services should use the service name as a prefix.\n\n### General Variables\n\n- **`{{{buildUUID}}}`** - The unique identifier for the Lifecycle environment, e.g., `lively-down-881123`.\n- **`{{{namespace}}}`** - Namespace for the deployments, e.g., `env-lively-down-881123`.\n- **`{{{pullRequestNumber}}}`** - The GitHub pull request number associated with the environment.\n\n### Service-Specific Variables\n\nFor service-specific variables, replace `` with the actual service name.\n\n- **`{{{_internalHostname}}}`** - The internal hostname of the deployed service. 
If the service is optional and not deployed, it falls back to `defaultInternalHostname`.\n\n \n `service_internalHostname` will be substituted with local cluster full\n domain name like `service.namespace.svc.cluster.local` to be able to work\n with deployments across namespaces.\n \n\n- **`{{{_publicUrl}}}`** - The public URL of the deployed service. If optional and not deployed, it defaults to `defaultPublicUrl` under the `services` table.\n- **`{{{_sha}}}`** - The GitHub SHA that triggered the Lifecycle build.\n- **`{{{_branchName}}}`** - The branch name of the pull request that deployed the environment.\n- **`{{{_UUID}}}`** - The build UUID of the service. If listed under `optionalServices` or `defaultServices`, its value depends on whether the service is selected:\n - If selected, it is equal to `buildUUID`.\n - If not selected (or if service not part of deploys created), it defaults to **`dev-0`**.\n\n## Usage Example\n\n```yaml\nservices:\n frontend:\n # ...\n env:\n API_URL: "{{{backend_publicUrl}}}"\n UUID: "{{{buildUUID}}}"\n```\n\nThis ensures the `PUBLIC_URL` and `INTERNAL_HOST` variables are dynamically assigned based on the ephemeral environment deployment.\n\n \n- Undefined variables will result in an empty string unless handled explicitly.\n- Use triple curly braces (`{{{ }}}`) to prevent unwanted HTML escaping.\n- Ensure service names are correctly referenced in the template without any spaces.\n\n\nFor more details, refer to the [Mustache.js documentation](https://github.com/janl/mustache.js).', - }, - { - title: "Service Dependencies", - description: - "Understand service dependencies, their impact, and configuration.", - date: null, - path: "docs/features/service-dependencies", - body: 'This document will cover `environment.{defaultServices,optionalServices}` and `service.requires`, their differences, impact scope, and usage.\n\n## `environment.{defaultServices,optionalServices}`\n\n### Impact scope\n\n| Scope | Impact |\n| 
------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ❌ |\n| dev-0\\* | ❌ |\n\nThis represents the default environment that will be created by lifecycle when a pull request is opened in the service repo\\* and does not have any impact on outside repos, dev-0, or any other static environments that use this service.\n\n## `services.requires`\n\n### Impact scope\n\n| Scope | Impact |\n| ------------------------- | ------ |\n| Service repo\\* | ✅ |\n| Outside repo\\* | ✅ |\n| dev-0\\* | ✅ |\n\n`services.requires` has an impact across the board; hence, it is important to understand how it works and when we should use them.\n\n**Please read the info blocks below carefully.**\n\nYou can think of `services.requires` as a hard dependency definition. For example, if you have an API service and a database, the API service will have a hard dependency on the database.\nIn this scenario, the database should not be defined as the default service. Instead, we should make the dependency explicitly clear by adding the database to the API’s `requires` block.\nBy doing this, we ensure that any outside repo that wants to use our API service will get the database along with it but only needs to specify the API service in their `defaultServices` or `optionalServices`.\n\n\n Only services defined in `lifecycle.yaml` should be used in the `requires`\n array. 
If a service is defined in an outside repo, use\n `environment.defaultServices` instead.\n\n\nDo not use services in the `services.requires` if the service itself is not\ndefined in the same lifecycle.yaml.\n\n\n Services defined in the `requires` block will only be resolved 1 level down.\n\n\n**This is a very important nuance, which we get tripped by regularly.**\n\n---\n\n## Examples\n\nTo better illustrate the above statement, consider this example.\n\nRepository A `r-A` has 3 services `s-A`, `s-B`, and `s-C`.\n\n- `s-A` requires `s-B`.\n- `s-B` requires `s-C`.\n\nAs you can see, `s-A` has an indirect dependency on `s-C` through `s-B`.\n\n### Scenario 1: Pull Request in Service repo\\* ✅\n\nWhen we open a pull request in `r-A` repo, lifecycle will deploy 3 services: `s-A`, `s-B`, and `s-C`.\n\n#### Breakdown\n\n- Lifecycle deploys `s-A` and `s-B` because they are defined in `defaultServices`.\n- Services defined in the `requires` block will only be resolved **one level down**.\n- Only services defined in `lifecycle.yaml` should be used in the `requires` array. If a service is defined in an outside repo, use `environment.defaultServices` instead.\n\n```yaml\n# `r-A.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: "s-A"\n - name: "s-B"\n\nservices:\n - name: "s-A"\n requires:\n - name: "s-B"\n helm: ...\n\n - name: "s-B"\n requires:\n - name: "s-C"\n helm: ...\n\n - name: "s-C"\n helm: ...\n```\n\n### Scenario 2: ❌\n\nRepository B `r-B` has service `s-X` and also defines an outside repo `r-A` service `s-A` as `environment.defaultServices`.\n\n```yaml\n# `r-B.lifecycle.yaml`\nenvironment:\n defaultServices:\n - name: "s-X"\n - name: "s-A"\n repository: "lifecycle/r-A"\n branch: "main"\n\nservices:\n - name: "s-X"\n helm: ...\n```\n\n#### Breakdown\n\n1. Lifecycle deploys `s-X` and `s-A` because they are defined in `defaultServices`.\n2. Lifecycle deploys `s-B` because it is a 1st level dependency of a service (`s-A`) listed in `defaultServices`.\n3. 
Lifecycle **does not** deploy `s-C` since it is **not** a 1st level dependency of any service listed in `defaultServices` or `optionalServices`.\n\nThe way this scenario manifests is lifecycle will deploy `s-X`, `s-A`, and `s-B`, but the build will likely **fail** because `s-B` is missing a required dependency `s-C`.\n\n### Solutions\n\nThere are 2 ways to address this depending on your use case.\n\n#### Solution 1\n\nAdd `s-B` to `r-B`’s `environment.defaultServices` block in `r-B.lifecycle.yaml`. In effect, this will make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: "s-X"\n - name: "s-A"\n repository: "lifecycle/r-A"\n branch: "main"\n - name: "s-B"\n repository: "lifecycle/r-A"\n branch: "main"\n```\n\n#### Solution 2\n\nAdd `s-C` to the `services.requires` block of `r-A` in `r-A.lifecycle.yaml`. This will also make `s-C` a first-level dependency.\n\n```yaml\nenvironment:\n defaultServices:\n - name: "s-A"\n - name: "s-B"\n\nservices:\n - name: "s-A"\n requires:\n - name: "s-B"\n - name: "s-C"\n helm: ...\n```\n\n### Choosing the Right Solution\n\nIn summary, the solution you should use depends on how you want your service to be consumed in an outside repo\\*.\n\n- If you want outside repos to explicitly include `s-A` and `s-B`, use **Solution 1**.\n- If you want outside repos to only include `s-A` and let dependencies resolve automatically, use **Solution 2**.\n\n---\n\n### Terminology\n\n- **Service repo**: The repository where `lifecycle.yaml` is defined.\n- **Outside repo**: Another repository referencing it.\n- **dev-0**: Default static environment.', - }, - { - title: "Install Lifecycle", - description: null, - date: null, - path: "docs/setup/install-lifecycle", - body: "Now that the infrastructure components are setup, let's install the lifecycle app and create a new Github app that will send events to the application to process and create ephemeral dev environments.\n\n\n Make sure you have updated the kube config 
to be able to `helm install` in the\n cluster you just created!\n\n\n- Follow installation steps in [lifecycle helm chart](https://github.com/GoodRxOSS/helm-charts/blob/main/charts/lifecycle/README.md)\n\n- Wait for the installation to complete and verify that the pods are running:\n\n```sh\nkubectl get pods -n lifecycle-app\n```\n\n- Once the pods are running, you can access the application at your configured domain (e.g. `https://app.0env.com`)\n\n\n\nJust like that, you have successfully installed Lifecycle and set up the necessary infrastructure to start creating ephemeral environments for your GitHub pull requests!\n\nIf you notice any secure certificate issues when accessing the application, you can check the status of your certificate using the following command:\n\n```sh\nkubectl get certificate -n lifecycle-app\n```\n\n\n\nMake sure the certificate is in the `Ready` state. If it is not, you may need to wait a bit longer for the certificate to be issued or troubleshoot any issues with your DNS settings.\n\nLet's move on to the next step where we will create a GitHub app to connect Lifecycle with your repositories.", - }, - { - title: "Prerequisites", - description: null, - date: null, - path: "docs/setup/prerequisites", - body: 'Before we start with the setup, let\'s make sure the following prerequisites are in place:\n\n- **GitHub Account**: You\'ll need either a personal or an organization GitHub account. [Sign up for GitHub](https://github.com/join)\n\n- **Cloud Provider Account**: A Google Kubernetes Engine (GKE) or Amazon Web Services (AWS) Account. You\'ll need an active account with either platform to proceed.\n - [Sign up for Google Cloud](https://cloud.google.com) and create a project\n - [Sign up for AWS](https://aws.amazon.com/)\n\n\n We recommend using an isolated project or account in your cloud provider\n specifically for this setup to begin with. 
This helps to keep your resources\n organized and manageable as you experiment with Lifecycle.\n\n\n- **CLI Tools**\n\n - **[OpenTofu](https://opentofu.org/docs/intro/install/)** — Infrastructure as code tool (OpenTofu is a community-driven fork of Terraform).\n - **[kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)** — Command-line tool for interacting with Kubernetes clusters.\n - **[gcloud](https://formulae.brew.sh/cask/google-cloud-sdk)** or **[aws-cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)** — Command-line tools for managing Google Cloud or AWS resources, respectively.\n\n- **Custom Domain**: You will need a custom domain (e.g., `0env.com`) to route traffic to your application environments. This is particularly important for setting up:\n\n - Public callback and webhook URLs for the GitHub App\n - Ingress routing within the Kubernetes cluster\n - Secure (HTTPS) access via TLS certificates\n\n- **DNS Provider with Wildcard Support**: The domain must be managed by a DNS provider that supports wildcard DNS records (e.g., \\*.0env.com). This is necessary to dynamically route traffic from GitHub to the Lifecycle app and to ephemeral environments.\n\n Supported DNS providers that support wildcard for infrastructure setup include:\n\n\n\n \n **Manual Setup**:\n Setup a [public DNS zone in Google Cloud](https://cloud.google.com/dns) to manage your domain\'s DNS records.\n\n - Follow steps [here](https://cloud.google.com/dns/docs/zones#create-pub-zone) to setup a\n public DNS zone.\n\n - Wildcard DNS records will be created by the OpenTofu modules in the next steps.\n\n**CLI Setup**:\nUse the `gcloud` CLI to create a public DNS zone for your domain:\n\n```sh\ngcloud config set project \ngcloud auth application-default login\ngcloud services enable dns.googleapis.com --project=\ngcloud dns --project= managed-zones create --description="Lifecycle OSS starter DNS public zone" --dns-name="." 
--visibility="public" --dnssec-state="off"\n```\n\n_Update your domain\'s DNS records with NS records provided by Google Cloud DNS. You can find these in the Google Cloud Console under the DNS zone you created._\n\n \n\n\n **[AWS Route 53](https://aws.amazon.com/route53/)**: Amazon\'s scalable DNS web\n service designed to route end users to Internet applications.\n\n **Manual Setup**:\n\n - Authenticate with AWS CLI using the role/usr you desire.\n - Ensure you have [your domain provisioned to accept wildcards](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html); eg `*.lifecycle..com`\n\n **CLI Setup**:\n\n ```sh\n aws configure\n ```\n\n ```sh\n aws route53 change-resource-record-sets --hosted-zone-id --change-batch \'{\n "Comment": "CREATE wildcard for ",\n "Changes": [\n {\n "Action": "CREATE",\n "ResourceRecordSet": {\n "Name": "..com",\n "Type": "A",\n "TTL": 300,\n "ResourceRecords": [\n {\n "Value": "*********"\n }\n ]\n }\n }\n ]\n }\'\n ```\n\n\n\n\n If you want to use Cloudflare as your primary DNS provider and manage your DNS records on Cloudflare, your domain should be using a full setup.\n This means that you are using Cloudflare for your authoritative DNS nameservers.\n Follow the steps [here](https://developers.cloudflare.com/dns/zone-setups/full-setup/setup/) to setup a public DNS zone in Cloudflare.\n\n\n\n\n---\n\n\n Ensure that your domain’s nameservers are pointing to your chosen DNS provider\n at your registrar, and that you have permission to create and manage DNS\n records programmatically. 
**This is crucial for the setup to work\n correctly and will take time to propagate.**\n\nUse https://dnschecker.org/#NS to verify that your domain\'s nameservers are correctly set up.\n\n\n\nOnce you have these prerequisites in place, you can proceed to the next steps in setting up the cluster and application.', - }, - { - title: "Setup your cluster", - description: null, - date: null, - path: "docs/setup/setup-infra", - body: 'Based on the prerequisites you\'ve set up, you\'re now ready to configure your Kubernetes cluster for Lifecycle. This setup will ensure that your cluster is properly configured to run Lifecycle and manage your application environments effectively.\n\n\n Note that the infra setup with the OpenTofu modules below will **open your\n cluster to the world.** \n 🛡️ Make sure to **shield** your cluster by implementing appropriate network policies\n and access controls after the initial setup.\n\n\n\nClick on the cloud provider you are using to set up your cluster:\n\n- [Google Cloud Platform (GCP)](#google-cloud-platform)\n- [Amazon Web Services (AWS)](#amazon-web-services)\n\n## Google Cloud Platform\n\n### Setup application credentials\n\n```sh\n# setup current project\ngcloud config set project \n# creates the application default credentials\ngcloud auth application-default login\n```\n\nEnable Kubernetes Engine and Cloud DNS APIs:\n\n```sh\ngcloud services enable container.googleapis.com --project=\ngcloud services enable dns.googleapis.com --project=\n```\n\n\n Note that you need to replace `` with your actual Google Cloud project ID not the project name.\n\n\n### Bootstrap infrastructure\n\n- Clone the infrastructure repository:\n\n```sh\ngit clone https://github.com/GoodRxOSS/lifecycle-opentofu/\ncd lifecycle-opentofu\n```\n\n- Follow steps in the [infrastructure repository](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#%EF%B8%8F-quick-start) to set up the necessary infrastructure components.\n\n```sh\ncp 
example.auto.tfvars secrets.auto.tfvars\n```\n\nExample `secrets.auto.tfvars` file:\n\n```hcl secrets.auto.tfvars\ngcp_project = ""\ngcp_region = ""\n# this is the default credentials file created by gcloud cli\ngcp_credentials_file = "~/.config/gcloud/application_default_credentials.json"\ncluster_provider = "gke"\ndns_provider = "cloud-dns" # [cloudflare|route53|cloud-dns]\napp_domain = "" # e.g. 0env.com\n\ncluster_name = "lifecycle-gke" # change this to your preferred cluster name\napp_namespace = "lifecycle-app" # use default namespace\n```\n\n- Initialize and apply the Terraform configuration:\n\n```sh\ntofu init\ntofu plan\ntofu apply\n```\n\nThis will create the necessary infrastructure components, including the Kubernetes cluster, DNS records, database, redis and other resources required for Lifecycle to function properly.\n\nAfter the Terraform apply completes, you should have a fully functional Kubernetes cluster with the necessary resources set up.\n\nLet\'s test the public DNS setup by accessing the test application deployed called `kuard` and follow the rest of the setup instructions from the `tofu apply` output.\n\n```sh\ncurl -v https://kuard.0env.com # replace with your domain\n```\n\nRefer example output [here](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#4-initialize--apply) to setup kubeconfig and access the cluster using `kubectl`.\n\nNow that your cluster is set up, you can proceed to installing Lifecycle application to your cluster.\n\n\n}\n title="Install Lifecycle"\n href="/docs/setup/install-lifecycle"\n arrow\n/>\n\n---\n\n## Amazon Web Services\n\n```sh\n# setup current project\naws configure --profile lifecycle-oss-eks\nAWS Access Key ID [***]: \nAWS AWS Secret Access Key [***]: \nDefault Region name: \nDefault output format: \n```\n\n\\*This profile needs to have access a user with `AdministratorAccess` access.\n\n---\n\n### Bootstrap infrastructure\n\n- Clone the infrastructure repository:\n\n```sh\ngit clone 
https://github.com/GoodRxOSS/lifecycle-opentofu/\ncd lifecycle-opentofu\n```\n\n- Follow steps in the [infrastructure repository](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#%EF%B8%8F-quick-start) to set up the necessary infrastructure components.\n\n```sh\ncp example.auto.tfvars secrets.auto.tfvars\n```\n\nExample `secrets.auto.tfvars` file:\n\n```hcl secrets.auto.tfvars\n# gcp_project = ""\n# gcp_region = ""\n# this is the default credentials file created by gcloud cli\n# gcp_credentials_file = "~/.config/gcloud/application_default_credentials.json"\ncluster_provider = "aws"\ndns_provider = "route53" # [cloudflare|route53|cloud-dns]\napp_domain = "example.com" # e.g. 0env.com\n\ncluster_name = "lifecycle-eks" # change this to your preferred cluster name\napp_namespace = "lifecycle-app" # use default namespace\n```\n\n- Initialize and apply the Terraform configuration:\n\n```sh\ntofu init\ntofu plan\ntofu apply\n```\n\nThis will create the necessary infrastructure components, including the Kubernetes cluster, DNS records, database, redis and other resources required for Lifecycle to function properly.\n\nAfter the Terraform apply completes, you should have a fully functional Kubernetes cluster with the necessary resources set up.\n\nLet\'s test the public DNS setup by accessing the test application deployed called `kuard` and follow the rest of the setup instructions from the `tofu apply` output.\n\n```sh\ncurl -v https://kuard.0env.com # replace with your domain\n```\n\nRefer example output [here](https://github.com/GoodRxOSS/lifecycle-opentofu/?tab=readme-ov-file#4-initialize--apply) to setup kubeconfig and access the cluster using `kubectl`.\n\nNow that your cluster is set up, you can proceed to installing Lifecycle application to your cluster.\n\n\n}\n title="Install Lifecycle"\n href="/docs/setup/install-lifecycle"\n arrow\n/>', - }, - { - title: "Additional Configuration", - description: null, - date: null, - path: 
"docs/setup/configure-lifecycle", - body: "We are in the final step of the setup process.\n\n**This step is Optional but highly recommended to ensure the default IP Whitelist is set for the environments created by the Lifecycle app.** This will help in securing the environments and restricting access to only the specified IPs or CIDR blocks.\n\n## Set Default IP Whitelist\n\n- Connect to the `postgres` database using the `psql` command line tool or any other database client.\n\n \n\n Database password was auto generated during the infra setup and can be found\n retrieved from the `app-postgres` secret in the `lifecycle-app`\n namespace.\n\n \n\n- Retrieve the database password:\n\n```sh\n kubectl get secret app-postgres --namespace lifecycle-app \\\n -o jsonpath='{.data}' | jq 'with_entries(.value |= @base64d)'\n```\n\n- Run the following SQL commands to update the configuration:\n\n```sql\n-- provide a default IP whitelist for the environments, default is open to all IPs\nUPDATE public.global_config\nSET\n config = (\n config::jsonb ||\n '{\n \"defaultIPWhiteList\": \"{ 0.0.0.0/0 }\"\n }'::jsonb\n )::json,\n \"updatedAt\" = NOW()\nWHERE \"key\" = 'serviceDefaults';\n```\n\n\n Note that the infra setup with the OpenTofu modules below will **open your\n cluster to the world.** \n 🛡️ Make sure to **shield** your cluster by implementing appropriate network policies\n and access controls after the initial setup.\n\nReplace the `defaultIPWhiteList` under `global_config.serviceDefaults` with your actual IP whitelist or CIDR block to restrict access to the deployed environments.\n\n\n\n---\n\n## Refresh config cache\n\n```sh\ncurl -X PUT https://app./api/v1/config/cache\n```\n\nThis will refresh the configuration cache and apply the changes you made to the database for the Lifecycle app.\n\nWe are all set! 
🎉 And ready to create our first PR based ephemeral environment.", - }, - { - title: "Configure Application", - description: null, - date: null, - path: "docs/setup/create-github-app", - body: "## Configure BuildKit Endpoint\n\nBefore creating the GitHub app, you need to configure the BuildKit endpoint in the database:\n\n\n Set the `HELM_RELEASE` environment variable to your actual Helm release name\n before running the commands below.\n\n\n\n The following commands will create the `buildkit` object and `endpoint`\n configuration if they don't exist, or update them if they do.\n\n\n### Option 1: Using kubectl exec with psql\n\nExecute the following commands to connect to the PostgreSQL pod and run the query:\n\n```bash\n# Set your Helm release name (replace with your actual release name)\nexport HELM_RELEASE=\n\n# Get the database password from the secret\nexport PGPASSWORD=$(kubectl get secret ${HELM_RELEASE}-postgres -n lifecycle-app -o jsonpath='{.data.POSTGRES_USER_PASSWORD}' | base64 -d)\n\n# Run the query\nkubectl exec -it ${HELM_RELEASE}-postgres-0 -n lifecycle-app -- env PGPASSWORD=$PGPASSWORD psql -U lifecycle -d lifecycle -c \"\nUPDATE global_config\nSET config = jsonb_set(\n jsonb_set(\n COALESCE(config::jsonb, '{}'::jsonb),\n '{buildkit}',\n COALESCE(config::jsonb->'buildkit', '{}'::jsonb),\n true\n ),\n '{buildkit,endpoint}',\n '\\\"tcp://${HELM_RELEASE}-buildkit.lifecycle-app.svc.cluster.local:1234\\\"'::jsonb,\n true\n),\n\\\"updatedAt\\\" = NOW()\nWHERE key = 'buildDefaults';\"\n```\n\n### Option 2: Direct SQL query\n\nIf you have direct database access, run the following SQL query (replace `` with your actual Helm release name):\n\n```sql\nUPDATE global_config\nSET config = jsonb_set(\n jsonb_set(\n COALESCE(config::jsonb, '{}'::jsonb),\n '{buildkit}',\n COALESCE(config::jsonb->'buildkit', '{}'::jsonb),\n true\n ),\n '{buildkit,endpoint}',\n '\"tcp://-buildkit.lifecycle-app.svc.cluster.local:1234\"'::jsonb,\n true\n),\n\"updatedAt\" = NOW()\nWHERE 
key = 'buildDefaults';\n```\n\n### Refresh Configuration Cache\n\nAfter running either option above, refresh the configuration cache:\n\n```bash\ncurl -X 'PUT' \\\n 'https://app./api/v1/config/cache' \\\n -H 'accept: application/json'\n```\n\nReplace `` with your actual domain (e.g., `0env.com`).\n\n## Create GitHub App\n\nTo create a Github app that will send events to the Lifecycle with necessary permissions, follow these steps:\n\n\n Make sure you have admin access to the Github organization or account where\n you want to create the app.\n\n\n- Navigate to your installed Lifecycle app at `https://app./setup` (replace `` with your actual domain. e.g. `https://app.0env.com/setup`).\n \n- Select `Personal` or `Organization` based on your needs.\n- Fill in the required fields:\n\n - **Github App Name**: A name for your app. (should be unique, use a prefix with your name or organization. Refer Github app naming convention [here](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/))\n - **Organization Name**: Github organization name where the app will be created. Required if you selected `Organization`.\n\n- Click `Create App`\n- On the Github app creation page, confirm the app name and click `Create`\n- Once the app is created, you will be redirected to the app installation page where you can choose one or more repositories to install the newly minted app.\n\n \n Make sure to select the repositories you want the app to have access to. You\n can always change this later in the app settings but **adding at least one\n repository is required to proceed with the setup**.\n \n\n \n\n- Voila! 🎉 Your Github app is now created and installed.\n\n\n\n- Click `Configure and Restart` to apply the changes and start using the app.\n\n\n The step above sets up the global config values that Lifecycle app will use\n when creating ephemeral environments and processing pull requests. 
And restarts the\n `deployment` for the github app secrets to take effect.\n\n\n---\n\nLet's move on to the final step where we will configure the Lifecycle app config for processing pull requests and creating ephemeral environments.", - }, - { - title: "Deploy Issues", - description: - "Understand how to handle common deploy issues with environments", - date: null, - path: "docs/troubleshooting/deploy-issues", - body: "TODO: This document will cover common deploy issues that you may encounter\n when working with Lifecycle environments.", - }, - { - title: "Missing PR comment", - description: null, - date: null, - path: "docs/troubleshooting/github-app-webhooks", - body: "Let's quickly validate that the app is able to send events to the Lifecycle app successfully.\n\n- Navigate to your Github app\n- Click `App Settings` link in the Github application page\n- Choose `Advanced` from the left sidebar\n- `Recent Deliveries` section should show a successful delivery of the `installation` event to the Lifecycle app.\n\n \n If you see an error or no deliveries, make sure the app is installed in\n at least one repository and that the webhook URL is set correctly by\n navigating to the `General` section from the left sidebar and checking the\n `Webhook URL` field.\n \n\n- If the delivery is successful, you should see a status code of `200 OK`\n\n## Failing deliveries\n\nIf you see a delivery failure, it could be due to various reasons. Here are some common issues and how to resolve them:\n\n### Github App secrets\n\n- Make sure that the Github App secrets are correctly set in the `lifecycle-app` namespace. 
You can verify this by running the following command:\n\n```sh\nkubectl get secret app-secrets --namespace lifecycle-app \\\n -o jsonpath='{.data}' | jq 'with_entries(.value |= @base64d)'\n```\n\n- The output should include all the `GITHUB_*` variables with the correct values.\n\n- If the secrets are present but the delivery is still failing, try restarting the following deployments.\n\n```sh\n kubectl rollout restart deployment lifecycle-web lifecycle-worker -n lifecycle-app\n```\n\n- Try triggering a new event (create a pull request) by making a change in the repository or by manually redelivering a failed delivery.", - }, - { - title: "Troubleshooting Build Issues", - description: - "Understand how to handle common build issues with environments", - date: null, - path: "docs/troubleshooting/build-issues", - body: "TODO: This document will cover common build issues that you may encounter when\n working with Lifecycle environments.", - }, - { - title: "Lifecycle Full Schema", - description: - "Lifecycle Schema documentation; this page contains the full schema as defined in lifecycle core—all at once.", - date: null, - path: "docs/schema/full", - body: '## Full Lifecycle Schema\n\nBelow is the full Lifecycle schema as defined in the `lifecycle.yaml` file with basic comments for each item.\n\n```yaml\n# @section environment\nenvironment:\n # @param environment.autoDeploy\n autoDeploy: false\n # @param environment.useGithubStatusComment\n useGithubStatusComment: false\n # @param environment.defaultServices\n defaultServices:\n # @param environment.defaultServices[]\n - # @param environment.defaultServices.name (required)\n name: ""\n # @param environment.defaultServices.repository\n repository: ""\n # @param environment.defaultServices.branch\n branch: ""\n # @param environment.optionalServices\n optionalServices:\n # @param environment.optionalServices[]\n - # @param environment.optionalServices.name (required)\n name: ""\n # @param 
environment.optionalServices.repository\n repository: ""\n # @param environment.optionalServices.branch\n branch: ""\n\n# @section services\nservices:\n # @param services[]\n - # @param services.name (required)\n name: ""\n # @param services.appShort\n appShort: ""\n # @param services.defaultUUID\n defaultUUID: ""\n # @param services.github\n github:\n # @param services.github.repository (required)\n repository: ""\n # @param services.github.branchName (required)\n branchName: ""\n # @param services.github.docker (required)\n docker:\n # @param services.github.docker.defaultTag (required)\n defaultTag: ""\n # @param services.github.docker.pipelineId\n pipelineId: ""\n # @param services.github.docker.ecr\n ecr: ""\n # @param services.github.docker.app (required)\n app:\n # @param services.github.docker.app.afterBuildPipelineConfig\n afterBuildPipelineConfig:\n # @param services.github.docker.app.afterBuildPipelineConfig.afterBuildPipelineId\n afterBuildPipelineId: ""\n # @param services.github.docker.app.afterBuildPipelineConfig.detatchAfterBuildPipeline\n detatchAfterBuildPipeline: false\n # @param services.github.docker.app.afterBuildPipelineConfig.description\n description: ""\n # @param services.github.docker.app.dockerfilePath (required)\n dockerfilePath: ""\n # @param services.github.docker.app.command\n command: ""\n # @param services.github.docker.app.arguments\n arguments: ""\n # @param services.github.docker.app.env\n env:\n\n # @param services.github.docker.app.ports\n ports:\n # @param services.github.docker.app.ports[]\n - ""\n # @param services.github.docker.init\n init:\n # @param services.github.docker.init.dockerfilePath (required)\n dockerfilePath: ""\n # @param services.github.docker.init.command\n command: ""\n # @param services.github.docker.init.arguments\n arguments: ""\n # @param services.github.docker.init.env\n env:\n\n # @param services.github.docker.builder\n builder:\n # @param services.github.docker.builder.engine\n engine: ""\n # 
@param services.github.deployment\n deployment:\n # @param services.github.deployment.helm\n helm:\n # @param services.github.deployment.helm.enabled\n enabled: false\n # @param services.github.deployment.helm.chartName\n chartName: ""\n # @param services.github.deployment.helm.chartRepoUrl\n chartRepoUrl: ""\n # @param services.github.deployment.helm.chartVersion\n chartVersion: ""\n # @param services.github.deployment.helm.cmdPs\n cmdPs: ""\n # @param services.github.deployment.helm.action\n action: ""\n # @param services.github.deployment.helm.customValues\n customValues:\n # @param services.github.deployment.helm.customValues[]\n - ""\n # @param services.github.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.github.deployment.helm.customValueFiles[]\n - ""\n # @param services.github.deployment.helm.helmVersion\n helmVersion: ""\n # @param services.github.deployment.helm.attachPvc\n attachPvc:\n # @param services.github.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.github.deployment.helm.attachPvc.mountPath\n mountPath: ""\n # @param services.github.deployment.public\n public: false\n # @param services.github.deployment.capacityType\n capacityType: ""\n # @param services.github.deployment.resource\n resource:\n # @param services.github.deployment.resource.cpu\n cpu:\n # @param services.github.deployment.resource.cpu.request\n request: ""\n # @param services.github.deployment.resource.cpu.limit\n limit: ""\n # @param services.github.deployment.resource.memory\n memory:\n # @param services.github.deployment.resource.memory.request\n request: ""\n # @param services.github.deployment.resource.memory.limit\n limit: ""\n # @param services.github.deployment.readiness\n readiness:\n # @param services.github.deployment.readiness.disabled\n disabled: false\n # @param services.github.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.github.deployment.readiness.httpGet\n httpGet:\n # @param 
services.github.deployment.readiness.httpGet.path\n path: ""\n # @param services.github.deployment.readiness.httpGet.port\n port: 0\n # @param services.github.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.github.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.github.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.github.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.github.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.github.deployment.hostnames\n hostnames:\n # @param services.github.deployment.hostnames.host\n host: ""\n # @param services.github.deployment.hostnames.acmARN\n acmARN: ""\n # @param services.github.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: ""\n # @param services.github.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: ""\n # @param services.github.deployment.network\n network:\n # @param services.github.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.github.deployment.network.ipWhitelist[]\n - ""\n # @param services.github.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param services.github.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.github.deployment.network.grpc\n grpc:\n # @param services.github.deployment.network.grpc.enable\n enable: false\n # @param services.github.deployment.network.grpc.host\n host: ""\n # @param services.github.deployment.network.grpc.defaultHost\n defaultHost: ""\n # @param services.github.deployment.serviceDisks\n serviceDisks:\n # @param services.github.deployment.serviceDisks[]\n - # @param services.github.deployment.serviceDisks.name (required)\n name: ""\n # @param services.github.deployment.serviceDisks.mountPath (required)\n mountPath: ""\n # @param services.github.deployment.serviceDisks.accessModes\n accessModes: ""\n # @param 
services.github.deployment.serviceDisks.storageSize (required)\n storageSize: ""\n # @param services.github.deployment.serviceDisks.medium\n medium: ""\n # @param services.docker\n docker:\n # @param services.docker.dockerImage (required)\n dockerImage: ""\n # @param services.docker.defaultTag (required)\n defaultTag: ""\n # @param services.docker.command\n command: ""\n # @param services.docker.arguments\n arguments: ""\n # @param services.docker.env\n env:\n\n # @param services.docker.ports\n ports:\n # @param services.docker.ports[]\n - ""\n # @param services.docker.deployment\n deployment:\n # @param services.docker.deployment.helm\n helm:\n # @param services.docker.deployment.helm.enabled\n enabled: false\n # @param services.docker.deployment.helm.chartName\n chartName: ""\n # @param services.docker.deployment.helm.chartRepoUrl\n chartRepoUrl: ""\n # @param services.docker.deployment.helm.chartVersion\n chartVersion: ""\n # @param services.docker.deployment.helm.cmdPs\n cmdPs: ""\n # @param services.docker.deployment.helm.action\n action: ""\n # @param services.docker.deployment.helm.customValues\n customValues:\n # @param services.docker.deployment.helm.customValues[]\n - ""\n # @param services.docker.deployment.helm.customValueFiles\n customValueFiles:\n # @param services.docker.deployment.helm.customValueFiles[]\n - ""\n # @param services.docker.deployment.helm.helmVersion\n helmVersion: ""\n # @param services.docker.deployment.helm.attachPvc\n attachPvc:\n # @param services.docker.deployment.helm.attachPvc.enabled\n enabled: false\n # @param services.docker.deployment.helm.attachPvc.mountPath\n mountPath: ""\n # @param services.docker.deployment.public\n public: false\n # @param services.docker.deployment.capacityType\n capacityType: ""\n # @param services.docker.deployment.resource\n resource:\n # @param services.docker.deployment.resource.cpu\n cpu:\n # @param services.docker.deployment.resource.cpu.request\n request: ""\n # @param 
services.docker.deployment.resource.cpu.limit\n limit: ""\n # @param services.docker.deployment.resource.memory\n memory:\n # @param services.docker.deployment.resource.memory.request\n request: ""\n # @param services.docker.deployment.resource.memory.limit\n limit: ""\n # @param services.docker.deployment.readiness\n readiness:\n # @param services.docker.deployment.readiness.disabled\n disabled: false\n # @param services.docker.deployment.readiness.tcpSocketPort\n tcpSocketPort: 0\n # @param services.docker.deployment.readiness.httpGet\n httpGet:\n # @param services.docker.deployment.readiness.httpGet.path\n path: ""\n # @param services.docker.deployment.readiness.httpGet.port\n port: 0\n # @param services.docker.deployment.readiness.initialDelaySeconds\n initialDelaySeconds: 0\n # @param services.docker.deployment.readiness.periodSeconds\n periodSeconds: 0\n # @param services.docker.deployment.readiness.timeoutSeconds\n timeoutSeconds: 0\n # @param services.docker.deployment.readiness.successThreshold\n successThreshold: 0\n # @param services.docker.deployment.readiness.failureThreshold\n failureThreshold: 0\n # @param services.docker.deployment.hostnames\n hostnames:\n # @param services.docker.deployment.hostnames.host\n host: ""\n # @param services.docker.deployment.hostnames.acmARN\n acmARN: ""\n # @param services.docker.deployment.hostnames.defaultInternalHostname\n defaultInternalHostname: ""\n # @param services.docker.deployment.hostnames.defaultPublicUrl\n defaultPublicUrl: ""\n # @param services.docker.deployment.network\n network:\n # @param services.docker.deployment.network.ipWhitelist\n ipWhitelist:\n # @param services.docker.deployment.network.ipWhitelist[]\n - ""\n # @param services.docker.deployment.network.pathPortMapping\n pathPortMapping:\n\n # @param services.docker.deployment.network.hostPortMapping\n hostPortMapping:\n\n # @param services.docker.deployment.network.grpc\n grpc:\n # @param services.docker.deployment.network.grpc.enable\n 
enable: false\n # @param services.docker.deployment.network.grpc.host\n host: ""\n # @param services.docker.deployment.network.grpc.defaultHost\n defaultHost: ""\n # @param services.docker.deployment.serviceDisks\n serviceDisks:\n # @param services.docker.deployment.serviceDisks[]\n - # @param services.docker.deployment.serviceDisks.name (required)\n name: ""\n # @param services.docker.deployment.serviceDisks.mountPath (required)\n mountPath: ""\n # @param services.docker.deployment.serviceDisks.accessModes\n accessModes: ""\n # @param services.docker.deployment.serviceDisks.storageSize (required)\n storageSize: ""\n # @param services.docker.deployment.serviceDisks.medium\n medium: ""\n```', - }, - { - title: "Terminology", - description: null, - date: null, - path: "docs/getting-started/terminology", - body: "This glossary provides an overview of key Lifecycle concepts and terminology. Let's see how they fit into the environment setup and deployment process.\n\n## Repository\n\nA **repository** refers to a GitHub repository. Each environment that is built **must** have a default repository and an associated pull request.\n\n## Service\n\nA **service** is a deployable artifact. It can be a Docker container, CI pipeline, RDS database, or Helm chart. A single repository can contain multiple services.\n\n**Example:** \n`frontend-service` and `frontend-cache` are two services required for the frontend application to function correctly.\n\n## Environment\n\nAn **environment** is a stack of services built and connected together.\n\n- **`defaultServices`** are built and deployed in an environment by default.\n- **`optionalServices`** can be built and deployed only when needed; otherwise, they fallback to the **default static environment**.\n\n## Static Environment\n\nA **static environment** is a long-lived environment based on a pull request. 
It tracks branches from configured services and updates automatically when new changes are merged.\n\n## Build\n\nA **build** is the actual instance of the process to build and deploy services within an environment.\n\n- Each build is uniquely identified by Lifecycle using a UUID (e.g., `arm-model-060825` or `dev-0`).\n- A build contains **one deploy per service** in the configuration.\n\n## Deploy\n\nA **deploy** manages the build and deployment execution of a service within an environment.\n\n**Example:**\nIn a frontend environment, `frontend-service` and `frontend-cache` are two deploys created for the environment, each mapped to a unique build UUID.\n\n## Webhook\n\nLifecycle can invoke third-party services when a build state changes. Currently, only **Codefresh triggers** are supported.\n\n### Example\n\n- When the build status is `deployed`, trigger end-to-end tests.\n- When the build status is `error`, trigger infrastructure cleanup.", - }, - { - title: "Explore static environment", - description: "Create the first and default static environment", - date: null, - path: "docs/getting-started/explore-static-environment", - body: 'A **static environment** in Lifecycle is a persistent environment that serves as a fallback when dependent services do not need to be rebuilt.\n\nUnlike ephemeral environments that are built on short lived pull requests, static environments are built on top of long lived pull requests. These environments exist continuously and update automatically as changes are merged into the default branch of configured services.\n\n## What is `dev-0`\n\nThe **default static environment** is `dev-0`. 
This environment ensures that there is always a **stable and up-to-date version** of services available without needing to build every dependency manually.\n\n\n\nThe `dev-0` environment should be created for your installation.\n\nDuring the initial bootstrapping of Lifecycle, the `dev-0` build record is created automatically but this itself does not have any services built.\n\n\n\n## Create `dev-0`\n\n- Delete the dummy `dev-0` build record from `builds` table in the database\n\n```sql\nDELETE FROM builds WHERE uuid = \'dev-0\';\n```\n\n- Create a repository named `lifecycle-static-env` in your GitHub account\n- Install the Lifecycle GitHub App in this repository\n- Create a pull request in this repository with branch `dev-0`\n- Add `lifecycle.yaml` file to the root of the repository with all the services you want to include in the `dev-0` environment\n\n **Example:**\n\n```yaml\nenvironment:\n defaultServices:\n - name: "frontend"\n repository: "account/frontend-repo"\n branch: "main"\n - name: "grpc"\n repository: "account/backend-grpc"\n branch: "main"\n```\n\n- Deploy the `dev-0` environment by adding `lifecycle-deploy!` label to the pull request\n- Update `uuid` for the environment to `dev-0` in the [mission control comment](/docs/tips/using-mission-control#override-uuid)\n- Finally, execute this query to track default branches of the services in the `dev-0` environment:\n\n```sql\nUPDATE builds\nSET\n "trackDefaultBranches" = true,\n "isStatic" = true\nWHERE\n uuid = \'dev-0\';\n```\n\n## Key Features\n\n**🏗️ Fallback for Optional Services**\n\n- When optional services are not explicitly built in an ephemeral environment, Lifecycle defaults to using the latest build from `dev-0`.\n\n**💪 Based on a Persistent PR**\n\n- Similar to ephemeral environments, `dev-0` is based on a PR, but it remains open and continuously updates.\n\n**👣 Tracks Changes on Default Branch Merges**\n\n- Whenever a service has a new change merged to its `main` branch, `dev-0` will 
**automatically pull, build, and redeploy** the latest changes.\n- This ensures `dev-0` always contains **the freshest version** of all services.', - }, - { - title: "Configure environment", - description: null, - date: null, - path: "docs/getting-started/configure-environment", - body: 'Now that we\'ve created and deployed our first Lifecycle environment, let\'s learn how to customize it by configuring services and dependencies.\n\n## Understanding Configuration\n\nFirst, let\'s take a look at the `lifecycle.yaml` configuration file at the root dir of [lifecycle-examples](https://github.com/GoodRxOSS/lifecycle-examples/blob/main/lifecycle.yaml) repository:\n\n```yaml filename="lifecycle.yaml"\nenvironment:\n autoDeploy: true\n defaultServices:\n - name: "frontend"\n - name: "backend"\n optionalServices:\n - name: "cache"\n\nservices:\n - name: "frontend"\n defaultUUID: "dev-0"\n github:\n repository: "iceycake/lifecycle-examples"\n branchName: "main"\n docker:\n builder:\n engine: "buildkit"\n defaultTag: "main"\n app:\n dockerfilePath: "Dockerfile.frontend"\n ports:\n - 3000\n env:\n COMPONENT: "app"\n ENV: "lifecycle"\n API_URL: "https://{{{backend_publicUrl}}}"\n CACHE_URL: "{{{cache_internalHostname}}}"\n WES_IS: "GOAT"\n - name: "backend"\n requires:\n - name: "db"\n defaultUUID: "dev-0"\n # ...\n - name: "db"\n defaultUUID: "dev-0"\n # ...\n - name: "cache"\n defaultUUID: "dev-0"\n # ...\n```\n\n### Default and Optional Services\n\nWe have our dependencies defined in **`defaultServices`** and **`optionalServices`**:\n\n- **`defaultServices`** – These services are always **built and deployed** with the environment. They form the core foundation of the environment and are required for it to function correctly.\n- **`optionalServices`** – These services **can be built on demand**, only when explicitly needed. 
If they are not selected during a PR, they default to using a **static environment** (e.g., `dev-0`).\n\n### Template Variables\n\nNotice how there are template variables defined in service named `frontend` > `github.docker.env`:\n\n```yaml\nAPI_URL: "https://{{{backend_publicUrl}}}"\nCACHE_URL: "{{{cache_internalHostname}}}"\n```\n\nThis `API_URL` and `CACHE_URL` variables are dynamically templated by Lifecycle and provided during the **build** and **deploy** steps for the frontend service.\n\n\n Read more about supported template variables\n [here](/docs/features/template-variables)\n\n\n## Static Environment as a Fallback\n\nSince `cache` is an **optional service**, this service defaulted to using a **static environment**(`dev-0`) as a fallback. This allows us to reuse existing environments instead of rebuilding everything from scratch when there are no changes.\n\n### Check Template Variables\n\nTo view how the fallback URL works,\n\n1. Open your **Tasks App**(frontend) from the deployed environment.\n2. Navigate to the `Variables` page.\n3. Search for `_URL` and check its value.\n - It should look like:\n ```\n API_URL: https://backend-.\n CACHE_URL: cache-dev-0.env-dev-0.svc.cluster.local\n ```\n - Notice how `CACHE_URL` defaults to the `dev-0`(static) environment for the optional cache.\n\n## Configuring Services\n\nNow, let\'s say you also want to the `cache` component to **test, build and deploy it in your environment**.\n\n### Enable Cache Deployment\n\n1. Navigate to the **Lifecycle PR comment** on GitHub.\n2. Select the `cache` checkbox in the comment. That\'s it!\n3. Lifecycle will now start **building and deploying the cache service** for your specific environment.\n4. Wait for the build to complete. You can monitor the progress in the **status comment**.\n\n### Confirm the New Cache URL\n\n5. Once the cache is deployed, go back to your **frontend app’s Variables page**.\n6. 
Check the `CACHE_URL` value.\n - It should now look like:\n ```\n cache-.env-.svc.cluster.local\n ```\n7. Now, you\'re running your cache **from your own environment** instead of an existing static deploy!\n8. Check the application’s **Tasks** page while you’re here and observe the completely different data, as this environment uses a freshly built and seeded database.\n\n## Build Flexible Environments\n\nWith this approach, you can:\n\n- Build **any combination** of frontend and backend services.\n- Use **custom branches** for different services.\n- Test **different versions** of your app.\n\n\n Check how to use Mission Control comments for configuring your environment\n [here](/docs/tips/using-mission-control)\n\n\nThis gives you a **custom, isolated testing environment** that mirrors your\nproduction setup while allowing flexibility in development and validation.\n\n## Summary\n\n- Services marked as **optional** in `lifecycle.yaml` will default to static environments unless explicitly built.\n- You can enable/disable any service directly from the **Lifecycle PR comment**.\n- Lifecycle automates dependency management, ensuring your services deploy in the correct order.\n\n**Now you\'re ready to customize your Lifecycle environments like a pro!** 👩‍💻', - }, - { - title: "Explore environment", - description: null, - date: null, - path: "docs/getting-started/explore-environment", - body: "Now that we've deployed our first Lifecycle environment, let’s take a tour of the PR comments to understand how to interact with our ephemeral environment.\n\n## Test Your Application\n\nLet's navigate to the deployed `frontend` app from the PR comment.\n\n1. Click on the `frontend` link in the PR comment to navigate to your deployed application.\n2. Add a task and complete a few tasks to update data in backend.\n3. Navigate to the `variables` page and check out the variables in your application's container.\n4. That's it! 
You have successfully deployed and tested the best todo app in the world! 🎉\n\n## Mission Control Comment\n\nThe **Lifecycle PR comment** in your pull request serves as the **mission control** for your ephemeral environment.\n\n\n\n### What You Can Do in the PR Comment\n\n- **Editable Checkboxes**: Select or deselect services to include in your environment.\n- **Redeploy Checkbox**: Triggers a redeploy (useful for transient issues).\n- **Deployment Section**: Provides URLs to your **deployed services**.\n\n}>\n Read more about [Mission Control comment\n here](/docs/tips/using-mission-control.mdx)\n\n\n## Status Comment\n\nWhen we add the `lifecycle-status-comments!` label to our pull request, Lifecycle will automatically add a **status comment** to the PR.\n\nThis comment provides real-time updates on the status, links to your deployments including the build progress and service statuses.\n\n\n\nNotice the following while the environment is being built:\n\n- The status comment is **updated in real-time**.\n- The **status** of each service is displayed.\n- The **build logs** are available for each service.\n\n\n\n### Next Steps\n\nIn the next section, we will:\n\n⚙️ Customize our configuration\n☑️ Enable and build an optional service(`cache`) support your application\n\n**Ready to level up your ephemeral environment? Let\\'s go!** 🏃‍➡️", - }, - { - title: "Create environment", - description: null, - date: null, - path: "docs/getting-started/create-environment", - body: 'In this walk through, we will make a simple change to an example frontend repository and create our first ephemeral environment using Lifecycle.\n\n## 1. 
Fork the Repository\n\nFork the [`lifecycle-examples`](https://github.com/GoodRxOSS/lifecycle-examples) repository to your org or personal account and install your newly minted GitHub App to the forked repository.\n\n- Navigate to `https://github.com/settings/apps` (for personal accounts) or `https://github.com/organizations//settings/apps` (for org accounts).\n- Find the **Lifecycle GitHub App** and click on **Edit**.\n- Choose `Install App` from sidebar and click the Settings icon.\n- Select the forked repository from the list and select **Save**.\n\n## 2. Create a New Branch\n\nClone the repo and create a branch named `lfc-config`:\n\n```sh\ngit checkout -b lfc-config\n```\n\nor if you are using GitHub Desktop, you can create a new branch from the UI.\n\n## 3. Update Lifecycle Configuration\n\nOpen the `lifecycle.yaml` file in the root of the repository and update the `frontend` service\'s repository to your github username or org.\n\n**Before:**\n\n```yaml filename="lifecycle.yaml"\ngithub:\n repository: "GoodRxOSS/lifecycle-examples"\n```\n\n**After:**\n\n```yaml filename="lifecycle.yaml"\ngithub:\n repository: "/lifecycle-examples"\n```\n\n## 4. Commit & Push Your Changes\n\n```sh\ngit add .\ngit commit -m "update config"\ngit push origin lfc-config\n```\n\n## 5. Create a Pull Request\n\n1. Open a **Pull Request (PR)** from `lfc-config` to `main` in the forked repository.\n2. Submit the PR.\n\n## 6. Lifecycle PR Comment\n\nAfter submitting the PR, you’ll see a **GitHub comment from Lifecycle** on your pull request.\n\n🔹 This PR comment is the **mission control** for your ephemeral environment. It provides:\n\n- A **status update** of the build and deploy process.\n- A **list of services** configured for the environment.\n- A **link to the Lifecycle UI** where you can view logs, deployments, and environment details.\n\n\n If there is no comment from Lifecycle, it means the app is not configured\n correctly or the GitHub App is not installed in the repository. 
Please refer\n to the [Missing Comment](/docs/troubleshooting/github-app-webhooks) page for\n more information.\n\n\n## 7. Add `lifecycle-status-comments!` label\n\nThe additional label `lifecycle-status-comments!` provides more detailed information about the environment status and links to access the running application.\n\n🔹 The comments provides insights into:\n\n- **Build & Deploy Status**: Track when your environment is ready.\n- **Environment URLs**: Access the running frontend app.\n- **Telemetry Links**: Links to telemetry, build and deploy logs. (if enabled)\n\n## 8. Wait for Deployment\n\nWait for the **builds & deploys** to complete. Once the status updates to **`deployed`**, your environment is live! 🚀\n\nWhen a new commit is pushed to your pull request Lifecycle automatically builds and deploys again so you always have the latest version of the application.\n\n\n If there are any errors during the build or deploy process, the environment\n will not be created, and you will see an error message in the Lifecycle\n comment.\n \n \n You can check the logs from `lifecycle-worker` pods in your cluster to debug\n the issue: `kubectl logs deploy/lifecycle-worker -n lifecycle-app -f\n `\n \n\n\n## 9. Checkout the deployed application\n\nOnce the deployment is complete, you can access your environment at the URL provided in the Lifecycle comment on your pull request. 
Click on the `frontend` link to open your application in a new tab.\n\nThe application has two simple pages:\n\n- **`/tasks`** – A simple to-do list.\n- **`/variables`** – Displays all environment variables from the container.\n\n## Next Steps\n\nNow that your first ephemeral environment is ready, move on to the next section where we:\n\n🧪 Test the environment.\n🧭 Explore the comments and logs.\n⚙️ Customize the configuration.', - }, - { - title: "Delete environment", - description: null, - date: null, - path: "docs/getting-started/delete-environment", - body: "To **tear down** an environment, you can do one of the following:\n\n1. **Merge or close the pull request**: This will automatically clean up the environment.\n2. **Apply the `lifecycle-disabled!` label**: This will immediately trigger the environment deletion process.\n\n---\n\nThe **`lifecycle-disabled!`** label is useful in scenarios where:\n\n- The environment infrastructure is **experiencing issues**.\n- The data within the environment is **corrupt**.\n- You need to **restart or rebuild** the environment from scratch without waiting for a PR to be merged or closed.\n\nSimply apply the label to the **PR associated with the environment**, and Lifecycle will automatically tear it down.\n\n\n Read more about how pull request labels control auto deploy in repositories\n [here](/docs/features/auto-deployment)\n\n\n---\n\nUsing these methods, you can efficiently manage and clean up environments to ensure smooth development and testing workflows. 🧹", - }, - { - title: "Telemetry", - description: null, - date: null, - path: "docs/tips/telemetry", - body: "Lifecycle comes with built-in support for Datadog telemetry. 
To collect logs and metrics from your cluster and deployed applications, install the Datadog Agent and Cluster Agent in your cluster.\n\nThe deployed applications are already configured with the necessary Datadog labels and environment variables for seamless integration:\n\n**Pod labels:**\n\n```yaml\ntags.datadoghq.com/env: lifecycle-binlab-zero-101010\ntags.datadoghq.com/service: frontend\ntags.datadoghq.com/version: binlab-zero-101010\n```\n\n**Environment variables:**\n\n```yaml\n- name: DD_ENV\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.labels['tags.datadoghq.com/env']\n- name: DD_SERVICE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.labels['tags.datadoghq.com/service']\n- name: DD_VERSION\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.labels['tags.datadoghq.com/version']\n```\n\nThis setup ensures that Datadog automatically detects the environment, service, and version for each application, enabling rich observability and correlation of logs and metrics in the Datadog platform.", - }, - { - title: "Mission Control comment", - description: - "Use the Mission Control PR Comment to modify and customize your environment directly from the pull request comment.", - date: null, - path: "docs/tips/using-mission-control", - body: "Lifecycle uses **Mission Control PR Comments** to allow users to modify and customize their environments directly from the pull request comment. This enables easy **service selection**, **branch customization**, and **environment variable overrides** without modifying `lifecycle.yaml`.\n\n---\n\n## Selecting and Deselecting Services\n\nEach pull request environment includes **default services** and optional additional services. 
You can enable or disable services using the checkboxes.\n\n- **Enabled Services** are marked with `[x]`.\n- **Disabled Services** are marked with `[ ]`.\n\n**Example:**\n\n```md\n// Default Services\n\n- [x] frontend: dev-default\n- [x] fastly: main\n\n// Optional Additional Services\n\n- [ ] backend-service: main\n- [ ] backend-db: main\n- [ ] backend-cache: main\n```\n\nTo **enable** a service, change `[ ]` to `[x]`. To **disable** a service, change `[x]` to `[ ]`. As simple as that!\n\n\n If you need to make multiple selections or deselections at once, use the\n **Edit Comment** option instead of clicking checkboxes individually. This\n prevents multiple back-to-back builds, as each selection triggers an event in\n Lifecycle without deduplication.\n\n\n## Choosing a Branch\n\nTo deploy a specific branch for a service, modify the branch name after the service name.\n\n**Example:**\n\n```md\n- [x] frontend: feature-branch\n- [x] fastly: main\n```\n\nThis will deploy `frontend` using the `feature-branch` instead of the default branch.\n\n## Overriding Environment Variables\n\nTo set additional environment variables, use the **Override Environment Variables** section in the PR comment.\n\n**Example:**\n\n```md\n// **Override Environment Variables:** _ENV:[KEY]:[VALUE]_\nENV:API_URL:https://api.custom.dev.0env.com\nENV:CHIEF_INTERN:ICEYCAKE\n```\n\nThis sets `API_URL` and `CHIEF_INTERN` in the environment without modifying the service configuration.\n\n## Override UUID\n\nTo set a custom UUID (subdomain) for the environment, use the **Override UUID** section in the PR comment.\n\n```md\n// UUID (Pick your own custom subdomain)\nurl: wagon-builder-060825\n```\n\nReplace `wagon-builder-060825` with your desired subdomain. 
This allows you to customize the environment URL without changing the underlying service configuration.\n\n---\n\nUsing the **Mission Control PR Comment**, you can easily customize your environment **without modifying code**, making it a flexible way to test and deploy changes dynamically.", - }, -]; diff --git a/src/pages/docs/_meta.ts b/src/pages/docs/_meta.ts index 3a1ac69..5ea09a1 100644 --- a/src/pages/docs/_meta.ts +++ b/src/pages/docs/_meta.ts @@ -30,12 +30,6 @@ export default { "--": { "type": "separator" }, - "schema": { - "title": "Lifecycle Schema" - }, - "---": { - "type": "separator" - }, "features": { "title": "Features" }, diff --git a/src/pages/docs/schema/_meta.ts b/src/pages/docs/schema/_meta.ts deleted file mode 100644 index 9c0fd09..0000000 --- a/src/pages/docs/schema/_meta.ts +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Copyright 2026 GoodRx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -export default { - "index": { - "title": "Section by section" - }, - "full": { - "title": "All at once" - } -}; \ No newline at end of file diff --git a/src/pages/docs/schema/full.mdx b/src/pages/docs/schema/full.mdx deleted file mode 100644 index 921185e..0000000 --- a/src/pages/docs/schema/full.mdx +++ /dev/null @@ -1,348 +0,0 @@ ---- -title: Lifecycle Full Schema -description: Lifecycle Schema documentation; this page contains the full schema as defined in lifecycle core—all at once. 
-navtext: All at once -tags: - - schema - - lifecycle ---- - -## Full Lifecycle Schema - -Below is the full Lifecycle schema as defined in the `lifecycle.yaml` file with basic comments for each item. - -```yaml -# @section environment -environment: - # @param environment.autoDeploy - autoDeploy: false - # @param environment.useGithubStatusComment - useGithubStatusComment: false - # @param environment.defaultServices - defaultServices: - # @param environment.defaultServices[] - - # @param environment.defaultServices.name (required) - name: "" - # @param environment.defaultServices.repository - repository: "" - # @param environment.defaultServices.branch - branch: "" - # @param environment.optionalServices - optionalServices: - # @param environment.optionalServices[] - - # @param environment.optionalServices.name (required) - name: "" - # @param environment.optionalServices.repository - repository: "" - # @param environment.optionalServices.branch - branch: "" - -# @section services -services: - # @param services[] - - # @param services.name (required) - name: "" - # @param services.appShort - appShort: "" - # @param services.defaultUUID - defaultUUID: "" - # @param services.github - github: - # @param services.github.repository (required) - repository: "" - # @param services.github.branchName (required) - branchName: "" - # @param services.github.docker (required) - docker: - # @param services.github.docker.defaultTag (required) - defaultTag: "" - # @param services.github.docker.pipelineId - pipelineId: "" - # @param services.github.docker.ecr - ecr: "" - # @param services.github.docker.app (required) - app: - # @param services.github.docker.app.afterBuildPipelineConfig - afterBuildPipelineConfig: - # @param services.github.docker.app.afterBuildPipelineConfig.afterBuildPipelineId - afterBuildPipelineId: "" - # @param services.github.docker.app.afterBuildPipelineConfig.detatchAfterBuildPipeline - detatchAfterBuildPipeline: false - # @param 
services.github.docker.app.afterBuildPipelineConfig.description - description: "" - # @param services.github.docker.app.dockerfilePath (required) - dockerfilePath: "" - # @param services.github.docker.app.command - command: "" - # @param services.github.docker.app.arguments - arguments: "" - # @param services.github.docker.app.env - env: - - # @param services.github.docker.app.ports - ports: - # @param services.github.docker.app.ports[] - - "" - # @param services.github.docker.init - init: - # @param services.github.docker.init.dockerfilePath (required) - dockerfilePath: "" - # @param services.github.docker.init.command - command: "" - # @param services.github.docker.init.arguments - arguments: "" - # @param services.github.docker.init.env - env: - - # @param services.github.docker.builder - builder: - # @param services.github.docker.builder.engine - engine: "" - # @param services.github.deployment - deployment: - # @param services.github.deployment.helm - helm: - # @param services.github.deployment.helm.enabled - enabled: false - # @param services.github.deployment.helm.chartName - chartName: "" - # @param services.github.deployment.helm.chartRepoUrl - chartRepoUrl: "" - # @param services.github.deployment.helm.chartVersion - chartVersion: "" - # @param services.github.deployment.helm.cmdPs - cmdPs: "" - # @param services.github.deployment.helm.action - action: "" - # @param services.github.deployment.helm.customValues - customValues: - # @param services.github.deployment.helm.customValues[] - - "" - # @param services.github.deployment.helm.customValueFiles - customValueFiles: - # @param services.github.deployment.helm.customValueFiles[] - - "" - # @param services.github.deployment.helm.helmVersion - helmVersion: "" - # @param services.github.deployment.helm.attachPvc - attachPvc: - # @param services.github.deployment.helm.attachPvc.enabled - enabled: false - # @param services.github.deployment.helm.attachPvc.mountPath - mountPath: "" - # @param 
services.github.deployment.public - public: false - # @param services.github.deployment.capacityType - capacityType: "" - # @param services.github.deployment.resource - resource: - # @param services.github.deployment.resource.cpu - cpu: - # @param services.github.deployment.resource.cpu.request - request: "" - # @param services.github.deployment.resource.cpu.limit - limit: "" - # @param services.github.deployment.resource.memory - memory: - # @param services.github.deployment.resource.memory.request - request: "" - # @param services.github.deployment.resource.memory.limit - limit: "" - # @param services.github.deployment.readiness - readiness: - # @param services.github.deployment.readiness.disabled - disabled: false - # @param services.github.deployment.readiness.tcpSocketPort - tcpSocketPort: 0 - # @param services.github.deployment.readiness.httpGet - httpGet: - # @param services.github.deployment.readiness.httpGet.path - path: "" - # @param services.github.deployment.readiness.httpGet.port - port: 0 - # @param services.github.deployment.readiness.initialDelaySeconds - initialDelaySeconds: 0 - # @param services.github.deployment.readiness.periodSeconds - periodSeconds: 0 - # @param services.github.deployment.readiness.timeoutSeconds - timeoutSeconds: 0 - # @param services.github.deployment.readiness.successThreshold - successThreshold: 0 - # @param services.github.deployment.readiness.failureThreshold - failureThreshold: 0 - # @param services.github.deployment.hostnames - hostnames: - # @param services.github.deployment.hostnames.host - host: "" - # @param services.github.deployment.hostnames.acmARN - acmARN: "" - # @param services.github.deployment.hostnames.defaultInternalHostname - defaultInternalHostname: "" - # @param services.github.deployment.hostnames.defaultPublicUrl - defaultPublicUrl: "" - # @param services.github.deployment.network - network: - # @param services.github.deployment.network.ipWhitelist - ipWhitelist: - # @param 
services.github.deployment.network.ipWhitelist[] - - "" - # @param services.github.deployment.network.pathPortMapping - pathPortMapping: - - # @param services.github.deployment.network.hostPortMapping - hostPortMapping: - - # @param services.github.deployment.network.grpc - grpc: - # @param services.github.deployment.network.grpc.enable - enable: false - # @param services.github.deployment.network.grpc.host - host: "" - # @param services.github.deployment.network.grpc.defaultHost - defaultHost: "" - # @param services.github.deployment.serviceDisks - serviceDisks: - # @param services.github.deployment.serviceDisks[] - - # @param services.github.deployment.serviceDisks.name (required) - name: "" - # @param services.github.deployment.serviceDisks.mountPath (required) - mountPath: "" - # @param services.github.deployment.serviceDisks.accessModes - accessModes: "" - # @param services.github.deployment.serviceDisks.storageSize (required) - storageSize: "" - # @param services.github.deployment.serviceDisks.medium - medium: "" - # @param services.docker - docker: - # @param services.docker.dockerImage (required) - dockerImage: "" - # @param services.docker.defaultTag (required) - defaultTag: "" - # @param services.docker.command - command: "" - # @param services.docker.arguments - arguments: "" - # @param services.docker.env - env: - - # @param services.docker.ports - ports: - # @param services.docker.ports[] - - "" - # @param services.docker.deployment - deployment: - # @param services.docker.deployment.helm - helm: - # @param services.docker.deployment.helm.enabled - enabled: false - # @param services.docker.deployment.helm.chartName - chartName: "" - # @param services.docker.deployment.helm.chartRepoUrl - chartRepoUrl: "" - # @param services.docker.deployment.helm.chartVersion - chartVersion: "" - # @param services.docker.deployment.helm.cmdPs - cmdPs: "" - # @param services.docker.deployment.helm.action - action: "" - # @param 
services.docker.deployment.helm.customValues - customValues: - # @param services.docker.deployment.helm.customValues[] - - "" - # @param services.docker.deployment.helm.customValueFiles - customValueFiles: - # @param services.docker.deployment.helm.customValueFiles[] - - "" - # @param services.docker.deployment.helm.helmVersion - helmVersion: "" - # @param services.docker.deployment.helm.attachPvc - attachPvc: - # @param services.docker.deployment.helm.attachPvc.enabled - enabled: false - # @param services.docker.deployment.helm.attachPvc.mountPath - mountPath: "" - # @param services.docker.deployment.public - public: false - # @param services.docker.deployment.capacityType - capacityType: "" - # @param services.docker.deployment.resource - resource: - # @param services.docker.deployment.resource.cpu - cpu: - # @param services.docker.deployment.resource.cpu.request - request: "" - # @param services.docker.deployment.resource.cpu.limit - limit: "" - # @param services.docker.deployment.resource.memory - memory: - # @param services.docker.deployment.resource.memory.request - request: "" - # @param services.docker.deployment.resource.memory.limit - limit: "" - # @param services.docker.deployment.readiness - readiness: - # @param services.docker.deployment.readiness.disabled - disabled: false - # @param services.docker.deployment.readiness.tcpSocketPort - tcpSocketPort: 0 - # @param services.docker.deployment.readiness.httpGet - httpGet: - # @param services.docker.deployment.readiness.httpGet.path - path: "" - # @param services.docker.deployment.readiness.httpGet.port - port: 0 - # @param services.docker.deployment.readiness.initialDelaySeconds - initialDelaySeconds: 0 - # @param services.docker.deployment.readiness.periodSeconds - periodSeconds: 0 - # @param services.docker.deployment.readiness.timeoutSeconds - timeoutSeconds: 0 - # @param services.docker.deployment.readiness.successThreshold - successThreshold: 0 - # @param 
services.docker.deployment.readiness.failureThreshold - failureThreshold: 0 - # @param services.docker.deployment.hostnames - hostnames: - # @param services.docker.deployment.hostnames.host - host: "" - # @param services.docker.deployment.hostnames.acmARN - acmARN: "" - # @param services.docker.deployment.hostnames.defaultInternalHostname - defaultInternalHostname: "" - # @param services.docker.deployment.hostnames.defaultPublicUrl - defaultPublicUrl: "" - # @param services.docker.deployment.network - network: - # @param services.docker.deployment.network.ipWhitelist - ipWhitelist: - # @param services.docker.deployment.network.ipWhitelist[] - - "" - # @param services.docker.deployment.network.pathPortMapping - pathPortMapping: - - # @param services.docker.deployment.network.hostPortMapping - hostPortMapping: - - # @param services.docker.deployment.network.grpc - grpc: - # @param services.docker.deployment.network.grpc.enable - enable: false - # @param services.docker.deployment.network.grpc.host - host: "" - # @param services.docker.deployment.network.grpc.defaultHost - defaultHost: "" - # @param services.docker.deployment.serviceDisks - serviceDisks: - # @param services.docker.deployment.serviceDisks[] - - # @param services.docker.deployment.serviceDisks.name (required) - name: "" - # @param services.docker.deployment.serviceDisks.mountPath (required) - mountPath: "" - # @param services.docker.deployment.serviceDisks.accessModes - accessModes: "" - # @param services.docker.deployment.serviceDisks.storageSize (required) - storageSize: "" - # @param services.docker.deployment.serviceDisks.medium - medium: "" -``` diff --git a/src/pages/docs/schema/index.mdx b/src/pages/docs/schema/index.mdx deleted file mode 100644 index 7637e53..0000000 --- a/src/pages/docs/schema/index.mdx +++ /dev/null @@ -1,926 +0,0 @@ ---- -title: Lifecycle Schema -description: Lifecycle Schema documentation; a section by section breakdown of the Lifecycle schema. 
-navtext: Section by section -tags: - - schema - - lifecycle ---- - -import Link from "next/link"; -import { Callout } from "nextra/components"; -import { Info, TriangleAlert, Notebook } from "lucide-react"; - -The Following document covers the Lifecycle schema and how to use it. - -> Our goal is to take all of the information about the Lifecycle schema and put it here so that you can find and share everything you need easily. -> Feedback is appreciated if this is not the case. - -## Lifecycle Schema Breakdown - -The Lifecycle schema is defined via the `lifecycle.yaml` file, which is used to define the environment and services that will be deployed in the Lifecycle environment. -In this section, we will break down the schema into sections and provide examples of how to use each section. - ---- - -### `environment` - -
-
-
- -The environment object is the most important object within the `lifecycle.yaml` schema. -It contains a few key values pairs and, currently, 2 main objects, [`defaultServices`](#defaultServices) and [`optionalServices`](#optionalServices). - -
-
-
- -```yaml filename=lifecycle.yaml ---- - -environment: - autoDeploy: false - defaultServices: - - name: "defined-in-lifecycle" - - name: "define-outside-lifecycle" - repository: "org/repo" - branch: "main" - optionalServices: - - name: "optional-service-1" - - name: "optional-service-2" - repository: "org/repo" - branch: "main" - ... -``` - -
-
- ---- - -#### `environmentTypes` - -Each service object within the services array defines a service that can deployed in the Lifecycle environment. - ---- - -
-
- -If the service is defined within the `lifecycle.yaml`, it does not require all object properties to be defined; only its name. - -
-
- -```yaml ---- - -environment: - defaultServices: - - name: "string" - ... -``` - -
-
- ---- - -
-
- -If the service is defined outside of the `lifecycle.yaml`, it requires all properties to be defined. - -
-
- -```yaml ---- - -environment: - defaultServices: - - name: "string" - ... -``` - -
-
- ---- - -##### `defaultServices` - -
-
-
- -The `environment.defaultServices` array defines services that are always deployed by default when the Lifecycle environment is deployed. - -
-
-
- -```yaml ---- - -environment: - defaultServices: - - name: "always-deployed-1" - - name: "always-deployed-2" - - name: "define-outside-lifecycle-always-deployed" - repository: "org/repo" - branch: "main" - ... -``` - -
-
- ---- - -##### `optionalServices` - -
-
-
- -`environment.optionalServices` services is an array of services objects that contains all services that may be deployed in the Lifecycle environment. - -
-
-
- -```yaml ---- - -environment: - defaultServices: - - name: "always-deployed-1" - - name: "always-deployed-2" - optionalServices: - - name: "optional-service-1" - - name: "optional-service-2" - repository: "org/repo" - branch: "main" - ... -``` - -
-
- ---- - -### `services` - -
-
-
- -The `services` object is an array of objects with child objects and properties describing the services created from this Lifecycle project. -As of right now, OSS Lifecycle supports 2 types of services, `github` and `docker`. - -
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - ... -``` - -
-
- ---- - -#### `docker` - -The `docker` service type is used to define a Docker service that can be deployed in the Lifecycle environment. - ---- - -##### properties - -This section describes properties and \*simple objects, eg `env`, that are used to define a Lifecycle Docker service.
-**\*Simple Objects: more complex objects are described in their own sections below.** - -
-
-
- -- `dockerImage`: The Docker image to use for the service. -- `defaultTag`: The default tag to use for the Docker image. -- `command`: The command to run in the Docker container. -- `arguments`: Stringified arguments to pass to the command. -- `env`: An key value pair object containing environment variables to set in the Docker container. -- `ports`: A list of ports to expose in the Docker container. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - docker: - dockerImage: "redis" - defaultTag: "latest" - command: "cmd" - arguments: "-config%%SPLIT%%/config/config.toml" - env: - ENV_VAR: "value" - ports: - - "6379" - ... -``` - -
-
-
- ---- - -##### `deployment` - -
-
-
-
- -- `docker.deployment`: The deployment configuration for the Docker service. -- `deployment.public`: If true, the service will be publicly accessible. -- `deployment.capacityType`: The capacity type to use for the service. -- `deployment.resource`: The resource requirements for the service. -- `deployment.resource.cpu.request`: The CPU request for the service. -- `deployment.resource.cpu.limit`: The CPU limit for the service. -- `deployment.resource.memory.request`: The memory request for the service. -- `deployment.resource.memory.limit`: The memory limit for the service. -- `deployment.hostnames`: The hostnames for the service. -- `deployment.hostnames.host`: The hostname for the service. -- `deployment.hostnames.acmARN`: The ACM ARN for the service. -- `deployment.hostnames.defaultInternalHostname`: The default internal hostname for the service. -- `deployment.hostnames.defaultPublicUrl`: The default public URL for the service. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - docker: - ... - deployment: - public: false - capacityType: 'FARGATE' - resource: - cpu: - request: '256' - limit: '512' - memory: - request: '512Mi' - limit: '1Gi' - hostnames: - host: 'my-service.example.com' - acmARN: 'arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-5678-90ab-cdef-EXAMPLE' - defaultInternalHostname: 'my-service.example.com' - defaultPublicUrl: 'https://my-service.example.com' - ... -``` - -
-
-
- ---- - -###### `helm` - -
-
- -
- -- `deployment.helm`: The Helm configuration for the Docker service. -- `deployment.helm.enabled`: If true, Helm will be used to deploy the service. -- `deployment.helm.chartName`: The name of the Helm chart to use for the service. -- `deployment.helm.chartRepoUrl`: The URL of the Helm chart repository to use for the service. -- `deployment.helm.chartVersion`: The version of the Helm chart to use for the service. -- `deployment.helm.cmdPs`: The command to run to get the status of the service. -- `deployment.helm.action`: The action to perform on the service. -- `deployment.helm.customValues`: A list of custom values to pass to the Helm chart. -- `deployment.helm.customValueFiles`: A list of custom value files to pass to the Helm chart. -- `deployment.helm.helmVersion`: The version of Helm to use for the service. -- `deployment.helm.attachPvc`: The PVC configuration for the service. -- `deployment.helm.attachPvc.enabled`: If true, a PVC will be attached to the service. -- `deployment.helm.attachPvc.mountPath`: The path to mount the PVC to. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - docker: - ... - deployment: - ... - helm: - enabled: false - chartName: 'my-chart' - chartRepoUrl: 'https://charts.example.com' - chartVersion: '1.0.0' - cmdPs: 'helm list' - action: 'install' - customValues: - - 'key=value' - customValueFiles: - - 'values.yaml' - helmVersion: '3.5.0' - attachPvc: - enabled: false - mountPath: '/data' - ... -``` - -
-
-
- ---- - -###### `readiness` - -
-
- -
- -- `deployment.readiness`: The readiness configuration for the Docker service. -- `deployment.readiness.disabled`: If true, the service will not be ready. -- `deployment.readiness.tcpSocketPort`: The port to use for the TCP socket. -- `deployment.readiness.httpGet`: The network configuration for the Docker service. -- `deployment.readiness.httpGet.path`: The path to use for the HTTP GET request. -- `deployment.readiness.httpGet.port`: The port to use for the HTTP GET request. -- `deployment.readiness.initialDelaySeconds`: The number of seconds to wait before starting the readiness check. -- `deployment.readiness.periodSeconds`: The number of seconds between each readiness check. -- `deployment.readiness.timeoutSeconds`: The number of seconds to wait for a readiness check to complete. -- `deployment.readiness.successThreshold`: The number of consecutive successful readiness checks before considering the service ready. -- `deployment.readiness.failureThreshold`: The number of consecutive failed readiness checks before considering the service not ready. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - docker: - ... - deployment: - ... - readiness: - disabled: false - tcpSocketPort: 8080 - httpGet: - path: '/' - port: 80 - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 -``` - -
-
-
- ---- - -###### `network` - -
-
-
- -- `deployment.network`: The network configuration for the Docker service. -- `deployment.network.ipWhitelist`: A list of IP addresses to whitelist for the service. -- `deployment.network.pathPortMapping`: A mapping of paths to ports for the service. -- `deployment.network.hostPortMapping`: A mapping of host ports to container ports for the service. -- `deployment.network.grpc`: The gRPC configuration for the service. -- `deployment.network.grpc.enable`: If true, gRPC will be enabled for the service. -- `deployment.network.grpc.host`: The host to use for gRPC. -- `deployment.network.grpc.defaultHost`: The default host to use for gRPC. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - docker: - ... - deployment: - ... - network: - ipWhitelist: - - '192.168.0.0/16' - pathPortMapping: - '/api': 8080 - hostPortMapping: - '80': 8080 - grpc: - enable: true - host: ' my-service-grpc.example.com' - defaultHost: 'my-service-grpc.example.com' -``` - -
-
-
- ---- - -###### `serviceDisks` - -
-
-
- -- `deployment.serviceDisks`: A list of service disks to attach to the Docker service. -- `deployment.serviceDisks.name`: The name of the service disk. -- `deployment.serviceDisks.mountPath`: The path to mount the service disk to. -- `deployment.serviceDisks.accessModes`: The access modes for the service disk. -- `deployment.serviceDisks.storageSize`: The size of the service disk. -- `deployment.serviceDisks.medium`: The medium to use for the service disk. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - docker: - ... - deployment: - ... - serviceDisks: - - name: 'my-service-disk' - mountPath: '/data' - accessModes: 'ReadWriteOnce' - storageSize: '10Gi' - medium: 'gp2' -``` - -
-
-
- ---- - -#### `github` - -The `github` service type is used to define a Github service that can be deployed in the Lifecycle environment. - ---- - -##### properties - -This section describes properties and \*simple objects, eg `env`, that are used to define a Lifecycle Docker service.
-**\*Simple Objects: more complex objects are described in their own sections below.** - -
-
-
- -- `repository`: The repository to use for the service. -- `branchName`: The branch name to use for the service. - -
-
-
-
- -```yaml ---- - -environment: - ... -services: - - name: 'my-service' - appShort: 'mysvc' - defaultUUID: 'my-service-uuid' - github: - repository: "org/repo" - branchName: "main" - ... -``` - -
-
-
- ---- - -##### `docker` - -
-
-
- -- `github.docker`: The Docker configuration for the Github service. -- `github.docker.defaultTag`: The default tag to use for the Docker image. -- `github.docker.builder`: The builder to use for the Docker image. -- `github.docker.builder.engine`: The engine to use for the Docker image. -- `github.docker.app`: The application to use for the Docker image. -- `github.docker.app.dockerfilePath`: The path to the Dockerfile to use for the application. -- `github.docker.app.command`: The command to run in the Docker container. -- `github.docker.app.arguments`: Stringified arguments to pass to the command. -- `github.docker.app.env`: An key value pair object containing environment variables to set in the Docker container. -- `github.docker.app.ports`: A list of ports to expose in the Docker container. -- `github.docker.init`: The init configuration for the Docker service. -- `github.docker.init.dockerfilePath`: The path to the Dockerfile to use for the init container. -- `github.docker.init.command`: The command to run in the init container. -- `github.docker.init.arguments`: Stringified arguments to pass to the command. -- `github.docker.init.env`: An key value pair object containing environment variables to set in the init container. - -
-
-
-
- -```yaml ---- -environment: ... -services: - - name: "my-service" - appShort: "mysvc" - defaultUUID: "my-service-uuid" - github: - repository: "org/repo" - branchName: "main" - docker: - defaultTag: "latest" - builder: - engine: "buildkit" - app: - dockerfilePath: "Dockerfile" - command: "cmd" - arguments: "-config%%SPLIT%%/config/config.toml" - env: - ENV_VAR: "value" - ports: - - "6379" - init: - dockerfilePath: "Dockerfile.init" - command: "init-cmd" - arguments: "-config%%SPLIT%%/config/init.toml" - env: - INIT_ENV_VAR: "value" -``` - -
-
-
- ---- - -##### `deployment` - -
-
-
- -- `github.deployment`: The deployment configuration for the Github service. -- `github.deployment.public`: If true, the service will be publicly accessible. -- `github.deployment.capacityType`: The capacity type to use for the service. -- `github.deployment.resource`: The resource requirements for the service. -- `github.deployment.resource.cpu.request`: The CPU request for the service. -- `github.deployment.resource.cpu.limit`: The CPU limit for the service. -- `github.deployment.resource.memory.request`: The memory request for the service. -- `github.deployment.resource.memory.limit`: The memory limit for the service. -- `github.deployment.hostnames`: The hostnames for the service. -- `github.deployment.hostnames.host`: The hostname for the service. -- `github.deployment.hostnames.acmARN`: The ACM ARN for the service. -- `github.deployment.hostnames.defaultInternalHostname`: The default internal hostname for the service. -- `github.deployment.hostnames.defaultPublicUrl`: The default public URL for the service. - -
-
-
-
- -```yaml ---- -environment: ... -services: - - name: "my-service" - appShort: "mysvc" - defaultUUID: "my-service-uuid" - github: - repository: "org/repo" - branchName: "main" - deployment: - public: false - capacityType: "FARGATE" - resource: - cpu: - request: "256" - limit: "512" - memory: - request: "512Mi" - limit: "1Gi" - hostnames: - host: "my-service.example.com" - acmARN: "arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-5678-90ab-cdef-EXAMPLE" - defaultInternalHostname: "my-service.example.com" - defaultPublicUrl: "https://my-service.example.com" - ... - network: - ipWhitelist: - - "192.168.0.0/16" - pathPortMapping: - "/api": 8080 - hostPortMapping: - "80": 8080 - grpc: - enable: true - host: " my-service-grpc.example.com" - defaultHost: "my-service-grpc.example.com" - serviceDisks: - - name: "my-service-disk" - mountPath: "/data" - accessModes: "ReadWriteOnce" - storageSize: "10Gi" - medium: "gp2" -``` - -
-
-
- ---- - -###### `helm` - -
-
-
- -- `github.deployment.helm`: The Helm configuration for the Github service. -- `github.deployment.helm.enabled`: If true, Helm will be used to deploy the service. -- `github.deployment.helm.chartName`: The name of the Helm chart to use for the service. -- `github.deployment.helm.chartRepoUrl`: The URL of the Helm chart repository to use for the service. -- `github.deployment.helm.chartVersion`: The version of the Helm chart to use for the service. -- `github.deployment.helm.cmdPs`: The command to run to get the status of the service. -- `github.deployment.helm.action`: The action to perform on the service. -- `github.deployment.helm.customValues`: A list of custom values to pass to the Helm -- `github.deployment.helm.customValueFiles`: A list of custom value files to pass to the Helm chart. -- `github.deployment.helm.helmVersion`: The version of Helm to use for the service. -- `github.deployment.helm.attachPvc`: The PVC configuration for the service. -- `github.deployment.helm.attachPvc.enabled`: If true, a PVC will be attached to the service. -- `github.deployment.helm.attachPvc.mountPath`: The path to mount the PVC to. - -
-
-
-
- -```yaml ---- -environment: ... -services: - - name: "my-service" - appShort: "mysvc" - defaultUUID: "my-service-uuid" - github: - repository: "org/repo" - branchName: "main" - deployment: - ... - helm: - enabled: false - chartName: "my-chart" - chartRepoUrl: "https://charts.example.com" - chartVersion: "1.0.0" - cmdPs: "helm list" - action: "install" - customValues: - - "key=value" - customValueFiles: - - "values.yaml" - helmVersion: "3.5.0" - attachPvc: - enabled: false - mountPath: "/data" - ... -``` - -
-
-
- ---- - -###### `readiness` - -
-
-
- -- `github.deployment.readiness`: The readiness configuration for the Github service. -- `github.deployment.readiness.disabled`: If true, the service will not be ready. -- `github.deployment.readiness.tcpSocketPort`: The port to use for the TCP socket. -- `github.deployment.readiness.httpGet`: The network configuration for the Github service. -- `github.deployment.readiness.httpGet.path`: The path to use for the HTTP GET request. -- `github.deployment.readiness.httpGet.port`: The port to use for the HTTP GET request. -- `github.deployment.readiness.initialDelaySeconds`: The number of seconds to wait before starting the readiness check. -- `github.deployment.readiness.periodSeconds`: The number of seconds between each readiness check. -- `github.deployment.readiness.timeoutSeconds`: The number of seconds to wait for a readiness check to complete. -- `github.deployment.readiness.successThreshold`: The number of consecutive successful readiness checks before considering the service ready. -- `github.deployment.readiness.failureThreshold`: The number of consecutive failed readiness checks before considering the service not ready. - -
-
-
-
- -```yaml ---- -environment: ... -services: - - name: "my-service" - appShort: "mysvc" - defaultUUID: "my-service-uuid" - github: - repository: "org/repo" - branchName: "main" - deployment: - ... - readiness: - disabled: false - tcpSocketPort: 8080 - httpGet: - path: "/" - port: 80 - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 -``` - -
-
-
- ---- - -###### `network` - -
-
-
- -- `github.deployment.network`: The network configuration for the Github service. -- `github.deployment.network.ipWhitelist`: A list of IP addresses to whitelist for the service. -- `github.deployment.network.pathPortMapping`: A mapping of paths to ports for the service. -- `github.deployment.network.hostPortMapping`: A mapping of host ports to container ports for the service. -- `github.deployment.network.grpc`: The gRPC configuration for the service. -- `github.deployment.network.grpc.enable`: If true, gRPC will be enabled for the service. -- `github.deployment.network.grpc.host`: The host to use for gRPC. -- `github.deployment.network.grpc.defaultHost`: The default host to use for gRPC. - -
-
-
-
- -```yaml ---- -environment: ... -services: - - name: "my-service" - appShort: "mysvc" - defaultUUID: "my-service-uuid" - github: - repository: "org/repo" - branchName: "main" - deployment: - ... - network: - ipWhitelist: - - "192.168.0.0/16" - pathPortMapping: - "/api": 8080 - hostPortMapping: - "80": 8080 - grpc: - enable: true - host: " my-service-grpc.example.com" - defaultHost: "my-service-grpc.example.com" -``` - -
-
-
- ---- - -###### `serviceDisks` - -
-
-
- -- `github.deployment.serviceDisks`: A list of service disks to attach to the Github service. -- `github.deployment.serviceDisks.name`: The name of the service disk. -- `github.deployment.serviceDisks.mountPath`: The path to mount the service disk to. -- `github.deployment.serviceDisks.accessModes`: The access modes for the service disk. -- `github.deployment.serviceDisks.storageSize`: The size of the service disk. -- `github.deployment.serviceDisks.medium`: The medium to use for the service disk. - -
-
-
-
- -```yaml ---- -environment: ... -services: - - name: "my-service" - appShort: "mysvc" - defaultUUID: "my-service-uuid" - github: - repository: "org/repo" - branchName: "main" - deployment: - ... - serviceDisks: - - name: "my-service-disk" - mountPath: "/data" - accessModes: "ReadWriteOnce" - storageSize: "10Gi" - medium: "gp2" -``` - -
-
-
- ---- From d8f6adc640a24543667e90de5a41b4b1717c6bed Mon Sep 17 00:00:00 2001 From: vigneshrajsb Date: Sat, 17 Jan 2026 16:17:22 -0800 Subject: [PATCH 4/6] feat: home page updates --- bun.lock | 7 ++ package.json | 1 + src/components/home/features/FeatureCard.tsx | 49 +++++++++++++ src/components/home/features/data.ts | 70 ++++++++++++++++++ src/components/home/features/index.tsx | 55 ++++++++++++++ src/components/home/features/types.ts | 24 +++++++ src/components/home/hero/HeroContent.tsx | 76 ++++++++++++++++++++ src/components/home/hero/index.tsx | 41 +++++++++++ src/components/home/how-it-works/Step.tsx | 71 ++++++++++++++++++ src/components/home/how-it-works/data.ts | 53 ++++++++++++++ src/components/home/how-it-works/index.tsx | 59 +++++++++++++++ src/components/home/how-it-works/types.ts | 25 +++++++ src/components/home/index.tsx | 5 ++ src/pages/index.mdx | 30 ++++---- src/theme.config.tsx | 69 +++++++++++++++--- tailwind.config.mjs | 65 +++++++++-------- 16 files changed, 645 insertions(+), 55 deletions(-) create mode 100644 src/components/home/features/FeatureCard.tsx create mode 100644 src/components/home/features/data.ts create mode 100644 src/components/home/features/index.tsx create mode 100644 src/components/home/features/types.ts create mode 100644 src/components/home/hero/HeroContent.tsx create mode 100644 src/components/home/hero/index.tsx create mode 100644 src/components/home/how-it-works/Step.tsx create mode 100644 src/components/home/how-it-works/data.ts create mode 100644 src/components/home/how-it-works/index.tsx create mode 100644 src/components/home/how-it-works/types.ts diff --git a/bun.lock b/bun.lock index 4382d3e..41829a4 100644 --- a/bun.lock +++ b/bun.lock @@ -17,6 +17,7 @@ "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "codehike": "^1.0.7", + "framer-motion": "^12.26.2", "lucide-react": "^0.511.0", "next": "^15.3.2", "nextra": "^^^3.2.4", @@ -1281,6 +1282,8 @@ "fraction.js": ["fraction.js@4.3.7", "", {}, 
"sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="], + "framer-motion": ["framer-motion@12.26.2", "", { "dependencies": { "motion-dom": "^12.26.2", "motion-utils": "^12.24.10", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-lflOQEdjquUi9sCg5Y1LrsZDlsjrHw7m0T9Yedvnk7Bnhqfkc89/Uha10J3CFhkL+TCZVCRw9eUGyM/lyYhXQA=="], + "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], @@ -1737,6 +1740,10 @@ "mlly": ["mlly@1.7.4", "", { "dependencies": { "acorn": "^8.14.0", "pathe": "^2.0.1", "pkg-types": "^1.3.0", "ufo": "^1.5.4" } }, "sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw=="], + "motion-dom": ["motion-dom@12.26.2", "", { "dependencies": { "motion-utils": "^12.24.10" } }, "sha512-KLMT1BroY8oKNeliA3JMNJ+nbCIsTKg6hJpDb4jtRAJ7nCKnnpg/LTq/NGqG90Limitz3kdAnAVXecdFVGlWTw=="], + + "motion-utils": ["motion-utils@12.24.10", "", {}, "sha512-x5TFgkCIP4pPsRLpKoI86jv/q8t8FQOiM/0E8QKBzfMozWHfkKap2gA1hOki+B5g3IsBNpxbUnfOum1+dgvYww=="], + "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], "mz": ["mz@2.7.0", "", { "dependencies": { "any-promise": "^1.0.0", "object-assign": "^4.0.1", "thenify-all": "^1.0.0" } }, "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q=="], diff --git a/package.json b/package.json index 71423e7..ed5c3d1 100644 --- a/package.json +++ b/package.json @@ -37,6 +37,7 @@ "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "codehike": 
"^1.0.7", + "framer-motion": "^12.26.2", "lucide-react": "^0.511.0", "next": "^15.3.2", "nextra": "^^^3.2.4", diff --git a/src/components/home/features/FeatureCard.tsx b/src/components/home/features/FeatureCard.tsx new file mode 100644 index 0000000..c9b4997 --- /dev/null +++ b/src/components/home/features/FeatureCard.tsx @@ -0,0 +1,49 @@ +/** + * Copyright 2025 GoodRx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +"use client"; + +import { motion } from "framer-motion"; +import type { Feature } from "./types"; + +interface FeatureCardProps { + feature: Feature; + index: number; +} + +export function FeatureCard({ feature, index }: FeatureCardProps) { + const Icon = feature.icon; + + return ( + +
+ +
+

+ {feature.title} +

+

+ {feature.description} +

+
+ ); +} diff --git a/src/components/home/features/data.ts b/src/components/home/features/data.ts new file mode 100644 index 0000000..f19a561 --- /dev/null +++ b/src/components/home/features/data.ts @@ -0,0 +1,70 @@ +/** + * Copyright 2025 GoodRx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { + GitPullRequest, + Network, + Trash2, + GitFork, + Webhook, + MessageSquare, +} from "lucide-react"; +import type { Feature } from "./types"; + +export const features: Feature[] = [ + { + id: "auto-deploy", + title: "Auto-deploy on PR", + description: + "Every pull request automatically gets its own isolated environment. Simple config setup.", + icon: GitPullRequest, + }, + { + id: "multi-service", + title: "Connected Multi-Service", + description: + "Spin up your entire stack - frontend, backend, databases - all connected and working together.", + icon: Network, + }, + { + id: "auto-cleanup", + title: "Automatic Cleanup", + description: + "Environments are automatically torn down when PRs are merged or closed. 
No resource waste.", + icon: Trash2, + }, + { + id: "cross-repo", + title: "Cross-Repo Composition", + description: + "Test changes across multiple repositories in a single unified environment.", + icon: GitFork, + }, + { + id: "webhooks", + title: "Webhooks & Automation", + description: + "Integrate with your existing CI/CD pipelines and trigger custom workflows on environment events.", + icon: Webhook, + }, + { + id: "mission-control", + title: "Mission Control Comments", + description: + "Get environment URLs, status updates, and deployment logs directly in your PR comments.", + icon: MessageSquare, + }, +]; diff --git a/src/components/home/features/index.tsx b/src/components/home/features/index.tsx new file mode 100644 index 0000000..b8e4d90 --- /dev/null +++ b/src/components/home/features/index.tsx @@ -0,0 +1,55 @@ +/** + * Copyright 2025 GoodRx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +"use client"; + +import { motion } from "framer-motion"; +import { FeatureCard } from "./FeatureCard"; +import { features } from "./data"; + +export function Features() { + return ( +
+
+ +

+ Everything you need for ephemeral environments +

+

+ Lifecycle provides all the tools to create, manage, and scale your + development environments automatically. +

+
+ +
+ {features.map((feature, index) => ( + + ))} +
+
+
+ ); +} + +export { FeatureCard } from "./FeatureCard"; +export { features } from "./data"; +export type { Feature } from "./types"; diff --git a/src/components/home/features/types.ts b/src/components/home/features/types.ts new file mode 100644 index 0000000..72c2bdb --- /dev/null +++ b/src/components/home/features/types.ts @@ -0,0 +1,24 @@ +/** + * Copyright 2025 GoodRx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import type { LucideIcon } from "lucide-react"; + +export interface Feature { + id: string; + title: string; + description: string; + icon: LucideIcon; +} diff --git a/src/components/home/hero/HeroContent.tsx b/src/components/home/hero/HeroContent.tsx new file mode 100644 index 0000000..3a002f0 --- /dev/null +++ b/src/components/home/hero/HeroContent.tsx @@ -0,0 +1,76 @@ +/** + * Copyright 2025 GoodRx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +"use client"; + +import Link from "next/link"; +import { motion } from "framer-motion"; +import { ArrowRight, Github } from "lucide-react"; +import { buttonVariants } from "@/components/ui/button"; +import { cn } from "@/lib/utils"; + +export function HeroContent() { + return ( +
+ + Enterprise-grade ephemeral environments{" "} + that grow with you + + + + Instantly spin up connected multi-service development environments from + any pull request. Review, test, and iterate faster than ever before. + + + + + Get Started + + + + + View on GitHub + + +
+ ); +} diff --git a/src/components/home/hero/index.tsx b/src/components/home/hero/index.tsx new file mode 100644 index 0000000..c1b7c9e --- /dev/null +++ b/src/components/home/hero/index.tsx @@ -0,0 +1,41 @@ +/** + * Copyright 2025 GoodRx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +"use client"; + +import { motion } from "framer-motion"; +import { HeroContent } from "./HeroContent"; +import { Iframe } from "@/components/iframe"; + +export function Hero() { + return ( +
+
+ + +