diff --git a/.github/.golangci.yml b/.github/.golangci.yml index b4c89281f0..97584fd424 100644 --- a/.github/.golangci.yml +++ b/.github/.golangci.yml @@ -175,6 +175,13 @@ linters: text: "time.Now" linters: - forbidigo + # Example/demo apps under chasm/lib are not library code — exclude from + # strict chasm/lib rules (forbidigo, deep-exit, errcheck, complexity, etc.). + - path: chasm/lib/.*/examples/ + linters: + - forbidigo + - errcheck + - revive # Cassandra timestamp rules only apply to cassandra persistence package - path-except: common/persistence/cassandra/.*\.go$ text: "Unix|UnixMilli|UnixNano" diff --git a/.github/actions/build-binaries/action.yml b/.github/actions/build-binaries/action.yml index cb4b1fc585..e1a67138c1 100644 --- a/.github/actions/build-binaries/action.yml +++ b/.github/actions/build-binaries/action.yml @@ -14,15 +14,29 @@ inputs: description: "Use release command (true) or build command (false). When true, single-arch is ignored and snapshot is respected." required: false default: "false" + go-private-token: + description: "Token for accessing private Go modules" + required: false + default: "" runs: using: composite steps: + - name: Configure git for private modules + if: inputs.go-private-token != '' + shell: bash + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ inputs.go-private-token }} + - name: Setup Go uses: actions/setup-go@v6 with: go-version-file: "go.mod" cache: true + env: + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs - name: Run GoReleaser (release) if: inputs.release == 'true' @@ -33,6 +47,8 @@ runs: args: release ${{ inputs.snapshot == 'true' && '--snapshot --skip=publish' || '' }} --clean env: GITHUB_TOKEN: ${{ github.token }} + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs - name: Run GoReleaser (build - all architectures) if: inputs.release != 'true' && inputs.single-arch == '' @@ -43,6 +59,8 @@ runs: args: build --snapshot env: GITHUB_TOKEN: ${{ github.token }} + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs - name: Run GoReleaser (build - single architecture) if: inputs.release != 'true' && inputs.single-arch != '' @@ -53,5 +71,7 @@ runs: args: build --snapshot --single-target env: GITHUB_TOKEN: ${{ github.token }} + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs GOOS: linux GOARCH: ${{ inputs.single-arch }} diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml index 2ae58f689d..e921db06e1 100644 --- a/.github/workflows/build-and-publish.yml +++ b/.github/workflows/build-and-publish.yml @@ -30,6 +30,7 @@ jobs: uses: ./.github/actions/build-binaries with: snapshot: true + go-private-token: ${{ secrets.GO_PRIVATE_TOKEN }} - name: Build and push Docker images uses: ./.github/actions/build-docker-images @@ -54,6 +55,7 @@ jobs: uses: ./.github/actions/build-binaries with: snapshot: true + go-private-token: ${{ secrets.GO_PRIVATE_TOKEN }} - name: Build Docker images uses: ./.github/actions/build-docker-images diff --git a/.github/workflows/docker-build-manual.yml b/.github/workflows/docker-build-manual.yml index 26315e48c8..281827170b 100644 --- a/.github/workflows/docker-build-manual.yml +++ b/.github/workflows/docker-build-manual.yml @@ -58,6 +58,7 @@ jobs: with: snapshot: ${{ inputs.snapshot }} single-arch: ${{ steps.arch-param.outputs.single-arch }} + go-private-token: ${{ secrets.GO_PRIVATE_TOKEN }} - name: Build Docker images id: build-docker diff --git a/.github/workflows/features-integration.yml b/.github/workflows/features-integration.yml index 83cdef55be..6cf123ff58 100644 --- a/.github/workflows/features-integration.yml +++ b/.github/workflows/features-integration.yml @@ -30,6 +30,7 @@ jobs: with: snapshot: true single-arch: amd64 + go-private-token: ${{ secrets.GO_PRIVATE_TOKEN }} - name: Build Docker images id: build-docker diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index 6314afd69c..ecfc8d0268 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -93,11 +93,19 @@ jobs: with: fetch-depth: 0 + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" check-latest: true cache: true + env: + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs - name: format golang import statements run: | @@ -145,11 +153,19 @@ jobs: with: fetch-depth: 0 + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" check-latest: true cache: true + env: + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs - name: lint code run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c68f610487..362362c865 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -23,3 +23,4 @@ jobs: with: snapshot: false release: true + go-private-token: ${{ secrets.GO_PRIVATE_TOKEN }} diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 13b11f50e3..17e9f95419 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -25,6 +25,8 @@ env: TEMPORAL_VERSION_CHECK_DISABLED: 1 MAX_TEST_ATTEMPTS: 3 SHARD_COUNT: 3 # NOTE: must match shard count in optimize-test-sharding.yml + GOPRIVATE: github.com/moedash/temporal-fs + GONOSUMCHECK: github.com/moedash/temporal-fs jobs: test-setup: @@ -42,6 +44,11 @@ jobs: ref: ${{ env.COMMIT }} fetch-depth: 0 + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - name: Fetch base branch if: ${{ github.event_name == 'pull_request' }} run: git fetch origin ${{ github.event.pull_request.base.ref }}:${{ github.event.pull_request.base.ref }} @@ -221,6 +228,11 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" @@ -260,6 +272,11 @@ jobs: # buf-breaking tries to compare HEAD against merge base so we need to be able to find it fetch-depth: 100 + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" @@ -297,6 +314,11 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" @@ -381,6 +403,11 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - name: Start containerized dependencies uses: hoverkraft-tech/compose-action@v2.0.1 with: @@ -501,6 +528,11 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - name: Start containerized dependencies if: ${{ toJson(matrix.containers) != '[]' }} uses: hoverkraft-tech/compose-action@v2.0.1 @@ -611,6 +643,11 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} + - name: Configure git for private modules + run: git config --global url."https://x-access-token:${GO_PRIVATE_TOKEN}@github.com/".insteadOf "https://github.com/" + env: + GO_PRIVATE_TOKEN: ${{ secrets.GO_PRIVATE_TOKEN }} + - name: Start PostgreSQL uses: hoverkraft-tech/compose-action@v2.0.1 with: diff --git a/.gitignore b/.gitignore index f6e1955edc..6eb37e6095 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ /tctl* /tdbg /fairsim +/research-agent-demo # proto images /proto/image.bin diff --git a/chasm/lib/temporalzfs/config.go b/chasm/lib/temporalzfs/config.go new file mode 100644 index 0000000000..ea9466462f --- /dev/null +++ b/chasm/lib/temporalzfs/config.go @@ -0,0 +1,48 @@ +package temporalzfs + +import ( + "time" + + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "go.temporal.io/server/common/dynamicconfig" + "google.golang.org/protobuf/types/known/durationpb" +) + +var ( + Enabled = dynamicconfig.NewNamespaceBoolSetting( + "temporalzfs.enabled", + false, + `Toggles TemporalZFS functionality on the server.`, + ) +) + +const ( + defaultChunkSize = 256 * 1024 // 256KB + defaultMaxSize = 1 << 30 // 1GB + defaultMaxFiles = 100_000 + defaultGCInterval = 5 * time.Minute + defaultSnapshotRetention = 24 * time.Hour + defaultOwnerCheckInterval = 10 * time.Minute + ownerCheckNotFoundThreshold = int32(2) + dataCleanupMaxBackoff = 30 * time.Minute +) + +type Config struct { + Enabled dynamicconfig.BoolPropertyFnWithNamespaceFilter +} + +func ConfigProvider(dc *dynamicconfig.Collection) *Config { + return &Config{ + Enabled: Enabled.Get(dc), + } +} + +func defaultConfig() *temporalzfspb.FilesystemConfig { + return &temporalzfspb.FilesystemConfig{ + ChunkSize: defaultChunkSize, + MaxSize: defaultMaxSize, + MaxFiles: defaultMaxFiles, + GcInterval: durationpb.New(defaultGCInterval), + SnapshotRetention: durationpb.New(defaultSnapshotRetention), + } +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/.gitignore b/chasm/lib/temporalzfs/examples/research-agent-demo/.gitignore new file mode 100644 index 0000000000..34d572abfc --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/.gitignore @@ -0,0 +1,7 @@ +# Compiled binary +research-agent-demo + +# Generated artifacts +*.html +demo-output.md +demo-plan.md diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/README.md b/chasm/lib/temporalzfs/examples/research-agent-demo/README.md new file mode 100644 index 0000000000..ee5c64eac8 --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/README.md @@ -0,0 +1,230 @@ +# TemporalZFS Research Agent Demo + +A scale demo of AI research agent workflows using TemporalZFS — a durable, versioned +filesystem for agent workflows. Each workflow simulates a 5-step research pipeline +that writes files and MVCC snapshots through TemporalZFS, with injected random failures +handled automatically by Temporal's retry mechanism. + +## What It Does + +Each workflow runs 5 activities in sequence: + +| Step | Activity | Writes | Failure Rate | +|------|----------|--------|--------------| +| 1 | **WebResearch** | 3-5 source files in `/research/{topic}/sources/` | 20% | +| 2 | **Summarize** | `summary.md` | 15% | +| 3 | **FactCheck** | `fact-check.md` | 10% | +| 4 | **FinalReport** | `report.md` | 10% | +| 5 | **PeerReview** | `review.md` | 5% | + +After each step, a named MVCC snapshot is created (e.g., `step-1-research`, +`step-2-summary`). Every workflow gets its own isolated TemporalZFS partition backed +by a shared PebbleDB instance. + +## Prerequisites + +- Go 1.23+ +- [Temporal CLI](https://docs.temporal.io/cli) (`temporal server start-dev`) + +## Quick Start + +The easiest way to run the demo is with the included script, which handles +building, starting the Temporal dev server, running workflows, and generating +the report: + +```bash +cd chasm/lib/temporalzfs/examples/research-agent-demo +./run-demo.sh +``` + +For continuous mode (runs until Ctrl+C): + +```bash +./run-demo.sh --continuous +``` + +Customize the run: + +```bash +./run-demo.sh --workflows 500 --concurrency 100 --failure-rate 2.0 +``` + +### Manual Setup + +If you prefer to run each step yourself: + +```bash +# Terminal 1: Start the Temporal dev server +temporal server start-dev + +# Terminal 2: Run the demo in continuous mode (runs until Ctrl+C) +cd chasm/lib/temporalzfs/examples/research-agent-demo +go run . run --continuous --concurrency 50 +``` + +Or run a fixed number of workflows: + +```bash +go run . run --workflows 200 --concurrency 50 +``` + +The live terminal dashboard shows real-time progress, retry counts, throughput +metrics, and an activity feed. Open http://localhost:8233 to see workflows in the +Temporal UI. + +## `run-demo.sh` — End-to-End Script + +The `run-demo.sh` script automates the full demo: build, start Temporal dev +server (if not already running), run workflows, show workflow counts, browse a +sample filesystem, and generate the HTML report. + +``` +./run-demo.sh [flags] +``` + +| Flag | Default | Description | +|------|---------|-------------| +| `--workflows` | 200 | Number of workflows (ignored in continuous mode) | +| `--concurrency` | 50 | Max concurrent workflows | +| `--failure-rate` | 1.0 | Failure rate multiplier (0 = none, 2 = double) | +| `--seed` | 12345 | Random seed | +| `--data-dir` | /tmp/tzfs-demo | PebbleDB data directory | +| `--continuous` | | Run continuously until Ctrl+C | + +The script cleans up the Temporal dev server on exit. + +## Commands + +### `run` — Execute workflows with live dashboard + +``` +go run . run [flags] +``` + +| Flag | Default | Description | +|------|---------|-------------| +| `--workflows` | 200 | Number of research workflows to run | +| `--concurrency` | 50 | Max concurrent workflows | +| `--failure-rate` | 1.0 | Failure rate multiplier (0 = none, 2 = double) | +| `--data-dir` | /tmp/tzfs-demo | PebbleDB data directory | +| `--seed` | 0 | Random seed (0 = random) | +| `--task-queue` | research-demo | Temporal task queue name | +| `--temporal-addr` | localhost:7233 | Temporal server address | +| `--no-dashboard` | false | Disable live terminal dashboard | +| `--continuous` | false | Run continuously until Ctrl+C, then generate report | +| `--report` | | Auto-generate HTML report on completion (path) | + +### `report` — Generate HTML report + +```bash +go run . report --data-dir /tmp/tzfs-demo --output demo-report.html +open demo-report.html +``` + +Produces a self-contained HTML file with: +- Run summary (workflows, files, snapshots, data volume) +- Workflow table with file counts and snapshot counts +- Expandable filesystem explorer showing file contents and snapshots + +### `browse` — Inspect a workflow's filesystem + +```bash +go run . browse --data-dir /tmp/tzfs-demo --topic quantum-computing +``` + +Prints the directory tree for a specific workflow's TemporalZFS partition, including +file sizes and snapshot names. + +## Demo Script + +### Setup (30 seconds) + +```bash +# Terminal 1 +temporal server start-dev + +# Terminal 2 +cd chasm/lib/temporalzfs/examples/research-agent-demo +``` + +### Run — Continuous Mode (recommended for live demos) + +```bash +go run . run --continuous --concurrency 50 +``` + +This opens the Temporal UI in your browser and keeps running workflows until you +press Ctrl+C. On shutdown it waits for in-flight workflows and auto-generates an +HTML report. + +### Run — Fixed Mode (2-3 minutes) + +```bash +go run . run --workflows 200 --concurrency 50 +``` + +While running: +- Watch the live dashboard fill up with progress, retries, and throughput stats +- Open http://localhost:8233 to see workflows in the Temporal UI +- Click any workflow to see the activity timeline with retry attempts + +### After Completion + +```bash +# Generate and open HTML report (fixed mode — continuous mode does this automatically) +go run . report --output demo-report.html +open demo-report.html + +# Browse a specific workflow's filesystem +go run . browse --topic quantum-computing +``` + +### Key Demo Points + +- **Durability**: Kill the process mid-run, restart — workflows resume from last snapshot +- **Scale**: 200 workflows, 50 concurrent, thousands of files, single PebbleDB +- **Versioning**: Each activity creates an MVCC snapshot; browse them in the report +- **Failure resilience**: Random failures are retried automatically by Temporal +- **Temporal UI**: Full workflow history with retries and timing at http://localhost:8233 + +## Architecture + +``` +temporal server start-dev + | + v ++-------------------+ +---------------------------+ +| Scale Runner |---->| Temporal Server (local) | +| (starts N wfs) | | - Workflow history | ++-------------------+ | - Retry scheduling | + | | - Web UI (:8233) | + v +-------------+--------------+ ++-------------------+ | +| Live Dashboard |<-- +------------v--------------+ +| (terminal TUI) | | Worker (activities) | ++-------------------+ | - 5 activities per wf | + | - Random failure injection | + | - TemporalZFS file I/O | + +------------+---------------+ + | + +------------v---------------+ + | PebbleDB (shared) | + | - PrefixedStore per wf | + | - MVCC snapshots | + +----------------------------+ +``` + +## File Structure + +| File | Description | +|------|-------------| +| `main.go` | Entry point with `run`, `report`, `browse` subcommands | +| `workflow.go` | Temporal workflow definition chaining 5 activities | +| `activities.go` | Activity implementations with FS ops + failure injection | +| `content.go` | Template-based markdown content generators | +| `topics.go` | 120+ research topics with display names and slugs | +| `runner.go` | Scale runner — starts N workflows via Temporal SDK | +| `dashboard.go` | Live ANSI terminal dashboard (no external deps) | +| `report.go` | Post-run HTML report generator | +| `store.go` | Shared PebbleDB wrapper + manifest management | +| `run-demo.sh` | End-to-end demo script (build, server, run, report) | diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/activities.go b/chasm/lib/temporalzfs/examples/research-agent-demo/activities.go new file mode 100644 index 0000000000..b5f7953b4b --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/activities.go @@ -0,0 +1,334 @@ +package main + +import ( + "context" + "errors" + "fmt" + "math/rand" + "time" + + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + "github.com/temporalio/temporal-zfs/pkg/store" + "go.temporal.io/sdk/activity" +) + +// Activities holds the shared store and implements the 5 research agent activities. +// Each activity opens an isolated TemporalZFS partition, reads prior state from the +// previous step's snapshot (guaranteeing a consistent view even if a prior attempt +// left partial writes in HEAD), writes new files, and creates a CoW snapshot. +type Activities struct { + baseStore store.Store + stats *RunStats // shared stats for real-time dashboard updates + eventCh chan<- WorkflowEvent // per-activity events for the dashboard +} + +// emitEvent sends a dashboard event for the current activity step. +func (a *Activities) emitEvent(ctx context.Context, params WorkflowParams, stepIndex int, stepName, state string) { + if a.eventCh == nil { + return + } + select { + case a.eventCh <- WorkflowEvent{ + TopicSlug: params.TopicSlug, + StepIndex: stepIndex, + StepName: stepName, + State: state, + Attempt: int(activity.GetInfo(ctx).Attempt), + Timestamp: time.Now(), + }: + default: // don't block if channel is full + } +} + +// openFS opens an existing FS for the workflow's partition. +func (a *Activities) openFS(partitionID uint64) (*tzfs.FS, error) { + s := store.NewPrefixedStore(a.baseStore, partitionID) + f, err := tzfs.Open(s) + if err != nil { + return nil, fmt.Errorf("open fs: %w", err) + } + return f, nil +} + +// onRetry records a retry in shared stats and logs the recovery with prior state info. +func (a *Activities) onRetry(ctx context.Context, priorFiles int, priorSnapshot string) { + a.stats.Retries.Add(1) + activity.GetLogger(ctx).Info("Retrying with durable FS state intact", + "attempt", activity.GetInfo(ctx).Attempt, + "filesFromPriorStep", priorFiles, + "lastSnapshot", priorSnapshot, + ) +} + +// retries returns the number of retries for the current activity execution. +func retries(ctx context.Context) int { + info := activity.GetInfo(ctx) + if info.Attempt > 1 { + return int(info.Attempt) - 1 + } + return 0 +} + +// maybeFail injects a random failure based on the configured failure rate. +// It incorporates the attempt number so retries can succeed after earlier failures. +func maybeFail(ctx context.Context, seed int64, rate float64, msg string) error { + attempt := int64(activity.GetInfo(ctx).Attempt) + r := rand.New(rand.NewSource(seed + attempt*1000)) + if rate > 0 && r.Float64() < rate { + return errors.New(msg) + } + return nil +} + +// countFiles counts files in a directory (non-recursive). +func countFiles(f *tzfs.FS, dir string) int { + entries, err := f.ReadDir(dir) + if err != nil { + return 0 + } + count := 0 + for _, e := range entries { + if e.Type != tzfs.InodeTypeDir { + count++ + } + } + return count +} + +// WebResearch simulates gathering research sources: creates workspace dirs +// and writes 3-5 source files. Failure rate: 20% * multiplier. +func (a *Activities) WebResearch(ctx context.Context, params WorkflowParams) (StepResult, error) { + a.emitEvent(ctx, params, 0, "WebResearch", "started") + f, err := a.openFS(params.PartitionID) + if err != nil { + return StepResult{}, err + } + defer f.Close() + + // On retry: verify FS opened successfully (partition is durable). + if activity.GetInfo(ctx).Attempt > 1 { + a.emitEvent(ctx, params, 0, "WebResearch", "retrying") + a.onRetry(ctx, 0, "(none — first step)") + } + + // Inject failure AFTER opening FS — proves partition survives failures. + if err := maybeFail(ctx, params.Seed+1, 0.20*params.FailureRate, "simulated web API timeout"); err != nil { + return StepResult{}, err + } + + // Create workspace directories (idempotent — ignore ErrExist). + for _, dir := range []string{ + "/research", + "/research/" + params.TopicSlug, + "/research/" + params.TopicSlug + "/sources", + } { + if mkErr := f.Mkdir(dir, 0o755); mkErr != nil && !errors.Is(mkErr, tzfs.ErrExist) { + return StepResult{}, fmt.Errorf("mkdir %s: %w", dir, mkErr) + } + } + + // Generate and write source files. + sources := generateSources(params.TopicName, params.Seed) + var result StepResult + for _, src := range sources { + path := "/research/" + params.TopicSlug + "/sources/" + src.Filename + if err := f.WriteFile(path, src.Content, 0o644); err != nil { + return StepResult{}, fmt.Errorf("write %s: %w", path, err) + } + result.FilesCreated++ + result.BytesWritten += int64(len(src.Content)) + } + + // Snapshot after this step. + if _, err := f.CreateSnapshot("step-1-research"); err != nil && !errors.Is(err, tzfs.ErrExist) { + return StepResult{}, fmt.Errorf("snapshot: %w", err) + } + + result.Retries = retries(ctx) + a.emitEvent(ctx, params, 0, "WebResearch", "completed") + return result, nil +} + +// Summarize reads all source files and produces a summary. Failure rate: 15%. +func (a *Activities) Summarize(ctx context.Context, params WorkflowParams) (StepResult, error) { + a.emitEvent(ctx, params, 1, "Summarize", "started") + f, err := a.openFS(params.PartitionID) + if err != nil { + return StepResult{}, err + } + defer f.Close() + + // Open step-1 snapshot for reads — guaranteed consistent view even if a + // prior attempt left partial writes in HEAD. + snapFS, err := f.OpenSnapshot("step-1-research") + if err != nil { + return StepResult{}, fmt.Errorf("open snapshot step-1-research: %w", err) + } + defer snapFS.Close() + + // Read source filenames from snapshot — verifies step 1's files survived. + sourcesDir := "/research/" + params.TopicSlug + "/sources" + entries, err := snapFS.ReadDir(sourcesDir) + if err != nil { + return StepResult{}, fmt.Errorf("readdir %s: %w", sourcesDir, err) + } + + // On retry: step 1's source files are still here — read from snapshot, not HEAD. + if activity.GetInfo(ctx).Attempt > 1 { + a.emitEvent(ctx, params, 1, "Summarize", "retrying") + a.onRetry(ctx, len(entries), "step-1-research") + } + + // Inject failure AFTER verifying prior state. + if err := maybeFail(ctx, params.Seed+2, 0.15*params.FailureRate, "simulated LLM rate limit exceeded"); err != nil { + return StepResult{}, err + } + + sourceNames := make([]string, len(entries)) + for i, e := range entries { + sourceNames[i] = e.Name + } + + // Generate and write summary. + content := generateSummary(params.TopicName, sourceNames, params.Seed) + path := "/research/" + params.TopicSlug + "/summary.md" + if err := f.WriteFile(path, content, 0o644); err != nil { + return StepResult{}, fmt.Errorf("write summary: %w", err) + } + + if _, err := f.CreateSnapshot("step-2-summary"); err != nil && !errors.Is(err, tzfs.ErrExist) { + return StepResult{}, fmt.Errorf("snapshot: %w", err) + } + + a.emitEvent(ctx, params, 1, "Summarize", "completed") + return StepResult{FilesCreated: 1, BytesWritten: int64(len(content)), Retries: retries(ctx)}, nil +} + +// FactCheck reads the summary and produces a fact-check report. Failure rate: 10%. +func (a *Activities) FactCheck(ctx context.Context, params WorkflowParams) (StepResult, error) { + a.emitEvent(ctx, params, 2, "FactCheck", "started") + f, err := a.openFS(params.PartitionID) + if err != nil { + return StepResult{}, err + } + defer f.Close() + + // Open step-2 snapshot — read prior state from known-good point. + topicDir := "/research/" + params.TopicSlug + snapFS, err := f.OpenSnapshot("step-2-summary") + if err != nil { + return StepResult{}, fmt.Errorf("open snapshot step-2-summary: %w", err) + } + priorFiles := countFiles(snapFS, topicDir) + snapFS.Close() + + // On retry: summary + sources from prior steps verified via snapshot. + if activity.GetInfo(ctx).Attempt > 1 { + a.emitEvent(ctx, params, 2, "FactCheck", "retrying") + a.onRetry(ctx, priorFiles, "step-2-summary") + } + + // Inject failure AFTER verifying prior state. + if err := maybeFail(ctx, params.Seed+3, 0.10*params.FailureRate, "simulated fact-checking service unavailable"); err != nil { + return StepResult{}, err + } + + content := generateFactCheck(params.TopicName, params.Seed) + path := topicDir + "/fact-check.md" + if err := f.WriteFile(path, content, 0o644); err != nil { + return StepResult{}, fmt.Errorf("write fact-check: %w", err) + } + + if _, err := f.CreateSnapshot("step-3-factcheck"); err != nil && !errors.Is(err, tzfs.ErrExist) { + return StepResult{}, fmt.Errorf("snapshot: %w", err) + } + + a.emitEvent(ctx, params, 2, "FactCheck", "completed") + return StepResult{FilesCreated: 1, BytesWritten: int64(len(content)), Retries: retries(ctx)}, nil +} + +// FinalReport reads all artifacts and produces a final report. Failure rate: 10%. +func (a *Activities) FinalReport(ctx context.Context, params WorkflowParams) (StepResult, error) { + a.emitEvent(ctx, params, 3, "FinalReport", "started") + f, err := a.openFS(params.PartitionID) + if err != nil { + return StepResult{}, err + } + defer f.Close() + + // Open step-3 snapshot — read prior state from known-good point. + topicDir := "/research/" + params.TopicSlug + snapFS, err := f.OpenSnapshot("step-3-factcheck") + if err != nil { + return StepResult{}, fmt.Errorf("open snapshot step-3-factcheck: %w", err) + } + priorFiles := countFiles(snapFS, topicDir) + snapFS.Close() + + // On retry: sources + summary + fact-check verified via snapshot. + if activity.GetInfo(ctx).Attempt > 1 { + a.emitEvent(ctx, params, 3, "FinalReport", "retrying") + a.onRetry(ctx, priorFiles, "step-3-factcheck") + } + + // Inject failure AFTER verifying prior state. + if err := maybeFail(ctx, params.Seed+4, 0.10*params.FailureRate, "simulated context window exceeded"); err != nil { + return StepResult{}, err + } + + content := generateFinalReport(params.TopicName, params.Seed) + path := topicDir + "/report.md" + if err := f.WriteFile(path, content, 0o644); err != nil { + return StepResult{}, fmt.Errorf("write report: %w", err) + } + + if _, err := f.CreateSnapshot("step-4-report"); err != nil && !errors.Is(err, tzfs.ErrExist) { + return StepResult{}, fmt.Errorf("snapshot: %w", err) + } + + a.emitEvent(ctx, params, 3, "FinalReport", "completed") + return StepResult{FilesCreated: 1, BytesWritten: int64(len(content)), Retries: retries(ctx)}, nil +} + +// PeerReview reads the report and produces a peer review. Failure rate: 5%. +func (a *Activities) PeerReview(ctx context.Context, params WorkflowParams) (StepResult, error) { + a.emitEvent(ctx, params, 4, "PeerReview", "started") + f, err := a.openFS(params.PartitionID) + if err != nil { + return StepResult{}, err + } + defer f.Close() + + // Open step-4 snapshot — read prior state from known-good point. + topicDir := "/research/" + params.TopicSlug + snapFS, err := f.OpenSnapshot("step-4-report") + if err != nil { + return StepResult{}, fmt.Errorf("open snapshot step-4-report: %w", err) + } + priorFiles := countFiles(snapFS, topicDir) + snapFS.Close() + + // On retry: all artifacts from prior steps verified via snapshot. + if activity.GetInfo(ctx).Attempt > 1 { + a.emitEvent(ctx, params, 4, "PeerReview", "retrying") + a.onRetry(ctx, priorFiles, "step-4-report") + } + + // Inject failure AFTER verifying prior state. + if err := maybeFail(ctx, params.Seed+5, 0.05*params.FailureRate, "simulated reviewer model overloaded"); err != nil { + return StepResult{}, err + } + + content := generatePeerReview(params.TopicName, params.Seed) + path := topicDir + "/review.md" + if err := f.WriteFile(path, content, 0o644); err != nil { + return StepResult{}, fmt.Errorf("write review: %w", err) + } + + if _, err := f.CreateSnapshot("step-5-review"); err != nil && !errors.Is(err, tzfs.ErrExist) { + return StepResult{}, fmt.Errorf("snapshot: %w", err) + } + + a.emitEvent(ctx, params, 4, "PeerReview", "completed") + return StepResult{FilesCreated: 1, BytesWritten: int64(len(content)), Retries: retries(ctx)}, nil +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/content.go b/chasm/lib/temporalzfs/examples/research-agent-demo/content.go new file mode 100644 index 0000000000..04a0af638b --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/content.go @@ -0,0 +1,329 @@ +package main + +import ( + "fmt" + "math/rand" + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// Source represents a generated research source document. +type Source struct { + Filename string + Content []byte +} + +// Pools of template fragments for realistic content generation. +var ( + authorLastNames = []string{ + "Chen", "Patel", "Smith", "Garcia", "Kim", "Johnson", "Williams", + "Mueller", "Nakamura", "Silva", "Brown", "Lee", "Anderson", "Taylor", + "Wang", "Martinez", "Thompson", "Yamamoto", "Petrov", "Okafor", + } + + authorFirstNames = []string{ + "A.", "B.", "C.", "D.", "E.", "F.", "G.", "H.", "I.", "J.", + "K.", "L.", "M.", "N.", "O.", "P.", "Q.", "R.", "S.", "T.", + } + + journalNames = []string{ + "Nature", "Science", "PNAS", "Physical Review Letters", + "IEEE Transactions", "ACM Computing Surveys", "The Lancet", + "Cell", "arXiv preprint", "Annual Review", + } + + keyPointPrefixes = []string{ + "Demonstrates that", "Proposes a novel framework for", + "Provides evidence suggesting", "Introduces a scalable approach to", + "Challenges the conventional view of", "Extends prior work on", + "Establishes a theoretical foundation for", "Presents experimental results on", + "Surveys recent advances in", "Identifies key limitations of", + } + + findingPrefixes = []string{ + "Recent advances in %s suggest", + "The intersection of %s and adjacent fields reveals", + "A growing body of evidence indicates that %s", + "Computational approaches to %s have shown", + "Cross-disciplinary analysis of %s demonstrates", + "Emerging trends in %s point toward", + "The theoretical foundations of %s are shifting due to", + "Practical applications of %s are increasingly driven by", + } + + verdicts = []string{"Confirmed", "Partially Confirmed", "Needs Context", "Unverified", "Confirmed"} + strengthAdjs = []string{"comprehensive", "rigorous", "innovative", "well-structured", "thorough"} + weaknessAdjs = []string{"limited", "narrow", "incomplete", "surface-level", "brief"} + reviewScores = []string{"7.0", "7.5", "8.0", "8.5", "9.0"} +) + +func generateSources(topic string, seed int64) []Source { + r := rand.New(rand.NewSource(seed)) + count := 3 + r.Intn(3) // 3-5 sources + sources := make([]Source, count) + baseYear := 2015 + r.Intn(5) + + for i := range count { + year := baseYear + i*2 + lastName := authorLastNames[r.Intn(len(authorLastNames))] + firstName := authorFirstNames[r.Intn(len(authorFirstNames))] + journal := journalNames[r.Intn(len(journalNames))] + + title := fmt.Sprintf("On the Foundations of %s: Perspective %d", topic, i+1) + slug := fmt.Sprintf("%s-%d", strings.ToLower(strings.ReplaceAll(lastName, " ", "-")), year) + + numPoints := 2 + r.Intn(3) + var points strings.Builder + for j := range numPoints { + prefix := keyPointPrefixes[r.Intn(len(keyPointPrefixes))] + points.WriteString(fmt.Sprintf("%d. %s %s in the context of modern research.\n", j+1, prefix, strings.ToLower(topic))) + } + + content := fmt.Sprintf(`# %s + +**Authors:** %s %s et al. +**Published:** %s (%d) +**DOI:** 10.1234/example.%d.%d + +## Abstract + +This paper examines recent developments in %s, with particular focus on +emerging methodologies and their implications for the field. Through a +combination of theoretical analysis and empirical evaluation, we present +findings that advance the current understanding of %s. + +## Key Points + +%s +## Citation Impact + +Cited by %d papers as of 2025. H-index contribution: %d. +`, title, firstName, lastName, journal, year, year, i+1, + strings.ToLower(topic), strings.ToLower(topic), + points.String(), 50+r.Intn(200), 5+r.Intn(15)) + + sources[i] = Source{ + Filename: slug + ".md", + Content: []byte(content), + } + } + return sources +} + +func generateSummary(topic string, sourceNames []string, seed int64) []byte { + r := rand.New(rand.NewSource(seed + 100)) + + var sourceList strings.Builder + for _, name := range sourceNames { + sourceList.WriteString(fmt.Sprintf("- %s\n", name)) + } + + numFindings := 3 + r.Intn(3) + var findings strings.Builder + for i := range numFindings { + prefix := findingPrefixes[r.Intn(len(findingPrefixes))] + findings.WriteString(fmt.Sprintf("%d. %s new possibilities for practical application.\n", + i+1, fmt.Sprintf(prefix, strings.ToLower(topic)))) + } + + return []byte(fmt.Sprintf(`# Research Summary — %s + +## Sources Analyzed + +%s +## Key Findings + +%s +## Cross-Cutting Themes + +1. **Scalability Challenges**: Multiple sources highlight the difficulty of scaling + current approaches to %s beyond laboratory conditions. +2. **Interdisciplinary Convergence**: The field is increasingly drawing from adjacent + disciplines, creating new hybrid methodologies. +3. **Data Requirements**: All reviewed approaches require significant high-quality + data, raising questions about accessibility and bias. + +## Open Questions + +- How will regulatory frameworks adapt to advances in %s? +- What are the long-term societal implications of widespread adoption? +- Can current theoretical models account for edge cases observed in practice? +- What role will open-source tools play in democratizing access? +`, topic, sourceList.String(), findings.String(), + strings.ToLower(topic), strings.ToLower(topic))) +} + +func generateFactCheck(topic string, seed int64) []byte { + r := rand.New(rand.NewSource(seed + 200)) + + numClaims := 5 + r.Intn(4) + var rows strings.Builder + for i := range numClaims { + verdict := verdicts[r.Intn(len(verdicts))] + rows.WriteString(fmt.Sprintf("| %s has shown %d%% improvement in key metrics | Source %d | %s | Based on %d-year longitudinal data |\n", + topic, 10+r.Intn(80), r.Intn(5)+1, verdict, 1+r.Intn(10))) + _ = i + } + + return []byte(fmt.Sprintf(`# Fact Check — %s + +## Verification Methodology + +Each claim from the research summary was cross-referenced against the original +source material and, where possible, validated against independent datasets +and peer-reviewed meta-analyses. + +## Results + +| Claim | Source | Verdict | Notes | +|-------|--------|---------|-------| +%s +## Summary + +- **Confirmed**: %d claims fully supported by evidence +- **Partially Confirmed**: %d claims with caveats or limited scope +- **Needs Context**: %d claims require additional qualification +- **Unverified**: %d claims could not be independently verified + +Overall confidence level: **%.1f/10** +`, topic, rows.String(), + 2+r.Intn(3), 1+r.Intn(2), r.Intn(2), r.Intn(2), + 7.0+float64(r.Intn(20))/10.0)) +} + +func generateFinalReport(topic string, seed int64) []byte { + r := rand.New(rand.NewSource(seed + 300)) + + numRecs := 3 + r.Intn(3) + var recs strings.Builder + recPrefixes := []string{ + "Invest in", "Monitor developments in", "Establish partnerships for", + "Develop internal capabilities in", "Commission further research on", + "Create a task force to evaluate", "Begin pilot programs for", + } + for i := range numRecs { + prefix := recPrefixes[r.Intn(len(recPrefixes))] + recs.WriteString(fmt.Sprintf("%d. %s %s to maintain competitive advantage.\n", i+1, prefix, strings.ToLower(topic))) + } + + return []byte(fmt.Sprintf(`# Final Report — %s + +## Executive Summary + +This report synthesizes findings from %d primary sources, cross-referenced +through independent fact-checking, to provide actionable intelligence on +the current state and future trajectory of %s. + +The field is at a critical inflection point. Recent breakthroughs have +shortened the timeline for practical applications from decades to years, +while simultaneously raising important questions about governance, +accessibility, and unintended consequences. + +## Methodology + +1. **Source Collection**: Gathered %d peer-reviewed papers and preprints (2015-2025) +2. **Synthesis**: Identified cross-cutting themes and convergent findings +3. **Fact-Checking**: Independently verified %d%% of quantitative claims +4. **Peer Review**: Internal review by domain experts + +## Detailed Findings + +### Current State of the Art + +The leading approaches to %s have evolved significantly over the past five +years. Key advances include improved scalability, reduced computational +requirements, and novel theoretical frameworks that unify previously +disparate research threads. + +### Emerging Trends + +Three trends are reshaping the landscape: +- **Democratization**: Open-source tooling is lowering barriers to entry +- **Convergence**: Cross-disciplinary approaches are yielding outsized results +- **Regulation**: Governments are beginning to establish frameworks for responsible development + +### Risk Assessment + +| Risk | Probability | Impact | Mitigation | +|------|------------|--------|------------| +| Technical plateau | Medium | High | Diversify research portfolio | +| Regulatory barriers | Medium | Medium | Engage with policymakers early | +| Talent shortage | High | High | Invest in training programs | +| Ethical concerns | Medium | High | Establish ethics review board | + +## Recommendations + +%s +## Conclusion + +%s represents a significant opportunity. Organizations that invest now +in building capabilities, forming strategic partnerships, and engaging +with the broader ecosystem will be best positioned to capture value as +the field matures. + +--- +*Report generated by AI Research Agent • Powered by TemporalZFS* +`, topic, + 3+r.Intn(3), strings.ToLower(topic), + 3+r.Intn(3), 70+r.Intn(25), + strings.ToLower(topic), + recs.String(), topic)) +} + +func generatePeerReview(topic string, seed int64) []byte { + r := rand.New(rand.NewSource(seed + 400)) + + strengthAdj := strengthAdjs[r.Intn(len(strengthAdjs))] + weaknessAdj := weaknessAdjs[r.Intn(len(weaknessAdjs))] + score := reviewScores[r.Intn(len(reviewScores))] + + titleCase := cases.Title(language.English) + return []byte(fmt.Sprintf(`# Peer Review — %s + +## Reviewer Assessment + +### Strengths + +1. **%s coverage** of the source material, drawing from multiple + high-impact publications spanning the last decade. +2. The fact-checking methodology adds credibility and transparency + to the research process. +3. Clear progression from data gathering through analysis to + actionable recommendations. +4. Risk assessment matrix provides practical decision-making support. + +### Weaknesses + +1. **%s treatment** of some counterarguments and alternative + viewpoints in the field. +2. Some claims in the summary could benefit from more specific + quantitative backing. +3. The recommendation section could be more specific about + implementation timelines and resource requirements. + +### Missing Coverage + +- Industry perspective and commercial applications +- Comparison with competing approaches outside the primary literature +- Long-term (10+ year) trend analysis +- Geographic and cultural variations in adoption + +### Suggestions for Improvement + +1. Include a dedicated section on limitations and potential biases +2. Add a glossary of technical terms for non-specialist readers +3. Provide more granular confidence intervals for key claims +4. Consider adding case studies from early adopters + +## Overall Score: %s/10 + +The report provides a solid foundation for understanding %s. +With the suggested improvements, it would serve as a comprehensive +reference for both technical and strategic decision-makers. + +--- +*Peer review conducted by AI Review Agent • Powered by TemporalZFS* +`, topic, titleCase.String(strengthAdj), titleCase.String(weaknessAdj), score, strings.ToLower(topic))) +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/dashboard.go b/chasm/lib/temporalzfs/examples/research-agent-demo/dashboard.go new file mode 100644 index 0000000000..71cfef5b50 --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/dashboard.go @@ -0,0 +1,277 @@ +package main + +import ( + "fmt" + "os" + "strings" + "sync" + "time" +) + +const ( + maxFeedLines = 24 + refreshRate = 200 * time.Millisecond + boxInnerWidth = 66 + + colorReset = "\033[0m" + colorGreen = "\033[32m" + colorYellow = "\033[33m" + colorRed = "\033[31m" + colorCyan = "\033[36m" + colorBold = "\033[1m" + colorDim = "\033[2m" + cursorHome = "\033[H" + clearScreen = "\033[2J" +) + +// visibleLen returns the display width of a string, ignoring ANSI escape sequences. +func visibleLen(s string) int { + inEsc := false + n := 0 + for _, r := range s { + if r == '\033' { + inEsc = true + continue + } + if inEsc { + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEsc = false + } + continue + } + n++ + } + return n +} + +// boxLine wraps content in ║...║, auto-padding to boxInnerWidth visible chars. +func boxLine(content string) string { + pad := boxInnerWidth - visibleLen(content) + if pad < 0 { + pad = 0 + } + return "║" + content + strings.Repeat(" ", pad) + "║\n" +} + +// FeedEntry is a single line in the live activity feed. +type FeedEntry struct { + TopicSlug string + StepName string + State string // "done", "running", "retry", "failed" + StepIdx string // "1/5", "2/5", etc. + Duration string +} + +// Dashboard renders a live terminal dashboard. +type Dashboard struct { + runner *Runner + startTime time.Time + total int + + mu sync.Mutex + feed []FeedEntry + done chan struct{} +} + +// NewDashboard creates a dashboard that reads events from the runner. +func NewDashboard(runner *Runner, total int) *Dashboard { + return &Dashboard{ + runner: runner, + total: total, + startTime: time.Now(), + feed: make([]FeedEntry, 0, maxFeedLines), + done: make(chan struct{}), + } +} + +// Start begins consuming events and rendering. Call Stop() to end. +func (d *Dashboard) Start() { + // Event consumer goroutine. + go func() { + for ev := range d.runner.EventCh { + d.mu.Lock() + entry := FeedEntry{ + TopicSlug: ev.TopicSlug, + StepName: ev.StepName, + StepIdx: fmt.Sprintf("%d/5", ev.StepIndex+1), + } + switch ev.State { + case "completed": + entry.State = "done" + case "started": + entry.State = "running" + entry.StepName = "WebResearch" + entry.StepIdx = "1/5" + case "retrying": + entry.State = "retry" + default: + entry.State = ev.State + } + d.feed = append(d.feed, entry) + if len(d.feed) > maxFeedLines { + d.feed = d.feed[len(d.feed)-maxFeedLines:] + } + d.mu.Unlock() + } + close(d.done) + }() + + // Render loop goroutine. + go func() { + fmt.Fprint(os.Stdout, clearScreen) + ticker := time.NewTicker(refreshRate) + defer ticker.Stop() + for { + select { + case <-ticker.C: + d.render() + case <-d.done: + d.render() // final render + return + } + } + }() +} + +// Wait blocks until the dashboard is done rendering. +func (d *Dashboard) Wait() { + <-d.done +} + +func (d *Dashboard) render() { + border := strings.Repeat("═", boxInnerWidth) + elapsed := time.Since(d.startTime).Round(time.Second) + started := int(d.runner.stats.Started.Load()) + completed := int(d.runner.stats.Completed.Load()) + failed := int(d.runner.stats.Failed.Load()) + running := started - completed - failed + files := d.runner.stats.FilesCreated.Load() + bytes := d.runner.stats.BytesWritten.Load() + snapshots := d.runner.stats.Snapshots.Load() + retries := d.runner.stats.Retries.Load() + + // Progress bar (30 chars wide to fit in box). + barWidth := 30 + var bar, progressLabel string + if d.total > 0 { + pct := completed * 100 / d.total + filled := barWidth * completed / d.total + bar = strings.Repeat("█", filled) + strings.Repeat("░", barWidth-filled) + progressLabel = fmt.Sprintf("%d/%d %d%%", completed, d.total, pct) + } else { + // Continuous mode — animate a cycling bar. + pos := int(time.Since(d.startTime).Seconds()*4) % barWidth + chars := make([]byte, barWidth) + for i := range chars { + chars[i] = '-' + } + for i := range 4 { + chars[(pos+i)%barWidth] = '=' + } + bar = string(chars) + progressLabel = fmt.Sprintf("%d completed", completed) + } + + // Throughput. + elapsedMin := elapsed.Seconds() / 60.0 + wfPerMin := 0.0 + if elapsedMin > 0.1 { + wfPerMin = float64(completed) / elapsedMin + } + + var b strings.Builder + b.WriteString(cursorHome) + + // Header. + fmt.Fprintf(&b, "%s╔%s╗%s\n", colorBold, border, colorReset) + b.WriteString(boxLine(fmt.Sprintf(" %sTemporalZFS Research Agent Demo%s Elapsed: %5s", colorBold, colorReset, elapsed))) + fmt.Fprintf(&b, "%s╠%s╣%s\n", colorBold, border, colorReset) + b.WriteString(boxLine("")) + + // Progress bar. + b.WriteString(boxLine(fmt.Sprintf(" Progress [%s%s%s] %s%s%s", + colorCyan, bar, colorReset, + colorBold, progressLabel, colorReset))) + b.WriteString(boxLine("")) + + // Status counts. + b.WriteString(boxLine(fmt.Sprintf(" %sRunning: %-4d%s %sCompleted: %-4d%s %sRetries: %-4d%s %sFailed: %d%s", + colorYellow, running, colorReset, + colorGreen, completed, colorReset, + colorRed, retries, colorReset, + colorRed, failed, colorReset))) + b.WriteString(boxLine("")) + + // Throughput section. + b.WriteString(boxLine(fmt.Sprintf(" %s── Throughput ──────────────────────────────────────────────────%s", colorDim, colorReset))) + b.WriteString(boxLine(fmt.Sprintf(" Workflows/min: %s%-6.0f%s Files: %s%-6d%s Snapshots: %s%-6d%s", + colorCyan, wfPerMin, colorReset, + colorCyan, files, colorReset, + colorCyan, snapshots, colorReset))) + b.WriteString(boxLine(fmt.Sprintf(" Data written: %s%-10s%s Total retries: %s%-6d%s", + colorCyan, humanBytes(bytes), colorReset, + colorCyan, retries, colorReset))) + b.WriteString(boxLine("")) + + // Live activity feed. + b.WriteString(boxLine(fmt.Sprintf(" %s── Live Activity Feed ──────────────────────────────────────────%s", colorDim, colorReset))) + + d.mu.Lock() + feed := make([]FeedEntry, len(d.feed)) + copy(feed, d.feed) + d.mu.Unlock() + + for i := range maxFeedLines { + if i < len(feed) { + e := feed[i] + icon, color := stateIcon(e.State) + slug := truncate(e.TopicSlug, 24) + step := truncate(e.StepName, 14) + b.WriteString(boxLine(fmt.Sprintf(" %s%s %-24s %-14s %-7s %s%s", + color, icon, slug, step, e.State, e.StepIdx, colorReset))) + } else { + b.WriteString(boxLine("")) + } + } + + b.WriteString(boxLine("")) + b.WriteString(boxLine(fmt.Sprintf(" Temporal UI: %shttp://localhost:8233%s", colorCyan, colorReset))) + fmt.Fprintf(&b, "%s╚%s╝%s\n", colorBold, border, colorReset) + + fmt.Fprint(os.Stdout, b.String()) +} + +func stateIcon(state string) (string, string) { + switch state { + case "done": + return "✓", colorGreen + case "running": + return "→", colorYellow + case "retry": + return "↻", colorRed + case "failed": + return "✗", colorRed + default: + return "·", colorDim + } +} + +func truncate(s string, max int) string { + if len(s) <= max { + return s + } + return s[:max-1] + "…" +} + +func humanBytes(b int64) string { + switch { + case b >= 1<<30: + return fmt.Sprintf("%.1f GB", float64(b)/float64(1<<30)) + case b >= 1<<20: + return fmt.Sprintf("%.1f MB", float64(b)/float64(1<<20)) + case b >= 1<<10: + return fmt.Sprintf("%.1f KB", float64(b)/float64(1<<10)) + default: + return fmt.Sprintf("%d B", b) + } +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/main.go b/chasm/lib/temporalzfs/examples/research-agent-demo/main.go new file mode 100644 index 0000000000..e7f1ce3e7d --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/main.go @@ -0,0 +1,255 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "log/slog" + "os" + "os/exec" + "os/signal" + "path/filepath" + "runtime" + "syscall" + "time" + + sdkclient "go.temporal.io/sdk/client" + sdklog "go.temporal.io/sdk/log" + "go.temporal.io/sdk/worker" +) + +func main() { + if len(os.Args) < 2 { + printUsage() + os.Exit(1) + } + + switch os.Args[1] { + case "run": + cmdRun(os.Args[2:]) + case "report": + cmdReport(os.Args[2:]) + case "browse": + cmdBrowse(os.Args[2:]) + default: + printUsage() + os.Exit(1) + } +} + +func printUsage() { + fmt.Fprintf(os.Stderr, `TemporalZFS Research Agent Demo + +Usage: + research-agent-demo [flags] + +Commands: + run Run the demo (start workflows, show live dashboard) + report Generate HTML report from completed run + browse Browse a workflow's filesystem + +Run 'research-agent-demo -h' for command-specific help. +`) +} + +func cmdRun(args []string) { + fs := flag.NewFlagSet("run", flag.ExitOnError) + workflows := fs.Int("workflows", 200, "Number of research workflows to run (ignored in continuous mode)") + concurrency := fs.Int("concurrency", 50, "Max concurrent workflows") + failureRate := fs.Float64("failure-rate", 1.0, "Failure rate multiplier (0=none, 2=double)") + dataDir := fs.String("data-dir", "/tmp/tzfs-demo", "PebbleDB data directory") + seed := fs.Int64("seed", 0, "Random seed (0=random)") + taskQueue := fs.String("task-queue", "", "Temporal task queue name (default: research-demo-)") + temporalAddr := fs.String("temporal-addr", "localhost:7233", "Temporal server address") + noDashboard := fs.Bool("no-dashboard", false, "Disable live dashboard") + continuous := fs.Bool("continuous", false, "Run continuously until Ctrl+C, then generate report") + reportOutput := fs.String("report", "", "Auto-generate HTML report on completion (path)") + _ = fs.Parse(args) + + // Set up context with signal handling. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + fmt.Println("\nShutting down gracefully... (waiting for in-flight workflows)") + cancel() + }() + + // Redirect all logs to a file so the dashboard isn't buried. + if err := os.MkdirAll(*dataDir, 0o755); err != nil { + log.Fatalf("Failed to create data dir: %v", err) + } + logPath := filepath.Join(*dataDir, "demo.log") + logFile, err := os.Create(logPath) + if err != nil { + log.Fatalf("Failed to create log file: %v", err) + } + defer logFile.Close() + log.SetOutput(logFile) + sdkLogger := sdklog.NewStructuredLogger(slog.New(slog.NewTextHandler(logFile, &slog.HandlerOptions{ + Level: slog.LevelWarn, + }))) + + // Open shared PebbleDB. + store, err := NewDemoStore(*dataDir) + if err != nil { + log.Fatalf("Failed to open store: %v", err) + } + defer func() { _ = store.Close() }() + + // Connect to Temporal. + c, err := sdkclient.Dial(sdkclient.Options{ + HostPort: *temporalAddr, + Logger: sdkLogger, + }) + if err != nil { + log.Fatalf("Failed to connect to Temporal: %v", err) + } + defer c.Close() + + // Use a unique task queue per run to avoid stale activity task interference + // from previous runs on the same Temporal server. + if *taskQueue == "" { + *taskQueue = fmt.Sprintf("research-demo-%d", time.Now().UnixMilli()) + } + + // Create runner first so activities can share its stats. + runner := NewRunner(c, store, RunConfig{ + Workflows: *workflows, + Concurrency: *concurrency, + FailureRate: *failureRate, + Seed: *seed, + TaskQueue: *taskQueue, + Continuous: *continuous, + }) + + // Start worker with shared stats for real-time retry tracking. + activities := &Activities{baseStore: store.Base(), stats: &runner.stats, eventCh: runner.EventCh} + w := worker.New(c, *taskQueue, worker.Options{ + MaxConcurrentActivityExecutionSize: *concurrency, + }) + w.RegisterWorkflow(ResearchWorkflow) + w.RegisterActivity(activities) + if err := w.Start(); err != nil { + log.Fatalf("Failed to start worker: %v", err) + } + defer w.Stop() + + // Dashboard total: 0 means continuous (dashboard shows "∞"). + dashTotal := *workflows + if *continuous { + dashTotal = 0 + } + + // Start dashboard or drain events to prevent channel blocking. + if !*noDashboard { + dash := NewDashboard(runner, dashTotal) + dash.Start() + defer dash.Wait() + } else { + go func() { + for range runner.EventCh { + } + }() + } + + if *continuous { + fmt.Printf("Running continuously (concurrency=%d, failure-rate=%.1f) — press Ctrl+C to stop\n", + *concurrency, *failureRate) + } else { + fmt.Printf("Starting %d research workflows (concurrency=%d, failure-rate=%.1f)\n", + *workflows, *concurrency, *failureRate) + } + fmt.Printf("Temporal UI: http://localhost:8233\n") + fmt.Printf("Logs: %s\n\n", logPath) + + // Open Temporal UI in browser for continuous mode. + if *continuous { + openBrowser("http://localhost:8233") + } + + // Run workflows. + if err := runner.Run(ctx); err != nil { + log.Printf("Runner error: %v", err) + } + + // Print final summary. + fmt.Printf("\n\n%s=== Demo Complete ===%s\n", colorBold, colorReset) + fmt.Printf("Workflows: %d completed, %d failed\n", + runner.stats.Completed.Load(), runner.stats.Failed.Load()) + fmt.Printf("Files: %d created (%s)\n", + runner.stats.FilesCreated.Load(), humanBytes(runner.stats.BytesWritten.Load())) + fmt.Printf("Snapshots: %d\n", runner.stats.Snapshots.Load()) + fmt.Printf("Retries: %d\n", runner.stats.Retries.Load()) + + // Auto-generate report if requested or in continuous mode. + reportPath := *reportOutput + if reportPath == "" && *continuous { + reportPath = filepath.Join(*dataDir, "report.html") + } + if reportPath != "" { + fmt.Printf("\nGenerating report...\n") + if err := generateHTMLReport(store, reportPath); err != nil { + log.Printf("Failed to generate report: %v", err) + } else { + fmt.Printf("Report generated: %s\n", reportPath) + openBrowser(reportPath) + } + } else { + fmt.Printf("\nGenerate report: go run . report --data-dir %s\n", *dataDir) + } +} + +// openBrowser opens a URL or file in the default browser. +func openBrowser(url string) { + var cmd *exec.Cmd + switch runtime.GOOS { + case "darwin": + cmd = exec.Command("open", url) + case "linux": + cmd = exec.Command("xdg-open", url) + default: + return + } + _ = cmd.Start() +} + +func cmdReport(args []string) { + fs := flag.NewFlagSet("report", flag.ExitOnError) + dataDir := fs.String("data-dir", "/tmp/tzfs-demo", "PebbleDB data directory") + output := fs.String("output", "demo-report.html", "Output HTML file") + _ = fs.Parse(args) + + store, err := NewDemoStoreReadOnly(*dataDir) + if err != nil { + log.Fatalf("Failed to open store: %v", err) + } + defer func() { _ = store.Close() }() + + if err := generateHTMLReport(store, *output); err != nil { + log.Fatalf("Failed to generate report: %v", err) + } + fmt.Printf("Report generated: %s\n", *output) +} + +func cmdBrowse(args []string) { + fs := flag.NewFlagSet("browse", flag.ExitOnError) + dataDir := fs.String("data-dir", "/tmp/tzfs-demo", "PebbleDB data directory") + topic := fs.String("topic", "", "Topic slug to browse (required)") + _ = fs.Parse(args) + + if *topic == "" { + log.Fatal("--topic is required") + } + + store, err := NewDemoStoreReadOnly(*dataDir) + if err != nil { + log.Fatalf("Failed to open store: %v", err) + } + defer func() { _ = store.Close() }() + + browseWorkflow(store, *topic) +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/report.go b/chasm/lib/temporalzfs/examples/research-agent-demo/report.go new file mode 100644 index 0000000000..cee5393895 --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/report.go @@ -0,0 +1,356 @@ +package main + +import ( + "fmt" + "html/template" + "os" + "sort" + "strings" + "time" + + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + "github.com/temporalio/temporal-zfs/pkg/store" +) + +// ReportData is the top-level data structure for the HTML report template. +type ReportData struct { + GeneratedAt string + TotalWFs int + TotalFiles int + TotalSnaps int + TotalBytes string + TotalRetries int + Workflows []ReportWorkflow +} + +// ReportWorkflow describes one workflow's filesystem state for the report. +type ReportWorkflow struct { + TopicName string + TopicSlug string + Files []ReportFile + Snapshots []ReportSnapshot + FileCount int + TotalBytes int64 + Retries int + Status string // "completed", "failed" +} + +// ReportFile represents a file in a workflow's filesystem. +type ReportFile struct { + Path string + Size int64 + Content string +} + +// ReportSnapshot represents a snapshot. +type ReportSnapshot struct { + Name string + Files []string +} + +func generateHTMLReport(ds *DemoStore, outputPath string) error { + manifest, err := ds.LoadManifest() + if err != nil { + return fmt.Errorf("load manifest: %w", err) + } + + var data ReportData + data.GeneratedAt = time.Now().Format(time.RFC3339) + + for _, entry := range manifest { + s := store.NewPrefixedStore(ds.Base(), entry.PartitionID) + f, err := tzfs.Open(s) + if err != nil { + continue // skip broken partitions + } + + status := "completed" + if entry.Failed { + status = "failed" + } + wf := ReportWorkflow{ + TopicName: entry.TopicName, + TopicSlug: entry.TopicSlug, + Retries: entry.Retries, + Status: status, + } + + // Collect files. + wf.Files = collectFiles(f, "/research/"+entry.TopicSlug) + wf.FileCount = len(wf.Files) + for _, file := range wf.Files { + wf.TotalBytes += file.Size + } + + // Collect snapshots. + snapshots, err := f.ListSnapshots() + if err == nil { + for _, snap := range snapshots { + rs := ReportSnapshot{Name: snap.Name} + snapFS, err := f.OpenSnapshot(snap.Name) + if err == nil { + rs.Files = collectFilePaths(snapFS, "/research/"+entry.TopicSlug) + _ = snapFS.Close() + } + wf.Snapshots = append(wf.Snapshots, rs) + } + } + + data.Workflows = append(data.Workflows, wf) + data.TotalFiles += wf.FileCount + data.TotalSnaps += len(wf.Snapshots) + data.TotalRetries += wf.Retries + data.TotalBytes = humanBytes(int64(totalBytesAll(data.Workflows))) + + _ = f.Close() + } + + data.TotalWFs = len(data.Workflows) + data.TotalBytes = humanBytes(int64(totalBytesAll(data.Workflows))) + + // Sort by topic name. + sort.Slice(data.Workflows, func(i, j int) bool { + return data.Workflows[i].TopicName < data.Workflows[j].TopicName + }) + + return writeHTMLReport(data, outputPath) +} + +func totalBytesAll(wfs []ReportWorkflow) int64 { + var total int64 + for _, wf := range wfs { + total += wf.TotalBytes + } + return total +} + +func collectFiles(f *tzfs.FS, dir string) []ReportFile { + var files []ReportFile + entries, err := f.ReadDir(dir) + if err != nil { + return files + } + for _, e := range entries { + path := dir + "/" + e.Name + if e.Type == tzfs.InodeTypeDir { + files = append(files, collectFiles(f, path)...) + } else { + data, err := f.ReadFile(path) + if err != nil { + continue + } + content := string(data) + if len(content) > 2000 { + content = content[:2000] + "\n... (truncated)" + } + files = append(files, ReportFile{ + Path: path, + Size: int64(len(data)), + Content: content, + }) + } + } + return files +} + +func collectFilePaths(f *tzfs.FS, dir string) []string { + var paths []string + entries, err := f.ReadDir(dir) + if err != nil { + return paths + } + for _, e := range entries { + path := dir + "/" + e.Name + if e.Type == tzfs.InodeTypeDir { + paths = append(paths, collectFilePaths(f, path)...) + } else { + paths = append(paths, path) + } + } + return paths +} + +func writeHTMLReport(data ReportData, outputPath string) error { + f, err := os.Create(outputPath) + if err != nil { + return err + } + defer f.Close() + return reportTemplate.Execute(f, data) +} + +var reportTemplate = template.Must(template.New("report").Parse(` + + + +TemporalZFS Demo Report + + + +

TemporalZFS Research Agent Demo

+

Generated {{.GeneratedAt}}

+ +
+
{{.TotalWFs}}
Workflows
+
{{.TotalFiles}}
Files Created
+
{{.TotalSnaps}}
Snapshots
+
{{.TotalRetries}}
Retries Survived
+
{{.TotalBytes}}
Data Written
+
+ +

Workflow Summary

+ + + + {{range .Workflows}} + + + + + + + + + {{end}} + +
TopicFilesSizeSnapshotsRetriesStatus
{{.TopicName}}{{.FileCount}}{{.TotalBytes}} B{{len .Snapshots}} snapshots{{if .Retries}}{{.Retries}} retries{{else}}0{{end}}{{if eq .Status "failed"}}failed{{else}}completed{{end}}
+ +

Filesystem Explorer

+{{range .Workflows}} +
+ {{.TopicName}} {{.FileCount}} files +
+ {{range .Snapshots}} +
+
📸 {{.Name}}
+
    + {{range .Files}}
  • 📄 {{.}}
  • {{end}} +
+
+ {{end}} +

Files (Final State)

+ {{range .Files}} +
+ 📄 {{.Path}} ({{.Size}} B) +
{{.Content}}
+
+ {{end}} +
+
+{{end}} + +
Powered by TemporalZFS — Durable Filesystem for AI Agent Workflows
+ +`)) + +// browseWorkflow prints the directory tree of a specific workflow's filesystem. +func browseWorkflow(ds *DemoStore, topicSlug string) { + manifest, err := ds.LoadManifest() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to load manifest: %v\n", err) + os.Exit(1) + } + + var entry *ManifestEntry + for i := range manifest { + if manifest[i].TopicSlug == topicSlug { + entry = &manifest[i] + break + } + } + if entry == nil { + fmt.Fprintf(os.Stderr, "Topic %q not found. Available topics:\n", topicSlug) + for _, m := range manifest { + fmt.Fprintf(os.Stderr, " %s\n", m.TopicSlug) + } + os.Exit(1) + } + + s := store.NewPrefixedStore(ds.Base(), entry.PartitionID) + f, err := tzfs.Open(s) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to open filesystem: %v\n", err) + os.Exit(1) + } + defer func() { _ = f.Close() }() + + fmt.Printf("%s%s=== %s ===%s\n\n", colorBold, colorCyan, entry.TopicName, colorReset) + + // Print directory tree. + printTree(f, "/", "") + + // Print snapshots. + snapshots, err := f.ListSnapshots() + if err == nil && len(snapshots) > 0 { + fmt.Printf("\n%sSnapshots:%s\n", colorBold, colorReset) + for _, snap := range snapshots { + fmt.Printf(" %s📸 %s%s\n", colorGreen, snap.Name, colorReset) + } + } +} + +func printTree(f *tzfs.FS, dir string, indent string) { + entries, err := f.ReadDir(dir) + if err != nil { + return + } + + for i, e := range entries { + isLast := i == len(entries)-1 + connector := "├── " + if isLast { + connector = "└── " + } + + if e.Type == tzfs.InodeTypeDir { + fmt.Printf("%s%s%s📁 %s%s\n", indent, connector, colorYellow, e.Name, colorReset) + childIndent := indent + "│ " + if isLast { + childIndent = indent + " " + } + subdir := dir + if !strings.HasSuffix(dir, "/") { + subdir += "/" + } + printTree(f, subdir+e.Name, childIndent) + } else { + info, _ := f.Stat(dir + "/" + e.Name) + size := "" + if info != nil { + size = fmt.Sprintf(" (%s)", humanBytes(int64(info.Size))) + } + fmt.Printf("%s%s📄 %s%s%s\n", indent, connector, e.Name, colorDim+size, colorReset) + } + } +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/run-demo.sh b/chasm/lib/temporalzfs/examples/research-agent-demo/run-demo.sh new file mode 100755 index 0000000000..cbb09f4d13 --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/run-demo.sh @@ -0,0 +1,161 @@ +#!/usr/bin/env bash +set -euo pipefail + +# TemporalZFS Research Agent Demo — end-to-end runner +# Usage: ./run-demo.sh [--workflows N] [--concurrency N] [--failure-rate F] [--seed S] + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +WORKFLOWS=200 +CONCURRENCY=50 +FAILURE_RATE=1.0 +SEED=12345 +DATA_DIR="/tmp/tzfs-demo" +TEMPORAL_ADDR="localhost:7233" +TEMPORAL_PID="" +CONTINUOUS="" + +# Parse flags. +while [[ $# -gt 0 ]]; do + case $1 in + --workflows) WORKFLOWS="$2"; shift 2 ;; + --concurrency) CONCURRENCY="$2"; shift 2 ;; + --failure-rate) FAILURE_RATE="$2"; shift 2 ;; + --seed) SEED="$2"; shift 2 ;; + --data-dir) DATA_DIR="$2"; shift 2 ;; + --continuous) CONTINUOUS="true"; shift ;; + -h|--help) + echo "Usage: $0 [--workflows N] [--concurrency N] [--failure-rate F] [--seed S] [--data-dir DIR] [--continuous]" + exit 0 + ;; + *) echo "Unknown flag: $1"; exit 1 ;; + esac +done + +DEMO_BIN="/tmp/research-demo-$$" +REPORT_HTML="${SCRIPT_DIR}/report.html" + +cleanup() { + echo "" + echo "Cleaning up..." + if [[ -n "$TEMPORAL_PID" ]] && kill -0 "$TEMPORAL_PID" 2>/dev/null; then + kill "$TEMPORAL_PID" 2>/dev/null || true + wait "$TEMPORAL_PID" 2>/dev/null || true + echo " Temporal dev server stopped." + fi + rm -f "$DEMO_BIN" + echo "Done." +} +trap cleanup EXIT + +# Colors. +BOLD="\033[1m" +CYAN="\033[36m" +GREEN="\033[32m" +YELLOW="\033[33m" +DIM="\033[2m" +RESET="\033[0m" + +step() { + echo "" + echo -e "${BOLD}${CYAN}═══ $1 ═══${RESET}" + echo "" +} + +# ───────────────────────────────────────────────────────────── +step "Step 1: Build the demo" + +echo " Building from ${SCRIPT_DIR}..." +(cd "$SCRIPT_DIR" && go build -o "$DEMO_BIN" .) +echo -e " ${GREEN}Build successful.${RESET}" + +# ───────────────────────────────────────────────────────────── +step "Step 2: Start Temporal dev server" + +if temporal workflow list --address "$TEMPORAL_ADDR" >/dev/null 2>&1; then + echo -e " ${YELLOW}Temporal server already running at ${TEMPORAL_ADDR}.${RESET}" +else + echo " Starting temporal server start-dev..." + temporal server start-dev --port 7233 --ui-port 8233 2>/dev/null & + TEMPORAL_PID=$! + # Wait for server to be ready. + for _ in $(seq 1 30); do + if temporal workflow list --address "$TEMPORAL_ADDR" >/dev/null 2>&1; then + break + fi + sleep 1 + done + if ! temporal workflow list --address "$TEMPORAL_ADDR" >/dev/null 2>&1; then + echo " ERROR: Temporal server failed to start after 30 seconds." + exit 1 + fi + echo -e " ${GREEN}Temporal server ready.${RESET}" +fi +echo -e " Temporal UI: ${CYAN}http://localhost:8233${RESET}" + +# ───────────────────────────────────────────────────────────── +if [[ -n "$CONTINUOUS" ]]; then + step "Step 3: Run research agent workflows (continuous mode — Ctrl+C to stop)" +else + step "Step 3: Run ${WORKFLOWS} research agent workflows" +fi + +rm -rf "$DATA_DIR" +echo -e " ${DIM}Workflows: ${WORKFLOWS} Concurrency: ${CONCURRENCY} Failure rate: ${FAILURE_RATE} Seed: ${SEED}${RESET}" +echo "" + +RUN_FLAGS=( + --concurrency "$CONCURRENCY" + --failure-rate "$FAILURE_RATE" + --seed "$SEED" + --data-dir "$DATA_DIR" +) +if [[ -n "$CONTINUOUS" ]]; then + RUN_FLAGS+=(--continuous) +else + RUN_FLAGS+=(--workflows "$WORKFLOWS") +fi + +"$DEMO_BIN" run "${RUN_FLAGS[@]}" + +# ───────────────────────────────────────────────────────────── +step "Step 4: Temporal workflow list" + +echo " Total workflows in Temporal:" +temporal workflow count --address "$TEMPORAL_ADDR" +echo "" +echo " Last 5 completed:" +temporal workflow list --address "$TEMPORAL_ADDR" --limit 5 + +# ───────────────────────────────────────────────────────────── +step "Step 5: Browse a workflow's filesystem" + +echo -e " ${DIM}Topic: quantum-computing${RESET}" +echo "" +"$DEMO_BIN" browse --data-dir "$DATA_DIR" --topic quantum-computing 2>/dev/null + +# ───────────────────────────────────────────────────────────── +step "Step 6: Generate HTML report" + +"$DEMO_BIN" report --data-dir "$DATA_DIR" --output "$REPORT_HTML" 2>/dev/null +echo -e " Report: ${CYAN}${REPORT_HTML}${RESET}" +echo -e " Size: $(du -h "$REPORT_HTML" | cut -f1)" + +# Open report if on macOS. +if command -v open &>/dev/null; then + echo "" + echo -e " ${DIM}Opening report in browser...${RESET}" + open "$REPORT_HTML" +fi + +# ───────────────────────────────────────────────────────────── +step "Demo complete" + +echo -e " Data directory: ${DATA_DIR} ($(du -sh "$DATA_DIR" | cut -f1))" +echo -e " HTML report: ${REPORT_HTML}" +echo -e " Temporal UI: ${CYAN}http://localhost:8233${RESET}" +echo "" +echo -e " ${DIM}To browse another topic:${RESET}" +echo " $DEMO_BIN browse --data-dir $DATA_DIR --topic " +echo "" +echo -e " ${DIM}To re-run with the live dashboard:${RESET}" +echo " $DEMO_BIN run --workflows $WORKFLOWS --concurrency $CONCURRENCY --data-dir /tmp/tzfs-demo-live" diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/runner.go b/chasm/lib/temporalzfs/examples/research-agent-demo/runner.go new file mode 100644 index 0000000000..42105a122a --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/runner.go @@ -0,0 +1,231 @@ +package main + +import ( + "context" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + sdkclient "go.temporal.io/sdk/client" +) + +// WorkflowEvent describes a state change in a running workflow. +type WorkflowEvent struct { + TopicSlug string + StepIndex int // 0-4 + StepName string // "WebResearch", etc. + State string // "started", "completed", "retrying", "failed" + Attempt int + Timestamp time.Time +} + +// RunConfig holds configuration for the scale runner. +type RunConfig struct { + Workflows int + Concurrency int + FailureRate float64 + Seed int64 + TaskQueue string + Continuous bool // keep running until cancelled +} + +// RunStats tracks aggregate statistics across all workflows. +type RunStats struct { + Started atomic.Int64 + Completed atomic.Int64 + Failed atomic.Int64 + FilesCreated atomic.Int64 + BytesWritten atomic.Int64 + Snapshots atomic.Int64 + Retries atomic.Int64 +} + +// Runner starts and monitors N workflows via the Temporal SDK. +type Runner struct { + client sdkclient.Client + store *DemoStore + config RunConfig + stats RunStats + + EventCh chan WorkflowEvent +} + +// NewRunner creates a runner that will start workflows against the given Temporal client. +func NewRunner(client sdkclient.Client, store *DemoStore, config RunConfig) *Runner { + bufSize := config.Workflows * 5 + if config.Continuous { + bufSize = config.Concurrency * 10 + } + return &Runner{ + client: client, + store: store, + config: config, + EventCh: make(chan WorkflowEvent, bufSize), + } +} + +// Run starts workflows and waits for completion. In continuous mode, it keeps +// starting new workflows until the context is cancelled, then waits for in-flight +// workflows to finish. In fixed mode, it runs exactly config.Workflows workflows. +func (r *Runner) Run(ctx context.Context) error { + sem := make(chan struct{}, r.config.Concurrency) + var wg sync.WaitGroup + + seed := r.config.Seed + if seed == 0 { + seed = time.Now().UnixNano() + } + rng := rand.New(rand.NewSource(seed)) + + limit := r.config.Workflows + if r.config.Continuous { + limit = 0 // no limit + } + + // Pre-create all FS partitions before starting any workflows. + // This ensures all superblocks and root inodes exist in PebbleDB before + // any concurrent activity reads/writes, avoiding visibility issues. + if !r.config.Continuous { + for i := 0; i < limit; i++ { + topic := TopicForIndex(i) + partitionID := uint64(i + 1) + if err := r.store.CreatePartition(partitionID); err != nil { + return fmt.Errorf("create partition for %s: %w", topic.Slug, err) + } + if err := r.store.RegisterWorkflow(partitionID, topic); err != nil { + return fmt.Errorf("register workflow %s: %w", topic.Slug, err) + } + } + } + +loop: + for i := 0; limit == 0 || i < limit; i++ { + if ctx.Err() != nil { + break + } + + topic := TopicForIndex(i) + partitionID := uint64(i + 1) // must be >0 + + // In continuous mode, create partitions on the fly. + if r.config.Continuous { + if err := r.store.CreatePartition(partitionID); err != nil { + return fmt.Errorf("create partition for %s: %w", topic.Slug, err) + } + if err := r.store.RegisterWorkflow(partitionID, topic); err != nil { + return fmt.Errorf("register workflow %s: %w", topic.Slug, err) + } + } + + params := WorkflowParams{ + TopicName: topic.Name, + TopicSlug: topic.Slug, + PartitionID: partitionID, + FailureRate: r.config.FailureRate, + Seed: rng.Int63(), + } + + wg.Add(1) + r.stats.Started.Add(1) + + // Acquire semaphore — in continuous mode, also check for cancellation. + select { + case sem <- struct{}{}: + case <-ctx.Done(): + wg.Done() + r.stats.Started.Add(-1) + break loop + } + + go func() { + defer wg.Done() + defer func() { <-sem }() + r.runOne(ctx, params) + }() + } + + if r.config.Continuous { + // Wait for in-flight workflows to finish. + fmt.Printf("\n Waiting for %d in-flight workflows to complete...\n", + r.stats.Started.Load()-r.stats.Completed.Load()-r.stats.Failed.Load()) + } + + wg.Wait() + close(r.EventCh) + return nil +} + +// emitEvent sends a workflow event without blocking. If the channel is full +// the event is dropped to avoid stalling goroutines that hold the semaphore. +func (r *Runner) emitEvent(ev WorkflowEvent) { + select { + case r.EventCh <- ev: + default: + } +} + +func (r *Runner) runOne(ctx context.Context, params WorkflowParams) { + // Include task queue (which has a per-run timestamp) to avoid ID + // collisions with workflows from previous runs on the same server. + workflowID := r.config.TaskQueue + "-" + params.TopicSlug + + r.emitEvent(WorkflowEvent{ + TopicSlug: params.TopicSlug, + State: "started", + Timestamp: time.Now(), + }) + + run, err := r.client.ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{ + ID: workflowID, + TaskQueue: r.config.TaskQueue, + }, ResearchWorkflow, params) + if err != nil { + // Context cancellation (Ctrl+C) is not a failure — just stop tracking. + if ctx.Err() != nil { + r.stats.Started.Add(-1) + return + } + r.stats.Failed.Add(1) + r.emitEvent(WorkflowEvent{ + TopicSlug: params.TopicSlug, + State: "failed", + Timestamp: time.Now(), + }) + return + } + + var result WorkflowResult + if err := run.Get(ctx, &result); err != nil { + // Context cancellation (Ctrl+C) means we stopped waiting, not that + // the workflow failed. Don't count these as failures. + if ctx.Err() != nil { + r.stats.Started.Add(-1) + return + } + r.stats.Failed.Add(1) + _ = r.store.UpdateWorkflowResult(params.TopicSlug, result, true) + r.emitEvent(WorkflowEvent{ + TopicSlug: params.TopicSlug, + State: "failed", + Timestamp: time.Now(), + }) + return + } + + r.stats.Completed.Add(1) + r.stats.FilesCreated.Add(int64(result.FilesCreated)) + r.stats.BytesWritten.Add(result.BytesWritten) + r.stats.Snapshots.Add(int64(result.SnapshotCount)) + r.stats.Retries.Add(int64(result.Retries)) + _ = r.store.UpdateWorkflowResult(params.TopicSlug, result, false) + + r.emitEvent(WorkflowEvent{ + TopicSlug: params.TopicSlug, + StepIndex: 4, + StepName: "PeerReview", + State: "completed", + Timestamp: time.Now(), + }) +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/store.go b/chasm/lib/temporalzfs/examples/research-agent-demo/store.go new file mode 100644 index 0000000000..43727aaf3b --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/store.go @@ -0,0 +1,135 @@ +package main + +import ( + "encoding/json" + "fmt" + "sync" + + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + "github.com/temporalio/temporal-zfs/pkg/store" + pebblestore "github.com/temporalio/temporal-zfs/pkg/store/pebble" +) + +const manifestKey = "__demo_manifest__" + +// ManifestEntry records the mapping from partition ID to topic for the report/browse commands. +// After workflow completion, result fields are populated for the HTML report. +type ManifestEntry struct { + PartitionID uint64 `json:"partition_id"` + TopicName string `json:"topic_name"` + TopicSlug string `json:"topic_slug"` + FilesCreated int `json:"files_created,omitempty"` + BytesWritten int64 `json:"bytes_written,omitempty"` + Retries int `json:"retries,omitempty"` + Completed bool `json:"completed,omitempty"` + Failed bool `json:"failed,omitempty"` +} + +// DemoStore wraps a shared PebbleDB and provides per-workflow isolated stores. +type DemoStore struct { + base *pebblestore.Store + + mu sync.Mutex + manifest []ManifestEntry +} + +// NewDemoStore opens a PebbleDB at the given path with NoSync for throughput. +func NewDemoStore(path string) (*DemoStore, error) { + s, err := pebblestore.NewNoSync(path) + if err != nil { + return nil, fmt.Errorf("open pebble store: %w", err) + } + return &DemoStore{base: s}, nil +} + +// NewDemoStoreReadOnly opens a PebbleDB in read-only mode for report/browse. +func NewDemoStoreReadOnly(path string) (*DemoStore, error) { + s, err := pebblestore.NewReadOnly(path) + if err != nil { + return nil, fmt.Errorf("open pebble store read-only: %w", err) + } + return &DemoStore{base: s}, nil +} + +// Base returns the underlying store for direct access (e.g., manifest ops). +func (ds *DemoStore) Base() store.Store { + return ds.base +} + +// StoreForWorkflow returns a PrefixedStore isolated to the given partition ID. +// The caller must NOT call Close() on the returned store. +func (ds *DemoStore) StoreForWorkflow(partitionID uint64) store.Store { + return store.NewPrefixedStore(ds.base, partitionID) +} + +// RegisterWorkflow adds a workflow to the manifest and persists it. +func (ds *DemoStore) RegisterWorkflow(partitionID uint64, topic TopicEntry) error { + ds.mu.Lock() + ds.manifest = append(ds.manifest, ManifestEntry{ + PartitionID: partitionID, + TopicName: topic.Name, + TopicSlug: topic.Slug, + }) + data, err := json.Marshal(ds.manifest) + ds.mu.Unlock() + if err != nil { + return err + } + return ds.base.Set([]byte(manifestKey), data) +} + +// UpdateWorkflowResult updates a manifest entry with the workflow's result data. +func (ds *DemoStore) UpdateWorkflowResult(topicSlug string, result WorkflowResult, failed bool) error { + ds.mu.Lock() + for i := range ds.manifest { + if ds.manifest[i].TopicSlug == topicSlug { + ds.manifest[i].FilesCreated = result.FilesCreated + ds.manifest[i].BytesWritten = result.BytesWritten + ds.manifest[i].Retries = result.Retries + ds.manifest[i].Completed = !failed + ds.manifest[i].Failed = failed + break + } + } + data, err := json.Marshal(ds.manifest) + ds.mu.Unlock() + if err != nil { + return err + } + return ds.base.Set([]byte(manifestKey), data) +} + +// LoadManifest reads the manifest from the store. +func (ds *DemoStore) LoadManifest() ([]ManifestEntry, error) { + data, err := ds.base.Get([]byte(manifestKey)) + if err != nil { + return nil, fmt.Errorf("read manifest: %w", err) + } + var entries []ManifestEntry + if err := json.Unmarshal(data, &entries); err != nil { + return nil, fmt.Errorf("unmarshal manifest: %w", err) + } + return entries, nil +} + +// CreatePartition pre-creates a TemporalZFS partition so the superblock exists +// before any Temporal activity tries to open it. This avoids race conditions +// under concurrent PebbleDB access where Open() may not see a recently +// committed superblock from a different goroutine. +func (ds *DemoStore) CreatePartition(partitionID uint64) error { + s := store.NewPrefixedStore(ds.base, partitionID) + // Try to open first — partition may already exist from a prior run. + f, err := tzfs.Open(s) + if err != nil { + f, err = tzfs.Create(s, tzfs.Options{ChunkSize: 64 * 1024}) + if err != nil { + return fmt.Errorf("create partition %d: %w", partitionID, err) + } + } + return f.Close() +} + +// Close closes the underlying PebbleDB. +func (ds *DemoStore) Close() error { + return ds.base.Close() +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/topics.go b/chasm/lib/temporalzfs/examples/research-agent-demo/topics.go new file mode 100644 index 0000000000..30a15bc7b1 --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/topics.go @@ -0,0 +1,152 @@ +package main + +import "fmt" + +// TopicEntry holds a research topic with its display name and URL-safe slug. +type TopicEntry struct { + Name string + Slug string +} + +// Topics is a curated list of research topics spanning science, technology, +// policy, medicine, and engineering. The demo runner picks from this list +// and wraps with a numeric suffix when more workflows are needed. +var Topics = []TopicEntry{ + // — Computer Science & AI — + {"Quantum Computing", "quantum-computing"}, + {"Large Language Models", "large-language-models"}, + {"Reinforcement Learning", "reinforcement-learning"}, + {"Federated Learning", "federated-learning"}, + {"Neuromorphic Computing", "neuromorphic-computing"}, + {"Homomorphic Encryption", "homomorphic-encryption"}, + {"Zero-Knowledge Proofs", "zero-knowledge-proofs"}, + {"Autonomous Vehicles", "autonomous-vehicles"}, + {"Computer Vision", "computer-vision"}, + {"Natural Language Processing", "natural-language-processing"}, + {"Robotics and Automation", "robotics-and-automation"}, + {"Edge Computing", "edge-computing"}, + {"Blockchain Consensus Mechanisms", "blockchain-consensus"}, + {"Differential Privacy", "differential-privacy"}, + {"AI Safety and Alignment", "ai-safety"}, + {"Explainable AI", "explainable-ai"}, + {"Generative Adversarial Networks", "generative-adversarial-networks"}, + {"Graph Neural Networks", "graph-neural-networks"}, + {"Swarm Intelligence", "swarm-intelligence"}, + {"Automated Theorem Proving", "automated-theorem-proving"}, + + // — Biology & Medicine — + {"CRISPR Gene Editing", "crispr-gene-editing"}, + {"mRNA Therapeutics", "mrna-therapeutics"}, + {"Synthetic Biology", "synthetic-biology"}, + {"Microbiome Research", "microbiome-research"}, + {"Protein Folding", "protein-folding"}, + {"CAR-T Cell Therapy", "car-t-cell-therapy"}, + {"Epigenetics", "epigenetics"}, + {"Brain-Computer Interfaces", "brain-computer-interfaces"}, + {"Longevity Research", "longevity-research"}, + {"Pandemic Preparedness", "pandemic-preparedness"}, + {"Antibiotic Resistance", "antibiotic-resistance"}, + {"Stem Cell Therapy", "stem-cell-therapy"}, + {"Precision Medicine", "precision-medicine"}, + {"Optogenetics", "optogenetics"}, + {"Gut-Brain Axis", "gut-brain-axis"}, + {"Vaccine Development", "vaccine-development"}, + {"Regenerative Medicine", "regenerative-medicine"}, + {"Immunotherapy", "immunotherapy"}, + {"Bioprinting", "bioprinting"}, + {"Pharmacogenomics", "pharmacogenomics"}, + + // — Physics & Space — + {"Dark Matter Detection", "dark-matter-detection"}, + {"Fusion Energy", "fusion-energy"}, + {"Gravitational Waves", "gravitational-waves"}, + {"Exoplanet Habitability", "exoplanet-habitability"}, + {"Space Debris Mitigation", "space-debris-mitigation"}, + {"Quantum Gravity", "quantum-gravity"}, + {"Neutrino Physics", "neutrino-physics"}, + {"Topological Materials", "topological-materials"}, + {"Superconductivity", "superconductivity"}, + {"Asteroid Mining", "asteroid-mining"}, + {"Mars Colonization", "mars-colonization"}, + {"Solar Sail Propulsion", "solar-sail-propulsion"}, + {"Cosmic Microwave Background", "cosmic-microwave-background"}, + {"Black Hole Information Paradox", "black-hole-information-paradox"}, + {"Plasma Physics", "plasma-physics"}, + + // — Energy & Environment — + {"Climate Change Modeling", "climate-change-modeling"}, + {"Carbon Capture", "carbon-capture"}, + {"Ocean Acidification", "ocean-acidification"}, + {"Solid-State Batteries", "solid-state-batteries"}, + {"Hydrogen Economy", "hydrogen-economy"}, + {"Perovskite Solar Cells", "perovskite-solar-cells"}, + {"Wind Energy Optimization", "wind-energy-optimization"}, + {"Geothermal Energy", "geothermal-energy"}, + {"Biodiversity Loss", "biodiversity-loss"}, + {"Coral Reef Restoration", "coral-reef-restoration"}, + {"Desalination Technology", "desalination-technology"}, + {"Smart Grid Systems", "smart-grid-systems"}, + {"Circular Economy", "circular-economy"}, + {"Arctic Ice Dynamics", "arctic-ice-dynamics"}, + {"Wildfire Prediction", "wildfire-prediction"}, + + // — Engineering & Materials — + {"Metamaterials", "metamaterials"}, + {"Additive Manufacturing", "additive-manufacturing"}, + {"Self-Healing Materials", "self-healing-materials"}, + {"Graphene Applications", "graphene-applications"}, + {"Digital Twins", "digital-twins"}, + {"Soft Robotics", "soft-robotics"}, + {"Autonomous Drones", "autonomous-drones"}, + {"Hyperloop Transport", "hyperloop-transport"}, + {"Vertical Farming", "vertical-farming"}, + {"Lab-Grown Meat", "lab-grown-meat"}, + {"Quantum Sensors", "quantum-sensors"}, + {"Wearable Health Tech", "wearable-health-tech"}, + {"Nuclear Microreactors", "nuclear-microreactors"}, + {"Photonic Computing", "photonic-computing"}, + {"Flexible Electronics", "flexible-electronics"}, + + // — Social Sciences & Policy — + {"Universal Basic Income", "universal-basic-income"}, + {"Digital Currency Policy", "digital-currency-policy"}, + {"Misinformation Detection", "misinformation-detection"}, + {"Algorithmic Fairness", "algorithmic-fairness"}, + {"Cybersecurity Frameworks", "cybersecurity-frameworks"}, + {"Data Sovereignty", "data-sovereignty"}, + {"Post-Quantum Cryptography", "post-quantum-cryptography"}, + {"Smart Cities", "smart-cities"}, + {"Digital Identity Systems", "digital-identity-systems"}, + {"Open Source Intelligence", "open-source-intelligence"}, + {"Supply Chain Resilience", "supply-chain-resilience"}, + {"Telemedicine Adoption", "telemedicine-adoption"}, + {"EdTech and Learning Science", "edtech-learning-science"}, + {"Remote Work Productivity", "remote-work-productivity"}, + {"Autonomous Weapons Policy", "autonomous-weapons-policy"}, + + // — Mathematics & Theory — + {"Topological Data Analysis", "topological-data-analysis"}, + {"Causal Inference", "causal-inference"}, + {"Information Theory", "information-theory"}, + {"Complexity Theory", "complexity-theory"}, + {"Category Theory Applications", "category-theory-applications"}, + {"Bayesian Optimization", "bayesian-optimization"}, + {"Numerical Weather Prediction", "numerical-weather-prediction"}, + {"Network Science", "network-science"}, + {"Chaos Theory Applications", "chaos-theory-applications"}, + {"Computational Geometry", "computational-geometry"}, +} + +// TopicForIndex returns a topic for the given index, wrapping with a numeric +// suffix when the index exceeds the topic list length. +func TopicForIndex(i int) TopicEntry { + if i < len(Topics) { + return Topics[i] + } + base := Topics[i%len(Topics)] + cycle := i/len(Topics) + 1 + return TopicEntry{ + Name: fmt.Sprintf("%s (%d)", base.Name, cycle), + Slug: fmt.Sprintf("%s-%d", base.Slug, cycle), + } +} diff --git a/chasm/lib/temporalzfs/examples/research-agent-demo/workflow.go b/chasm/lib/temporalzfs/examples/research-agent-demo/workflow.go new file mode 100644 index 0000000000..cd7abdcd35 --- /dev/null +++ b/chasm/lib/temporalzfs/examples/research-agent-demo/workflow.go @@ -0,0 +1,79 @@ +package main + +import ( + "time" + + "go.temporal.io/sdk/temporal" + "go.temporal.io/sdk/workflow" +) + +// WorkflowParams is the input to the research agent workflow. +type WorkflowParams struct { + TopicName string `json:"topic_name"` + TopicSlug string `json:"topic_slug"` + PartitionID uint64 `json:"partition_id"` + FailureRate float64 `json:"failure_rate"` + Seed int64 `json:"seed"` +} + +// StepResult is the output of each activity. +type StepResult struct { + FilesCreated int `json:"files_created"` + BytesWritten int64 `json:"bytes_written"` + Retries int `json:"retries"` +} + +// WorkflowResult aggregates results across all activities. +type WorkflowResult struct { + TopicSlug string `json:"topic_slug"` + FilesCreated int `json:"files_created"` + BytesWritten int64 `json:"bytes_written"` + SnapshotCount int `json:"snapshot_count"` + Retries int `json:"retries"` +} + +// ResearchWorkflow chains 5 activities to research a topic, each producing +// files and an MVCC snapshot in the workflow's isolated TemporalZFS partition. +func ResearchWorkflow(ctx workflow.Context, params WorkflowParams) (WorkflowResult, error) { + ao := workflow.ActivityOptions{ + StartToCloseTimeout: 60 * time.Second, + RetryPolicy: &temporal.RetryPolicy{ + InitialInterval: 500 * time.Millisecond, + BackoffCoefficient: 1.5, + MaximumAttempts: 5, + }, + } + ctx = workflow.WithActivityOptions(ctx, ao) + + var a *Activities + var result WorkflowResult + result.TopicSlug = params.TopicSlug + + steps := []struct { + fn func(ctx workflow.Context) workflow.Future + name string + }{ + {func(ctx workflow.Context) workflow.Future { + return workflow.ExecuteActivity(ctx, a.WebResearch, params) + }, "WebResearch"}, + {func(ctx workflow.Context) workflow.Future { return workflow.ExecuteActivity(ctx, a.Summarize, params) }, "Summarize"}, + {func(ctx workflow.Context) workflow.Future { return workflow.ExecuteActivity(ctx, a.FactCheck, params) }, "FactCheck"}, + {func(ctx workflow.Context) workflow.Future { + return workflow.ExecuteActivity(ctx, a.FinalReport, params) + }, "FinalReport"}, + {func(ctx workflow.Context) workflow.Future { return workflow.ExecuteActivity(ctx, a.PeerReview, params) }, "PeerReview"}, + } + + for _, step := range steps { + var sr StepResult + if err := step.fn(ctx).Get(ctx, &sr); err != nil { + return result, err + } + result.FilesCreated += sr.FilesCreated + result.BytesWritten += sr.BytesWritten + result.Retries += sr.Retries + result.SnapshotCount++ + } + + return result, nil +} diff --git a/chasm/lib/temporalzfs/filesystem.go b/chasm/lib/temporalzfs/filesystem.go new file mode 100644 index 0000000000..0f87180de8 --- /dev/null +++ b/chasm/lib/temporalzfs/filesystem.go @@ -0,0 +1,50 @@ +package temporalzfs + +import ( + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" +) + +var _ chasm.RootComponent = (*Filesystem)(nil) + +// Filesystem is the root CHASM component for the TemporalZFS archetype. +// FS layer data (inodes, chunks, directory entries) is stored in a dedicated +// store managed by FSStoreProvider, not as CHASM Fields. Only FS metadata +// (config, stats, lifecycle) lives in CHASM state. +type Filesystem struct { + chasm.UnimplementedComponent + + *temporalzfspb.FilesystemState + + Visibility chasm.Field[*chasm.Visibility] +} + +// LifecycleState implements chasm.Component. +func (f *Filesystem) LifecycleState(_ chasm.Context) chasm.LifecycleState { + switch f.Status { + case temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, + temporalzfspb.FILESYSTEM_STATUS_DELETED: + return chasm.LifecycleStateCompleted + default: + return chasm.LifecycleStateRunning + } +} + +// Terminate implements chasm.RootComponent. +func (f *Filesystem) Terminate( + ctx chasm.MutableContext, + _ chasm.TerminateComponentRequest, +) (chasm.TerminateComponentResponse, error) { + f.Status = temporalzfspb.FILESYSTEM_STATUS_DELETED + ctx.AddTask(f, chasm.TaskAttributes{ + ScheduledTime: chasm.TaskScheduledTimeImmediate, + }, &temporalzfspb.DataCleanupTask{}) + return chasm.TerminateComponentResponse{}, nil +} + +// SearchAttributes implements chasm.VisibilitySearchAttributesProvider. +func (f *Filesystem) SearchAttributes(_ chasm.Context) []chasm.SearchAttributeKeyValue { + return []chasm.SearchAttributeKeyValue{ + statusSearchAttribute.Value(f.GetStatus().String()), + } +} diff --git a/chasm/lib/temporalzfs/filesystem_test.go b/chasm/lib/temporalzfs/filesystem_test.go new file mode 100644 index 0000000000..f438c48938 --- /dev/null +++ b/chasm/lib/temporalzfs/filesystem_test.go @@ -0,0 +1,75 @@ +package temporalzfs + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" +) + +func TestLifecycleState(t *testing.T) { + testCases := []struct { + name string + status temporalzfspb.FilesystemStatus + expected chasm.LifecycleState + }{ + {"UNSPECIFIED is Running", temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED, chasm.LifecycleStateRunning}, + {"RUNNING is Running", temporalzfspb.FILESYSTEM_STATUS_RUNNING, chasm.LifecycleStateRunning}, + {"ARCHIVED is Completed", temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, chasm.LifecycleStateCompleted}, + {"DELETED is Completed", temporalzfspb.FILESYSTEM_STATUS_DELETED, chasm.LifecycleStateCompleted}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{Status: tc.status}, + } + require.Equal(t, tc.expected, fs.LifecycleState(nil)) + }) + } +} + +func TestTerminate(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + } + + resp, err := fs.Terminate(ctx, chasm.TerminateComponentRequest{}) + require.NoError(t, err) + require.Equal(t, chasm.TerminateComponentResponse{}, resp) + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_DELETED, fs.Status) + // Verify DataCleanupTask is scheduled. + require.Len(t, ctx.Tasks, 1) + require.IsType(t, &temporalzfspb.DataCleanupTask{}, ctx.Tasks[0].Payload) +} + +func TestSearchAttributes(t *testing.T) { + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + } + + attrs := fs.SearchAttributes(nil) + require.Len(t, attrs, 1) +} + +func TestStateMachineState(t *testing.T) { + // Nil FilesystemState returns UNSPECIFIED. + fs := &Filesystem{} + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED, fs.StateMachineState()) + + // Non-nil returns the actual status. + fs.FilesystemState = &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + } + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_RUNNING, fs.StateMachineState()) + + // SetStateMachineState works. + fs.SetStateMachineState(temporalzfspb.FILESYSTEM_STATUS_ARCHIVED) + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, fs.Status) +} diff --git a/chasm/lib/temporalzfs/fx.go b/chasm/lib/temporalzfs/fx.go new file mode 100644 index 0000000000..730185f26a --- /dev/null +++ b/chasm/lib/temporalzfs/fx.go @@ -0,0 +1,46 @@ +package temporalzfs + +import ( + "context" + "os" + "path/filepath" + + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/log" + "go.uber.org/fx" +) + +var HistoryModule = fx.Module( + "temporalzfs-history", + fx.Provide( + ConfigProvider, + fx.Annotate( + func(lc fx.Lifecycle, logger log.Logger) FSStoreProvider { + dataDir := filepath.Join(os.TempDir(), "temporalzfs") + provider := NewPebbleStoreProvider(dataDir, logger) + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return provider.Close() + }, + }) + return provider + }, + fx.As(new(FSStoreProvider)), + ), + fx.Annotate( + newNoopWorkflowExistenceChecker, + fx.As(new(WorkflowExistenceChecker)), + ), + newTFSPostDeleteHook, + newHandler, + newChunkGCTaskExecutor, + newManifestCompactTaskExecutor, + newQuotaCheckTaskExecutor, + newOwnerCheckTaskExecutor, + newDataCleanupTaskExecutor, + newLibrary, + ), + fx.Invoke(func(l *library, registry *chasm.Registry) error { + return registry.Register(l) + }), +) diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/request_response.go-helpers.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/request_response.go-helpers.pb.go new file mode 100644 index 0000000000..f3f46c7635 --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/request_response.go-helpers.pb.go @@ -0,0 +1,1782 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package temporalzfspb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type CreateFilesystemRequest to the protobuf v3 wire format +func (val *CreateFilesystemRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateFilesystemRequest from the protobuf v3 wire format +func (val *CreateFilesystemRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateFilesystemRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateFilesystemRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateFilesystemRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateFilesystemRequest + switch t := that.(type) { + case *CreateFilesystemRequest: + that1 = t + case CreateFilesystemRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateFilesystemResponse to the protobuf v3 wire format +func (val *CreateFilesystemResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateFilesystemResponse from the protobuf v3 wire format +func (val *CreateFilesystemResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateFilesystemResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateFilesystemResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateFilesystemResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateFilesystemResponse + switch t := that.(type) { + case *CreateFilesystemResponse: + that1 = t + case CreateFilesystemResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetFilesystemInfoRequest to the protobuf v3 wire format +func (val *GetFilesystemInfoRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetFilesystemInfoRequest from the protobuf v3 wire format +func (val *GetFilesystemInfoRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetFilesystemInfoRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetFilesystemInfoRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetFilesystemInfoRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetFilesystemInfoRequest + switch t := that.(type) { + case *GetFilesystemInfoRequest: + that1 = t + case GetFilesystemInfoRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetFilesystemInfoResponse to the protobuf v3 wire format +func (val *GetFilesystemInfoResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetFilesystemInfoResponse from the protobuf v3 wire format +func (val *GetFilesystemInfoResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetFilesystemInfoResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetFilesystemInfoResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetFilesystemInfoResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetFilesystemInfoResponse + switch t := that.(type) { + case *GetFilesystemInfoResponse: + that1 = t + case GetFilesystemInfoResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ArchiveFilesystemRequest to the protobuf v3 wire format +func (val *ArchiveFilesystemRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ArchiveFilesystemRequest from the protobuf v3 wire format +func (val *ArchiveFilesystemRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ArchiveFilesystemRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ArchiveFilesystemRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ArchiveFilesystemRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ArchiveFilesystemRequest + switch t := that.(type) { + case *ArchiveFilesystemRequest: + that1 = t + case ArchiveFilesystemRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ArchiveFilesystemResponse to the protobuf v3 wire format +func (val *ArchiveFilesystemResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ArchiveFilesystemResponse from the protobuf v3 wire format +func (val *ArchiveFilesystemResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ArchiveFilesystemResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ArchiveFilesystemResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ArchiveFilesystemResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ArchiveFilesystemResponse + switch t := that.(type) { + case *ArchiveFilesystemResponse: + that1 = t + case ArchiveFilesystemResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type LookupRequest to the protobuf v3 wire format +func (val *LookupRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type LookupRequest from the protobuf v3 wire format +func (val *LookupRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *LookupRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two LookupRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *LookupRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *LookupRequest + switch t := that.(type) { + case *LookupRequest: + that1 = t + case LookupRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type LookupResponse to the protobuf v3 wire format +func (val *LookupResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type LookupResponse from the protobuf v3 wire format +func (val *LookupResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *LookupResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two LookupResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *LookupResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *LookupResponse + switch t := that.(type) { + case *LookupResponse: + that1 = t + case LookupResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReadChunksRequest to the protobuf v3 wire format +func (val *ReadChunksRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReadChunksRequest from the protobuf v3 wire format +func (val *ReadChunksRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReadChunksRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReadChunksRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReadChunksRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReadChunksRequest + switch t := that.(type) { + case *ReadChunksRequest: + that1 = t + case ReadChunksRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReadChunksResponse to the protobuf v3 wire format +func (val *ReadChunksResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReadChunksResponse from the protobuf v3 wire format +func (val *ReadChunksResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReadChunksResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReadChunksResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReadChunksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReadChunksResponse + switch t := that.(type) { + case *ReadChunksResponse: + that1 = t + case ReadChunksResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WriteChunksRequest to the protobuf v3 wire format +func (val *WriteChunksRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WriteChunksRequest from the protobuf v3 wire format +func (val *WriteChunksRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WriteChunksRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WriteChunksRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WriteChunksRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WriteChunksRequest + switch t := that.(type) { + case *WriteChunksRequest: + that1 = t + case WriteChunksRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WriteChunksResponse to the protobuf v3 wire format +func (val *WriteChunksResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WriteChunksResponse from the protobuf v3 wire format +func (val *WriteChunksResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WriteChunksResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WriteChunksResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WriteChunksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WriteChunksResponse + switch t := that.(type) { + case *WriteChunksResponse: + that1 = t + case WriteChunksResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MkdirRequest to the protobuf v3 wire format +func (val *MkdirRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MkdirRequest from the protobuf v3 wire format +func (val *MkdirRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MkdirRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MkdirRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MkdirRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MkdirRequest + switch t := that.(type) { + case *MkdirRequest: + that1 = t + case MkdirRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MkdirResponse to the protobuf v3 wire format +func (val *MkdirResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MkdirResponse from the protobuf v3 wire format +func (val *MkdirResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MkdirResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MkdirResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MkdirResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MkdirResponse + switch t := that.(type) { + case *MkdirResponse: + that1 = t + case MkdirResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReadDirRequest to the protobuf v3 wire format +func (val *ReadDirRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReadDirRequest from the protobuf v3 wire format +func (val *ReadDirRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReadDirRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReadDirRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReadDirRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReadDirRequest + switch t := that.(type) { + case *ReadDirRequest: + that1 = t + case ReadDirRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReadDirResponse to the protobuf v3 wire format +func (val *ReadDirResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReadDirResponse from the protobuf v3 wire format +func (val *ReadDirResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReadDirResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReadDirResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReadDirResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReadDirResponse + switch t := that.(type) { + case *ReadDirResponse: + that1 = t + case ReadDirResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UnlinkRequest to the protobuf v3 wire format +func (val *UnlinkRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UnlinkRequest from the protobuf v3 wire format +func (val *UnlinkRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UnlinkRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UnlinkRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UnlinkRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UnlinkRequest + switch t := that.(type) { + case *UnlinkRequest: + that1 = t + case UnlinkRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UnlinkResponse to the protobuf v3 wire format +func (val *UnlinkResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UnlinkResponse from the protobuf v3 wire format +func (val *UnlinkResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UnlinkResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UnlinkResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UnlinkResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UnlinkResponse + switch t := that.(type) { + case *UnlinkResponse: + that1 = t + case UnlinkResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RmdirRequest to the protobuf v3 wire format +func (val *RmdirRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RmdirRequest from the protobuf v3 wire format +func (val *RmdirRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RmdirRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RmdirRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RmdirRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RmdirRequest + switch t := that.(type) { + case *RmdirRequest: + that1 = t + case RmdirRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RmdirResponse to the protobuf v3 wire format +func (val *RmdirResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RmdirResponse from the protobuf v3 wire format +func (val *RmdirResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RmdirResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RmdirResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RmdirResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RmdirResponse + switch t := that.(type) { + case *RmdirResponse: + that1 = t + case RmdirResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RenameRequest to the protobuf v3 wire format +func (val *RenameRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RenameRequest from the protobuf v3 wire format +func (val *RenameRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RenameRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RenameRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RenameRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RenameRequest + switch t := that.(type) { + case *RenameRequest: + that1 = t + case RenameRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RenameResponse to the protobuf v3 wire format +func (val *RenameResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RenameResponse from the protobuf v3 wire format +func (val *RenameResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RenameResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RenameResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RenameResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RenameResponse + switch t := that.(type) { + case *RenameResponse: + that1 = t + case RenameResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetattrRequest to the protobuf v3 wire format +func (val *GetattrRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetattrRequest from the protobuf v3 wire format +func (val *GetattrRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetattrRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetattrRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetattrRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetattrRequest + switch t := that.(type) { + case *GetattrRequest: + that1 = t + case GetattrRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetattrResponse to the protobuf v3 wire format +func (val *GetattrResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetattrResponse from the protobuf v3 wire format +func (val *GetattrResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetattrResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetattrResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetattrResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetattrResponse + switch t := that.(type) { + case *GetattrResponse: + that1 = t + case GetattrResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetattrRequest to the protobuf v3 wire format +func (val *SetattrRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetattrRequest from the protobuf v3 wire format +func (val *SetattrRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetattrRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetattrRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetattrRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetattrRequest + switch t := that.(type) { + case *SetattrRequest: + that1 = t + case SetattrRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetattrResponse to the protobuf v3 wire format +func (val *SetattrResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetattrResponse from the protobuf v3 wire format +func (val *SetattrResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetattrResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetattrResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetattrResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetattrResponse + switch t := that.(type) { + case *SetattrResponse: + that1 = t + case SetattrResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TruncateRequest to the protobuf v3 wire format +func (val *TruncateRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TruncateRequest from the protobuf v3 wire format +func (val *TruncateRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TruncateRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TruncateRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TruncateRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TruncateRequest + switch t := that.(type) { + case *TruncateRequest: + that1 = t + case TruncateRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TruncateResponse to the protobuf v3 wire format +func (val *TruncateResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TruncateResponse from the protobuf v3 wire format +func (val *TruncateResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TruncateResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TruncateResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TruncateResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TruncateResponse + switch t := that.(type) { + case *TruncateResponse: + that1 = t + case TruncateResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type LinkRequest to the protobuf v3 wire format +func (val *LinkRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type LinkRequest from the protobuf v3 wire format +func (val *LinkRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *LinkRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two LinkRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *LinkRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *LinkRequest + switch t := that.(type) { + case *LinkRequest: + that1 = t + case LinkRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type LinkResponse to the protobuf v3 wire format +func (val *LinkResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type LinkResponse from the protobuf v3 wire format +func (val *LinkResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *LinkResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two LinkResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *LinkResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *LinkResponse + switch t := that.(type) { + case *LinkResponse: + that1 = t + case LinkResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SymlinkRequest to the protobuf v3 wire format +func (val *SymlinkRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SymlinkRequest from the protobuf v3 wire format +func (val *SymlinkRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SymlinkRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SymlinkRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SymlinkRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SymlinkRequest + switch t := that.(type) { + case *SymlinkRequest: + that1 = t + case SymlinkRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SymlinkResponse to the protobuf v3 wire format +func (val *SymlinkResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SymlinkResponse from the protobuf v3 wire format +func (val *SymlinkResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SymlinkResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SymlinkResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SymlinkResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SymlinkResponse + switch t := that.(type) { + case *SymlinkResponse: + that1 = t + case SymlinkResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReadlinkRequest to the protobuf v3 wire format +func (val *ReadlinkRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReadlinkRequest from the protobuf v3 wire format +func (val *ReadlinkRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReadlinkRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReadlinkRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReadlinkRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReadlinkRequest + switch t := that.(type) { + case *ReadlinkRequest: + that1 = t + case ReadlinkRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReadlinkResponse to the protobuf v3 wire format +func (val *ReadlinkResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReadlinkResponse from the protobuf v3 wire format +func (val *ReadlinkResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReadlinkResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReadlinkResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReadlinkResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReadlinkResponse + switch t := that.(type) { + case *ReadlinkResponse: + that1 = t + case ReadlinkResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateFileRequest to the protobuf v3 wire format +func (val *CreateFileRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateFileRequest from the protobuf v3 wire format +func (val *CreateFileRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateFileRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateFileRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateFileRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateFileRequest + switch t := that.(type) { + case *CreateFileRequest: + that1 = t + case CreateFileRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateFileResponse to the protobuf v3 wire format +func (val *CreateFileResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateFileResponse from the protobuf v3 wire format +func (val *CreateFileResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateFileResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateFileResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateFileResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateFileResponse + switch t := that.(type) { + case *CreateFileResponse: + that1 = t + case CreateFileResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MknodRequest to the protobuf v3 wire format +func (val *MknodRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MknodRequest from the protobuf v3 wire format +func (val *MknodRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MknodRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MknodRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MknodRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MknodRequest + switch t := that.(type) { + case *MknodRequest: + that1 = t + case MknodRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MknodResponse to the protobuf v3 wire format +func (val *MknodResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MknodResponse from the protobuf v3 wire format +func (val *MknodResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MknodResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MknodResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MknodResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MknodResponse + switch t := that.(type) { + case *MknodResponse: + that1 = t + case MknodResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StatfsRequest to the protobuf v3 wire format +func (val *StatfsRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StatfsRequest from the protobuf v3 wire format +func (val *StatfsRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StatfsRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StatfsRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StatfsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StatfsRequest + switch t := that.(type) { + case *StatfsRequest: + that1 = t + case StatfsRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StatfsResponse to the protobuf v3 wire format +func (val *StatfsResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StatfsResponse from the protobuf v3 wire format +func (val *StatfsResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StatfsResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StatfsResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StatfsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StatfsResponse + switch t := that.(type) { + case *StatfsResponse: + that1 = t + case StatfsResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateSnapshotRequest to the protobuf v3 wire format +func (val *CreateSnapshotRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateSnapshotRequest from the protobuf v3 wire format +func (val *CreateSnapshotRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateSnapshotRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateSnapshotRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateSnapshotRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateSnapshotRequest + switch t := that.(type) { + case *CreateSnapshotRequest: + that1 = t + case CreateSnapshotRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateSnapshotResponse to the protobuf v3 wire format +func (val *CreateSnapshotResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateSnapshotResponse from the protobuf v3 wire format +func (val *CreateSnapshotResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateSnapshotResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateSnapshotResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateSnapshotResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateSnapshotResponse + switch t := that.(type) { + case *CreateSnapshotResponse: + that1 = t + case CreateSnapshotResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InodeAttr to the protobuf v3 wire format +func (val *InodeAttr) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InodeAttr from the protobuf v3 wire format +func (val *InodeAttr) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InodeAttr) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InodeAttr values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InodeAttr) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InodeAttr + switch t := that.(type) { + case *InodeAttr: + that1 = t + case InodeAttr: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DirEntry to the protobuf v3 wire format +func (val *DirEntry) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DirEntry from the protobuf v3 wire format +func (val *DirEntry) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DirEntry) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DirEntry values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DirEntry) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DirEntry + switch t := that.(type) { + case *DirEntry: + that1 = t + case DirEntry: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type AttachWorkflowRequest to the protobuf v3 wire format +func (val *AttachWorkflowRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type AttachWorkflowRequest from the protobuf v3 wire format +func (val *AttachWorkflowRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *AttachWorkflowRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two AttachWorkflowRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *AttachWorkflowRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *AttachWorkflowRequest + switch t := that.(type) { + case *AttachWorkflowRequest: + that1 = t + case AttachWorkflowRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type AttachWorkflowResponse to the protobuf v3 wire format +func (val *AttachWorkflowResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type AttachWorkflowResponse from the protobuf v3 wire format +func (val *AttachWorkflowResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *AttachWorkflowResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two AttachWorkflowResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *AttachWorkflowResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *AttachWorkflowResponse + switch t := that.(type) { + case *AttachWorkflowResponse: + that1 = t + case AttachWorkflowResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DetachWorkflowRequest to the protobuf v3 wire format +func (val *DetachWorkflowRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DetachWorkflowRequest from the protobuf v3 wire format +func (val *DetachWorkflowRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DetachWorkflowRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DetachWorkflowRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DetachWorkflowRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DetachWorkflowRequest + switch t := that.(type) { + case *DetachWorkflowRequest: + that1 = t + case DetachWorkflowRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DetachWorkflowResponse to the protobuf v3 wire format +func (val *DetachWorkflowResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DetachWorkflowResponse from the protobuf v3 wire format +func (val *DetachWorkflowResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DetachWorkflowResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DetachWorkflowResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DetachWorkflowResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DetachWorkflowResponse + switch t := that.(type) { + case *DetachWorkflowResponse: + that1 = t + case DetachWorkflowResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/request_response.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/request_response.pb.go new file mode 100644 index 0000000000..2c48aa1b45 --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/request_response.pb.go @@ -0,0 +1,3131 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/temporalzfs/proto/v1/request_response.proto + +package temporalzfspb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateFilesystemRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + // Initial set of owner workflow IDs for this filesystem. + OwnerWorkflowIds []string `protobuf:"bytes,6,rep,name=owner_workflow_ids,json=ownerWorkflowIds,proto3" json:"owner_workflow_ids,omitempty"` + Config *FilesystemConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateFilesystemRequest) Reset() { + *x = CreateFilesystemRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateFilesystemRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateFilesystemRequest) ProtoMessage() {} + +func (x *CreateFilesystemRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateFilesystemRequest.ProtoReflect.Descriptor instead. +func (*CreateFilesystemRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateFilesystemRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CreateFilesystemRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *CreateFilesystemRequest) GetOwnerWorkflowIds() []string { + if x != nil { + return x.OwnerWorkflowIds + } + return nil +} + +func (x *CreateFilesystemRequest) GetConfig() *FilesystemConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *CreateFilesystemRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type CreateFilesystemResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateFilesystemResponse) Reset() { + *x = CreateFilesystemResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateFilesystemResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateFilesystemResponse) ProtoMessage() {} + +func (x *CreateFilesystemResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateFilesystemResponse.ProtoReflect.Descriptor instead. +func (*CreateFilesystemResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateFilesystemResponse) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +type GetFilesystemInfoRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetFilesystemInfoRequest) Reset() { + *x = GetFilesystemInfoRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetFilesystemInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilesystemInfoRequest) ProtoMessage() {} + +func (x *GetFilesystemInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilesystemInfoRequest.ProtoReflect.Descriptor instead. +func (*GetFilesystemInfoRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *GetFilesystemInfoRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *GetFilesystemInfoRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +type GetFilesystemInfoResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + State *FilesystemState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetFilesystemInfoResponse) Reset() { + *x = GetFilesystemInfoResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetFilesystemInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilesystemInfoResponse) ProtoMessage() {} + +func (x *GetFilesystemInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilesystemInfoResponse.ProtoReflect.Descriptor instead. +func (*GetFilesystemInfoResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{3} +} + +func (x *GetFilesystemInfoResponse) GetState() *FilesystemState { + if x != nil { + return x.State + } + return nil +} + +func (x *GetFilesystemInfoResponse) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +type ArchiveFilesystemRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ArchiveFilesystemRequest) Reset() { + *x = ArchiveFilesystemRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ArchiveFilesystemRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArchiveFilesystemRequest) ProtoMessage() {} + +func (x *ArchiveFilesystemRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArchiveFilesystemRequest.ProtoReflect.Descriptor instead. +func (*ArchiveFilesystemRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{4} +} + +func (x *ArchiveFilesystemRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ArchiveFilesystemRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +type ArchiveFilesystemResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ArchiveFilesystemResponse) Reset() { + *x = ArchiveFilesystemResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ArchiveFilesystemResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArchiveFilesystemResponse) ProtoMessage() {} + +func (x *ArchiveFilesystemResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArchiveFilesystemResponse.ProtoReflect.Descriptor instead. +func (*ArchiveFilesystemResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{5} +} + +type LookupRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LookupRequest) Reset() { + *x = LookupRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LookupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupRequest) ProtoMessage() {} + +func (x *LookupRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupRequest.ProtoReflect.Descriptor instead. +func (*LookupRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{6} +} + +func (x *LookupRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *LookupRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *LookupRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *LookupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type LookupResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + InodeId uint64 `protobuf:"varint,1,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Attr *InodeAttr `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LookupResponse) Reset() { + *x = LookupResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LookupResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupResponse) ProtoMessage() {} + +func (x *LookupResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupResponse.ProtoReflect.Descriptor instead. +func (*LookupResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{7} +} + +func (x *LookupResponse) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *LookupResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type ReadChunksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + ReadSize int64 `protobuf:"varint,5,opt,name=read_size,json=readSize,proto3" json:"read_size,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadChunksRequest) Reset() { + *x = ReadChunksRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadChunksRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadChunksRequest) ProtoMessage() {} + +func (x *ReadChunksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadChunksRequest.ProtoReflect.Descriptor instead. +func (*ReadChunksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{8} +} + +func (x *ReadChunksRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ReadChunksRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *ReadChunksRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *ReadChunksRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *ReadChunksRequest) GetReadSize() int64 { + if x != nil { + return x.ReadSize + } + return 0 +} + +type ReadChunksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadChunksResponse) Reset() { + *x = ReadChunksResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadChunksResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadChunksResponse) ProtoMessage() {} + +func (x *ReadChunksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadChunksResponse.ProtoReflect.Descriptor instead. +func (*ReadChunksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{9} +} + +func (x *ReadChunksResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type WriteChunksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WriteChunksRequest) Reset() { + *x = WriteChunksRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WriteChunksRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteChunksRequest) ProtoMessage() {} + +func (x *WriteChunksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteChunksRequest.ProtoReflect.Descriptor instead. +func (*WriteChunksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{10} +} + +func (x *WriteChunksRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *WriteChunksRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *WriteChunksRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *WriteChunksRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *WriteChunksRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type WriteChunksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + BytesWritten int64 `protobuf:"varint,1,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WriteChunksResponse) Reset() { + *x = WriteChunksResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WriteChunksResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteChunksResponse) ProtoMessage() {} + +func (x *WriteChunksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteChunksResponse.ProtoReflect.Descriptor instead. +func (*WriteChunksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{11} +} + +func (x *WriteChunksResponse) GetBytesWritten() int64 { + if x != nil { + return x.BytesWritten + } + return 0 +} + +type MkdirRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Mode uint32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MkdirRequest) Reset() { + *x = MkdirRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MkdirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MkdirRequest) ProtoMessage() {} + +func (x *MkdirRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MkdirRequest.ProtoReflect.Descriptor instead. +func (*MkdirRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{12} +} + +func (x *MkdirRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *MkdirRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *MkdirRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *MkdirRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MkdirRequest) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +type MkdirResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + InodeId uint64 `protobuf:"varint,1,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Attr *InodeAttr `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MkdirResponse) Reset() { + *x = MkdirResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MkdirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MkdirResponse) ProtoMessage() {} + +func (x *MkdirResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MkdirResponse.ProtoReflect.Descriptor instead. +func (*MkdirResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{13} +} + +func (x *MkdirResponse) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *MkdirResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type ReadDirRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadDirRequest) Reset() { + *x = ReadDirRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDirRequest) ProtoMessage() {} + +func (x *ReadDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDirRequest.ProtoReflect.Descriptor instead. +func (*ReadDirRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{14} +} + +func (x *ReadDirRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ReadDirRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *ReadDirRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +type ReadDirResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Entries []*DirEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadDirResponse) Reset() { + *x = ReadDirResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadDirResponse) ProtoMessage() {} + +func (x *ReadDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadDirResponse.ProtoReflect.Descriptor instead. +func (*ReadDirResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{15} +} + +func (x *ReadDirResponse) GetEntries() []*DirEntry { + if x != nil { + return x.Entries + } + return nil +} + +type UnlinkRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnlinkRequest) Reset() { + *x = UnlinkRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnlinkRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnlinkRequest) ProtoMessage() {} + +func (x *UnlinkRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnlinkRequest.ProtoReflect.Descriptor instead. +func (*UnlinkRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{16} +} + +func (x *UnlinkRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *UnlinkRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *UnlinkRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *UnlinkRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type UnlinkResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnlinkResponse) Reset() { + *x = UnlinkResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnlinkResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnlinkResponse) ProtoMessage() {} + +func (x *UnlinkResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnlinkResponse.ProtoReflect.Descriptor instead. +func (*UnlinkResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{17} +} + +type RmdirRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RmdirRequest) Reset() { + *x = RmdirRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RmdirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RmdirRequest) ProtoMessage() {} + +func (x *RmdirRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RmdirRequest.ProtoReflect.Descriptor instead. +func (*RmdirRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{18} +} + +func (x *RmdirRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RmdirRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *RmdirRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *RmdirRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type RmdirResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RmdirResponse) Reset() { + *x = RmdirResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RmdirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RmdirResponse) ProtoMessage() {} + +func (x *RmdirResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RmdirResponse.ProtoReflect.Descriptor instead. +func (*RmdirResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{19} +} + +type RenameRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + OldParentInodeId uint64 `protobuf:"varint,3,opt,name=old_parent_inode_id,json=oldParentInodeId,proto3" json:"old_parent_inode_id,omitempty"` + OldName string `protobuf:"bytes,4,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` + NewParentInodeId uint64 `protobuf:"varint,5,opt,name=new_parent_inode_id,json=newParentInodeId,proto3" json:"new_parent_inode_id,omitempty"` + NewName string `protobuf:"bytes,6,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RenameRequest) Reset() { + *x = RenameRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RenameRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RenameRequest) ProtoMessage() {} + +func (x *RenameRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RenameRequest.ProtoReflect.Descriptor instead. +func (*RenameRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{20} +} + +func (x *RenameRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RenameRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *RenameRequest) GetOldParentInodeId() uint64 { + if x != nil { + return x.OldParentInodeId + } + return 0 +} + +func (x *RenameRequest) GetOldName() string { + if x != nil { + return x.OldName + } + return "" +} + +func (x *RenameRequest) GetNewParentInodeId() uint64 { + if x != nil { + return x.NewParentInodeId + } + return 0 +} + +func (x *RenameRequest) GetNewName() string { + if x != nil { + return x.NewName + } + return "" +} + +type RenameResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RenameResponse) Reset() { + *x = RenameResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RenameResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RenameResponse) ProtoMessage() {} + +func (x *RenameResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RenameResponse.ProtoReflect.Descriptor instead. +func (*RenameResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{21} +} + +type GetattrRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetattrRequest) Reset() { + *x = GetattrRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetattrRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetattrRequest) ProtoMessage() {} + +func (x *GetattrRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetattrRequest.ProtoReflect.Descriptor instead. +func (*GetattrRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{22} +} + +func (x *GetattrRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *GetattrRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *GetattrRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +type GetattrResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attr *InodeAttr `protobuf:"bytes,1,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetattrResponse) Reset() { + *x = GetattrResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetattrResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetattrResponse) ProtoMessage() {} + +func (x *GetattrResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetattrResponse.ProtoReflect.Descriptor instead. +func (*GetattrResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{23} +} + +func (x *GetattrResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type SetattrRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Attr *InodeAttr `protobuf:"bytes,4,opt,name=attr,proto3" json:"attr,omitempty"` + // Bitmask of which fields in attr to apply. + Valid uint32 `protobuf:"varint,5,opt,name=valid,proto3" json:"valid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetattrRequest) Reset() { + *x = SetattrRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetattrRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetattrRequest) ProtoMessage() {} + +func (x *SetattrRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetattrRequest.ProtoReflect.Descriptor instead. +func (*SetattrRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{24} +} + +func (x *SetattrRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SetattrRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *SetattrRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *SetattrRequest) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +func (x *SetattrRequest) GetValid() uint32 { + if x != nil { + return x.Valid + } + return 0 +} + +type SetattrResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attr *InodeAttr `protobuf:"bytes,1,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetattrResponse) Reset() { + *x = SetattrResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetattrResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetattrResponse) ProtoMessage() {} + +func (x *SetattrResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetattrResponse.ProtoReflect.Descriptor instead. +func (*SetattrResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{25} +} + +func (x *SetattrResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type TruncateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + NewSize int64 `protobuf:"varint,4,opt,name=new_size,json=newSize,proto3" json:"new_size,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TruncateRequest) Reset() { + *x = TruncateRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TruncateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TruncateRequest) ProtoMessage() {} + +func (x *TruncateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TruncateRequest.ProtoReflect.Descriptor instead. +func (*TruncateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{26} +} + +func (x *TruncateRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *TruncateRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *TruncateRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *TruncateRequest) GetNewSize() int64 { + if x != nil { + return x.NewSize + } + return 0 +} + +type TruncateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TruncateResponse) Reset() { + *x = TruncateResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TruncateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TruncateResponse) ProtoMessage() {} + +func (x *TruncateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TruncateResponse.ProtoReflect.Descriptor instead. +func (*TruncateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{27} +} + +type LinkRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + NewParentInodeId uint64 `protobuf:"varint,4,opt,name=new_parent_inode_id,json=newParentInodeId,proto3" json:"new_parent_inode_id,omitempty"` + NewName string `protobuf:"bytes,5,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LinkRequest) Reset() { + *x = LinkRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LinkRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinkRequest) ProtoMessage() {} + +func (x *LinkRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinkRequest.ProtoReflect.Descriptor instead. +func (*LinkRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{28} +} + +func (x *LinkRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *LinkRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *LinkRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *LinkRequest) GetNewParentInodeId() uint64 { + if x != nil { + return x.NewParentInodeId + } + return 0 +} + +func (x *LinkRequest) GetNewName() string { + if x != nil { + return x.NewName + } + return "" +} + +type LinkResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attr *InodeAttr `protobuf:"bytes,1,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LinkResponse) Reset() { + *x = LinkResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LinkResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinkResponse) ProtoMessage() {} + +func (x *LinkResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinkResponse.ProtoReflect.Descriptor instead. +func (*LinkResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{29} +} + +func (x *LinkResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type SymlinkRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Target string `protobuf:"bytes,5,opt,name=target,proto3" json:"target,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SymlinkRequest) Reset() { + *x = SymlinkRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SymlinkRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SymlinkRequest) ProtoMessage() {} + +func (x *SymlinkRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SymlinkRequest.ProtoReflect.Descriptor instead. +func (*SymlinkRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{30} +} + +func (x *SymlinkRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SymlinkRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *SymlinkRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *SymlinkRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SymlinkRequest) GetTarget() string { + if x != nil { + return x.Target + } + return "" +} + +type SymlinkResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + InodeId uint64 `protobuf:"varint,1,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Attr *InodeAttr `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SymlinkResponse) Reset() { + *x = SymlinkResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SymlinkResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SymlinkResponse) ProtoMessage() {} + +func (x *SymlinkResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SymlinkResponse.ProtoReflect.Descriptor instead. +func (*SymlinkResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{31} +} + +func (x *SymlinkResponse) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *SymlinkResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type ReadlinkRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + InodeId uint64 `protobuf:"varint,3,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadlinkRequest) Reset() { + *x = ReadlinkRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadlinkRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadlinkRequest) ProtoMessage() {} + +func (x *ReadlinkRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadlinkRequest.ProtoReflect.Descriptor instead. +func (*ReadlinkRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{32} +} + +func (x *ReadlinkRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ReadlinkRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *ReadlinkRequest) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +type ReadlinkResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadlinkResponse) Reset() { + *x = ReadlinkResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadlinkResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadlinkResponse) ProtoMessage() {} + +func (x *ReadlinkResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadlinkResponse.ProtoReflect.Descriptor instead. +func (*ReadlinkResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{33} +} + +func (x *ReadlinkResponse) GetTarget() string { + if x != nil { + return x.Target + } + return "" +} + +type CreateFileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Mode uint32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateFileRequest) Reset() { + *x = CreateFileRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateFileRequest) ProtoMessage() {} + +func (x *CreateFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateFileRequest.ProtoReflect.Descriptor instead. +func (*CreateFileRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{34} +} + +func (x *CreateFileRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CreateFileRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *CreateFileRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *CreateFileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateFileRequest) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +func (x *CreateFileRequest) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +type CreateFileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + InodeId uint64 `protobuf:"varint,1,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Attr *InodeAttr `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateFileResponse) Reset() { + *x = CreateFileResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateFileResponse) ProtoMessage() {} + +func (x *CreateFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateFileResponse.ProtoReflect.Descriptor instead. +func (*CreateFileResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{35} +} + +func (x *CreateFileResponse) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *CreateFileResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type MknodRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + ParentInodeId uint64 `protobuf:"varint,3,opt,name=parent_inode_id,json=parentInodeId,proto3" json:"parent_inode_id,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Mode uint32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + Dev uint32 `protobuf:"varint,6,opt,name=dev,proto3" json:"dev,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MknodRequest) Reset() { + *x = MknodRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MknodRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MknodRequest) ProtoMessage() {} + +func (x *MknodRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MknodRequest.ProtoReflect.Descriptor instead. +func (*MknodRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{36} +} + +func (x *MknodRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *MknodRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *MknodRequest) GetParentInodeId() uint64 { + if x != nil { + return x.ParentInodeId + } + return 0 +} + +func (x *MknodRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MknodRequest) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +func (x *MknodRequest) GetDev() uint32 { + if x != nil { + return x.Dev + } + return 0 +} + +type MknodResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + InodeId uint64 `protobuf:"varint,1,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Attr *InodeAttr `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MknodResponse) Reset() { + *x = MknodResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MknodResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MknodResponse) ProtoMessage() {} + +func (x *MknodResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MknodResponse.ProtoReflect.Descriptor instead. +func (*MknodResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{37} +} + +func (x *MknodResponse) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *MknodResponse) GetAttr() *InodeAttr { + if x != nil { + return x.Attr + } + return nil +} + +type StatfsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatfsRequest) Reset() { + *x = StatfsRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatfsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatfsRequest) ProtoMessage() {} + +func (x *StatfsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatfsRequest.ProtoReflect.Descriptor instead. +func (*StatfsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{38} +} + +func (x *StatfsRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *StatfsRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +type StatfsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Blocks uint64 `protobuf:"varint,1,opt,name=blocks,proto3" json:"blocks,omitempty"` + Bfree uint64 `protobuf:"varint,2,opt,name=bfree,proto3" json:"bfree,omitempty"` + Bavail uint64 `protobuf:"varint,3,opt,name=bavail,proto3" json:"bavail,omitempty"` + Files uint64 `protobuf:"varint,4,opt,name=files,proto3" json:"files,omitempty"` + Ffree uint64 `protobuf:"varint,5,opt,name=ffree,proto3" json:"ffree,omitempty"` + Bsize uint32 `protobuf:"varint,6,opt,name=bsize,proto3" json:"bsize,omitempty"` + Namelen uint32 `protobuf:"varint,7,opt,name=namelen,proto3" json:"namelen,omitempty"` + Frsize uint32 `protobuf:"varint,8,opt,name=frsize,proto3" json:"frsize,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatfsResponse) Reset() { + *x = StatfsResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatfsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatfsResponse) ProtoMessage() {} + +func (x *StatfsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatfsResponse.ProtoReflect.Descriptor instead. +func (*StatfsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{39} +} + +func (x *StatfsResponse) GetBlocks() uint64 { + if x != nil { + return x.Blocks + } + return 0 +} + +func (x *StatfsResponse) GetBfree() uint64 { + if x != nil { + return x.Bfree + } + return 0 +} + +func (x *StatfsResponse) GetBavail() uint64 { + if x != nil { + return x.Bavail + } + return 0 +} + +func (x *StatfsResponse) GetFiles() uint64 { + if x != nil { + return x.Files + } + return 0 +} + +func (x *StatfsResponse) GetFfree() uint64 { + if x != nil { + return x.Ffree + } + return 0 +} + +func (x *StatfsResponse) GetBsize() uint32 { + if x != nil { + return x.Bsize + } + return 0 +} + +func (x *StatfsResponse) GetNamelen() uint32 { + if x != nil { + return x.Namelen + } + return 0 +} + +func (x *StatfsResponse) GetFrsize() uint32 { + if x != nil { + return x.Frsize + } + return 0 +} + +type CreateSnapshotRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + SnapshotName string `protobuf:"bytes,3,opt,name=snapshot_name,json=snapshotName,proto3" json:"snapshot_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSnapshotRequest) Reset() { + *x = CreateSnapshotRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnapshotRequest) ProtoMessage() {} + +func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{40} +} + +func (x *CreateSnapshotRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CreateSnapshotRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *CreateSnapshotRequest) GetSnapshotName() string { + if x != nil { + return x.SnapshotName + } + return "" +} + +type CreateSnapshotResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + SnapshotTxnId uint64 `protobuf:"varint,1,opt,name=snapshot_txn_id,json=snapshotTxnId,proto3" json:"snapshot_txn_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSnapshotResponse) Reset() { + *x = CreateSnapshotResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnapshotResponse) ProtoMessage() {} + +func (x *CreateSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[41] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnapshotResponse.ProtoReflect.Descriptor instead. +func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{41} +} + +func (x *CreateSnapshotResponse) GetSnapshotTxnId() uint64 { + if x != nil { + return x.SnapshotTxnId + } + return 0 +} + +type InodeAttr struct { + state protoimpl.MessageState `protogen:"open.v1"` + InodeId uint64 `protobuf:"varint,1,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + FileSize uint64 `protobuf:"varint,2,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + Mode uint32 `protobuf:"varint,3,opt,name=mode,proto3" json:"mode,omitempty"` + Nlink uint32 `protobuf:"varint,4,opt,name=nlink,proto3" json:"nlink,omitempty"` + Uid uint32 `protobuf:"varint,5,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,6,opt,name=gid,proto3" json:"gid,omitempty"` + Atime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=atime,proto3" json:"atime,omitempty"` + Mtime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=mtime,proto3" json:"mtime,omitempty"` + Ctime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=ctime,proto3" json:"ctime,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InodeAttr) Reset() { + *x = InodeAttr{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InodeAttr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InodeAttr) ProtoMessage() {} + +func (x *InodeAttr) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[42] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InodeAttr.ProtoReflect.Descriptor instead. +func (*InodeAttr) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{42} +} + +func (x *InodeAttr) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *InodeAttr) GetFileSize() uint64 { + if x != nil { + return x.FileSize + } + return 0 +} + +func (x *InodeAttr) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +func (x *InodeAttr) GetNlink() uint32 { + if x != nil { + return x.Nlink + } + return 0 +} + +func (x *InodeAttr) GetUid() uint32 { + if x != nil { + return x.Uid + } + return 0 +} + +func (x *InodeAttr) GetGid() uint32 { + if x != nil { + return x.Gid + } + return 0 +} + +func (x *InodeAttr) GetAtime() *timestamppb.Timestamp { + if x != nil { + return x.Atime + } + return nil +} + +func (x *InodeAttr) GetMtime() *timestamppb.Timestamp { + if x != nil { + return x.Mtime + } + return nil +} + +func (x *InodeAttr) GetCtime() *timestamppb.Timestamp { + if x != nil { + return x.Ctime + } + return nil +} + +type DirEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + InodeId uint64 `protobuf:"varint,2,opt,name=inode_id,json=inodeId,proto3" json:"inode_id,omitempty"` + Mode uint32 `protobuf:"varint,3,opt,name=mode,proto3" json:"mode,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DirEntry) Reset() { + *x = DirEntry{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DirEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DirEntry) ProtoMessage() {} + +func (x *DirEntry) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[43] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DirEntry.ProtoReflect.Descriptor instead. +func (*DirEntry) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{43} +} + +func (x *DirEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DirEntry) GetInodeId() uint64 { + if x != nil { + return x.InodeId + } + return 0 +} + +func (x *DirEntry) GetMode() uint32 { + if x != nil { + return x.Mode + } + return 0 +} + +type AttachWorkflowRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttachWorkflowRequest) Reset() { + *x = AttachWorkflowRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttachWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttachWorkflowRequest) ProtoMessage() {} + +func (x *AttachWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[44] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttachWorkflowRequest.ProtoReflect.Descriptor instead. +func (*AttachWorkflowRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{44} +} + +func (x *AttachWorkflowRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *AttachWorkflowRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *AttachWorkflowRequest) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +type AttachWorkflowResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttachWorkflowResponse) Reset() { + *x = AttachWorkflowResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttachWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttachWorkflowResponse) ProtoMessage() {} + +func (x *AttachWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[45] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttachWorkflowResponse.ProtoReflect.Descriptor instead. +func (*AttachWorkflowResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{45} +} + +type DetachWorkflowRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FilesystemId string `protobuf:"bytes,2,opt,name=filesystem_id,json=filesystemId,proto3" json:"filesystem_id,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DetachWorkflowRequest) Reset() { + *x = DetachWorkflowRequest{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DetachWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DetachWorkflowRequest) ProtoMessage() {} + +func (x *DetachWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[46] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DetachWorkflowRequest.ProtoReflect.Descriptor instead. +func (*DetachWorkflowRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{46} +} + +func (x *DetachWorkflowRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DetachWorkflowRequest) GetFilesystemId() string { + if x != nil { + return x.FilesystemId + } + return "" +} + +func (x *DetachWorkflowRequest) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +type DetachWorkflowResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DetachWorkflowResponse) Reset() { + *x = DetachWorkflowResponse{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DetachWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DetachWorkflowResponse) ProtoMessage() {} + +func (x *DetachWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes[47] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DetachWorkflowResponse.ProtoReflect.Descriptor instead. +func (*DetachWorkflowResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP(), []int{47} +} + +var File_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDesc = "" + + "\n" + + "Etemporal/server/chasm/lib/temporalzfs/proto/v1/request_response.proto\x12.temporal.server.chasm.lib.temporalzfs.proto.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a:temporal/server/chasm/lib/temporalzfs/proto/v1/state.proto\"\x88\x02\n" + + "\x17CreateFilesystemRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12,\n" + + "\x12owner_workflow_ids\x18\x06 \x03(\tR\x10ownerWorkflowIds\x12X\n" + + "\x06config\x18\x04 \x01(\v2@.temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfigR\x06config\x12\x1d\n" + + "\n" + + "request_id\x18\x05 \x01(\tR\trequestId\"1\n" + + "\x18CreateFilesystemResponse\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\"b\n" + + "\x18GetFilesystemInfoRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\"\x89\x01\n" + + "\x19GetFilesystemInfoResponse\x12U\n" + + "\x05state\x18\x01 \x01(\v2?.temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemStateR\x05state\x12\x15\n" + + "\x06run_id\x18\x02 \x01(\tR\x05runId\"b\n" + + "\x18ArchiveFilesystemRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\"\x1b\n" + + "\x19ArchiveFilesystemResponse\"\x93\x01\n" + + "\rLookupRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\"z\n" + + "\x0eLookupResponse\x12\x19\n" + + "\binode_id\x18\x01 \x01(\x04R\ainodeId\x12M\n" + + "\x04attr\x18\x02 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"\xab\x01\n" + + "\x11ReadChunksRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\x12\x16\n" + + "\x06offset\x18\x04 \x01(\x03R\x06offset\x12\x1b\n" + + "\tread_size\x18\x05 \x01(\x03R\breadSize\"(\n" + + "\x12ReadChunksResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\"\xa3\x01\n" + + "\x12WriteChunksRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\x12\x16\n" + + "\x06offset\x18\x04 \x01(\x03R\x06offset\x12\x12\n" + + "\x04data\x18\x05 \x01(\fR\x04data\":\n" + + "\x13WriteChunksResponse\x12#\n" + + "\rbytes_written\x18\x01 \x01(\x03R\fbytesWritten\"\xa6\x01\n" + + "\fMkdirRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\x12\x12\n" + + "\x04mode\x18\x05 \x01(\rR\x04mode\"y\n" + + "\rMkdirResponse\x12\x19\n" + + "\binode_id\x18\x01 \x01(\x04R\ainodeId\x12M\n" + + "\x04attr\x18\x02 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"s\n" + + "\x0eReadDirRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\"e\n" + + "\x0fReadDirResponse\x12R\n" + + "\aentries\x18\x01 \x03(\v28.temporal.server.chasm.lib.temporalzfs.proto.v1.DirEntryR\aentries\"\x93\x01\n" + + "\rUnlinkRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\"\x10\n" + + "\x0eUnlinkResponse\"\x92\x01\n" + + "\fRmdirRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\"\x0f\n" + + "\rRmdirResponse\"\xeb\x01\n" + + "\rRenameRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12-\n" + + "\x13old_parent_inode_id\x18\x03 \x01(\x04R\x10oldParentInodeId\x12\x19\n" + + "\bold_name\x18\x04 \x01(\tR\aoldName\x12-\n" + + "\x13new_parent_inode_id\x18\x05 \x01(\x04R\x10newParentInodeId\x12\x19\n" + + "\bnew_name\x18\x06 \x01(\tR\anewName\"\x10\n" + + "\x0eRenameResponse\"s\n" + + "\x0eGetattrRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\"`\n" + + "\x0fGetattrResponse\x12M\n" + + "\x04attr\x18\x01 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"\xd8\x01\n" + + "\x0eSetattrRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\x12M\n" + + "\x04attr\x18\x04 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\x12\x14\n" + + "\x05valid\x18\x05 \x01(\rR\x05valid\"`\n" + + "\x0fSetattrResponse\x12M\n" + + "\x04attr\x18\x01 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"\x8f\x01\n" + + "\x0fTruncateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\x12\x19\n" + + "\bnew_size\x18\x04 \x01(\x03R\anewSize\"\x12\n" + + "\x10TruncateResponse\"\xba\x01\n" + + "\vLinkRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\x12-\n" + + "\x13new_parent_inode_id\x18\x04 \x01(\x04R\x10newParentInodeId\x12\x19\n" + + "\bnew_name\x18\x05 \x01(\tR\anewName\"]\n" + + "\fLinkResponse\x12M\n" + + "\x04attr\x18\x01 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"\xac\x01\n" + + "\x0eSymlinkRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\x12\x16\n" + + "\x06target\x18\x05 \x01(\tR\x06target\"{\n" + + "\x0fSymlinkResponse\x12\x19\n" + + "\binode_id\x18\x01 \x01(\x04R\ainodeId\x12M\n" + + "\x04attr\x18\x02 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"t\n" + + "\x0fReadlinkRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x19\n" + + "\binode_id\x18\x03 \x01(\x04R\ainodeId\"*\n" + + "\x10ReadlinkResponse\x12\x16\n" + + "\x06target\x18\x01 \x01(\tR\x06target\"\xc1\x01\n" + + "\x11CreateFileRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\x12\x12\n" + + "\x04mode\x18\x05 \x01(\rR\x04mode\x12\x14\n" + + "\x05flags\x18\x06 \x01(\rR\x05flags\"~\n" + + "\x12CreateFileResponse\x12\x19\n" + + "\binode_id\x18\x01 \x01(\x04R\ainodeId\x12M\n" + + "\x04attr\x18\x02 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"\xb8\x01\n" + + "\fMknodRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12&\n" + + "\x0fparent_inode_id\x18\x03 \x01(\x04R\rparentInodeId\x12\x12\n" + + "\x04name\x18\x04 \x01(\tR\x04name\x12\x12\n" + + "\x04mode\x18\x05 \x01(\rR\x04mode\x12\x10\n" + + "\x03dev\x18\x06 \x01(\rR\x03dev\"y\n" + + "\rMknodResponse\x12\x19\n" + + "\binode_id\x18\x01 \x01(\x04R\ainodeId\x12M\n" + + "\x04attr\x18\x02 \x01(\v29.temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttrR\x04attr\"W\n" + + "\rStatfsRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\"\xca\x01\n" + + "\x0eStatfsResponse\x12\x16\n" + + "\x06blocks\x18\x01 \x01(\x04R\x06blocks\x12\x14\n" + + "\x05bfree\x18\x02 \x01(\x04R\x05bfree\x12\x16\n" + + "\x06bavail\x18\x03 \x01(\x04R\x06bavail\x12\x14\n" + + "\x05files\x18\x04 \x01(\x04R\x05files\x12\x14\n" + + "\x05ffree\x18\x05 \x01(\x04R\x05ffree\x12\x14\n" + + "\x05bsize\x18\x06 \x01(\rR\x05bsize\x12\x18\n" + + "\anamelen\x18\a \x01(\rR\anamelen\x12\x16\n" + + "\x06frsize\x18\b \x01(\rR\x06frsize\"\x84\x01\n" + + "\x15CreateSnapshotRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12#\n" + + "\rsnapshot_name\x18\x03 \x01(\tR\fsnapshotName\"@\n" + + "\x16CreateSnapshotResponse\x12&\n" + + "\x0fsnapshot_txn_id\x18\x01 \x01(\x04R\rsnapshotTxnId\"\xa7\x02\n" + + "\tInodeAttr\x12\x19\n" + + "\binode_id\x18\x01 \x01(\x04R\ainodeId\x12\x1b\n" + + "\tfile_size\x18\x02 \x01(\x04R\bfileSize\x12\x12\n" + + "\x04mode\x18\x03 \x01(\rR\x04mode\x12\x14\n" + + "\x05nlink\x18\x04 \x01(\rR\x05nlink\x12\x10\n" + + "\x03uid\x18\x05 \x01(\rR\x03uid\x12\x10\n" + + "\x03gid\x18\x06 \x01(\rR\x03gid\x120\n" + + "\x05atime\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x05atime\x120\n" + + "\x05mtime\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x05mtime\x120\n" + + "\x05ctime\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\x05ctime\"M\n" + + "\bDirEntry\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x19\n" + + "\binode_id\x18\x02 \x01(\x04R\ainodeId\x12\x12\n" + + "\x04mode\x18\x03 \x01(\rR\x04mode\"\x80\x01\n" + + "\x15AttachWorkflowRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x1f\n" + + "\vworkflow_id\x18\x03 \x01(\tR\n" + + "workflowId\"\x18\n" + + "\x16AttachWorkflowResponse\"\x80\x01\n" + + "\x15DetachWorkflowRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12#\n" + + "\rfilesystem_id\x18\x02 \x01(\tR\ffilesystemId\x12\x1f\n" + + "\vworkflow_id\x18\x03 \x01(\tR\n" + + "workflowId\"\x18\n" + + "\x16DetachWorkflowResponseBMZKgo.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDescData +} + +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 48) +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_goTypes = []any{ + (*CreateFilesystemRequest)(nil), // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemRequest + (*CreateFilesystemResponse)(nil), // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemResponse + (*GetFilesystemInfoRequest)(nil), // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoRequest + (*GetFilesystemInfoResponse)(nil), // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoResponse + (*ArchiveFilesystemRequest)(nil), // 4: temporal.server.chasm.lib.temporalzfs.proto.v1.ArchiveFilesystemRequest + (*ArchiveFilesystemResponse)(nil), // 5: temporal.server.chasm.lib.temporalzfs.proto.v1.ArchiveFilesystemResponse + (*LookupRequest)(nil), // 6: temporal.server.chasm.lib.temporalzfs.proto.v1.LookupRequest + (*LookupResponse)(nil), // 7: temporal.server.chasm.lib.temporalzfs.proto.v1.LookupResponse + (*ReadChunksRequest)(nil), // 8: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksRequest + (*ReadChunksResponse)(nil), // 9: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksResponse + (*WriteChunksRequest)(nil), // 10: temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksRequest + (*WriteChunksResponse)(nil), // 11: temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksResponse + (*MkdirRequest)(nil), // 12: temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirRequest + (*MkdirResponse)(nil), // 13: temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirResponse + (*ReadDirRequest)(nil), // 14: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirRequest + (*ReadDirResponse)(nil), // 15: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirResponse + (*UnlinkRequest)(nil), // 16: temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkRequest + (*UnlinkResponse)(nil), // 17: temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkResponse + (*RmdirRequest)(nil), // 18: temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirRequest + (*RmdirResponse)(nil), // 19: temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirResponse + (*RenameRequest)(nil), // 20: temporal.server.chasm.lib.temporalzfs.proto.v1.RenameRequest + (*RenameResponse)(nil), // 21: temporal.server.chasm.lib.temporalzfs.proto.v1.RenameResponse + (*GetattrRequest)(nil), // 22: temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrRequest + (*GetattrResponse)(nil), // 23: temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrResponse + (*SetattrRequest)(nil), // 24: temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrRequest + (*SetattrResponse)(nil), // 25: temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrResponse + (*TruncateRequest)(nil), // 26: temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateRequest + (*TruncateResponse)(nil), // 27: temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateResponse + (*LinkRequest)(nil), // 28: temporal.server.chasm.lib.temporalzfs.proto.v1.LinkRequest + (*LinkResponse)(nil), // 29: temporal.server.chasm.lib.temporalzfs.proto.v1.LinkResponse + (*SymlinkRequest)(nil), // 30: temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkRequest + (*SymlinkResponse)(nil), // 31: temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkResponse + (*ReadlinkRequest)(nil), // 32: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkRequest + (*ReadlinkResponse)(nil), // 33: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkResponse + (*CreateFileRequest)(nil), // 34: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileRequest + (*CreateFileResponse)(nil), // 35: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileResponse + (*MknodRequest)(nil), // 36: temporal.server.chasm.lib.temporalzfs.proto.v1.MknodRequest + (*MknodResponse)(nil), // 37: temporal.server.chasm.lib.temporalzfs.proto.v1.MknodResponse + (*StatfsRequest)(nil), // 38: temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsRequest + (*StatfsResponse)(nil), // 39: temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsResponse + (*CreateSnapshotRequest)(nil), // 40: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotRequest + (*CreateSnapshotResponse)(nil), // 41: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotResponse + (*InodeAttr)(nil), // 42: temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + (*DirEntry)(nil), // 43: temporal.server.chasm.lib.temporalzfs.proto.v1.DirEntry + (*AttachWorkflowRequest)(nil), // 44: temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowRequest + (*AttachWorkflowResponse)(nil), // 45: temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowResponse + (*DetachWorkflowRequest)(nil), // 46: temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowRequest + (*DetachWorkflowResponse)(nil), // 47: temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowResponse + (*FilesystemConfig)(nil), // 48: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig + (*FilesystemState)(nil), // 49: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemState + (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp +} +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_depIdxs = []int32{ + 48, // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemRequest.config:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig + 49, // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoResponse.state:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemState + 42, // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.LookupResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 43, // 4: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirResponse.entries:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.DirEntry + 42, // 5: temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 6: temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrRequest.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 7: temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 8: temporal.server.chasm.lib.temporalzfs.proto.v1.LinkResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 9: temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 10: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 42, // 11: temporal.server.chasm.lib.temporalzfs.proto.v1.MknodResponse.attr:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr + 50, // 12: temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr.atime:type_name -> google.protobuf.Timestamp + 50, // 13: temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr.mtime:type_name -> google.protobuf.Timestamp + 50, // 14: temporal.server.chasm.lib.temporalzfs.proto.v1.InodeAttr.ctime:type_name -> google.protobuf.Timestamp + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_init() } +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_init() { + if File_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto != nil { + return + } + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 48, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto = out.File + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_goTypes = nil + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_depIdxs = nil +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service.pb.go new file mode 100644 index 0000000000..a840836dbc --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/temporalzfs/proto/v1/service.proto + +package temporalzfspb + +import ( + reflect "reflect" + unsafe "unsafe" + + _ "go.temporal.io/server/api/common/v1" + _ "go.temporal.io/server/api/routing/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_rawDesc = "" + + "\n" + + ".temporal.server.chasm.lib.temporalzfs.proto.v1.LookupResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa5\x01\n" + + "\aGetattr\x12>.temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrRequest\x1a?.temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa5\x01\n" + + "\aSetattr\x12>.temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrRequest\x1a?.temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xae\x01\n" + + "\n" + + "ReadChunks\x12A.temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksRequest\x1aB.temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xb1\x01\n" + + "\vWriteChunks\x12B.temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksRequest\x1aC.temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa8\x01\n" + + "\bTruncate\x12?.temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateRequest\x1a@.temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\x9f\x01\n" + + "\x05Mkdir\x12<.temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirRequest\x1a=.temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa2\x01\n" + + "\x06Unlink\x12=.temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkRequest\x1a>.temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\x9f\x01\n" + + "\x05Rmdir\x12<.temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirRequest\x1a=.temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa2\x01\n" + + "\x06Rename\x12=.temporal.server.chasm.lib.temporalzfs.proto.v1.RenameRequest\x1a>.temporal.server.chasm.lib.temporalzfs.proto.v1.RenameResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa5\x01\n" + + "\aReadDir\x12>.temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirRequest\x1a?.temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\x9c\x01\n" + + "\x04Link\x12;.temporal.server.chasm.lib.temporalzfs.proto.v1.LinkRequest\x1a<.temporal.server.chasm.lib.temporalzfs.proto.v1.LinkResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa5\x01\n" + + "\aSymlink\x12>.temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkRequest\x1a?.temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa8\x01\n" + + "\bReadlink\x12?.temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkRequest\x1a@.temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xae\x01\n" + + "\n" + + "CreateFile\x12A.temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileRequest\x1aB.temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\x9f\x01\n" + + "\x05Mknod\x12<.temporal.server.chasm.lib.temporalzfs.proto.v1.MknodRequest\x1a=.temporal.server.chasm.lib.temporalzfs.proto.v1.MknodResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xa2\x01\n" + + "\x06Statfs\x12=.temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsRequest\x1a>.temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xba\x01\n" + + "\x0eCreateSnapshot\x12E.temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotRequest\x1aF.temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xba\x01\n" + + "\x0eAttachWorkflow\x12E.temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowRequest\x1aF.temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01\x12\xba\x01\n" + + "\x0eDetachWorkflow\x12E.temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowRequest\x1aF.temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowResponse\"\x19\x92\xc4\x03\x0f\x1a\rfilesystem_id\x8a\xb5\x18\x02\b\x01BMZKgo.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspbb\x06proto3" + +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_goTypes = []any{ + (*CreateFilesystemRequest)(nil), // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemRequest + (*GetFilesystemInfoRequest)(nil), // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoRequest + (*ArchiveFilesystemRequest)(nil), // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.ArchiveFilesystemRequest + (*LookupRequest)(nil), // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.LookupRequest + (*GetattrRequest)(nil), // 4: temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrRequest + (*SetattrRequest)(nil), // 5: temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrRequest + (*ReadChunksRequest)(nil), // 6: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksRequest + (*WriteChunksRequest)(nil), // 7: temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksRequest + (*TruncateRequest)(nil), // 8: temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateRequest + (*MkdirRequest)(nil), // 9: temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirRequest + (*UnlinkRequest)(nil), // 10: temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkRequest + (*RmdirRequest)(nil), // 11: temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirRequest + (*RenameRequest)(nil), // 12: temporal.server.chasm.lib.temporalzfs.proto.v1.RenameRequest + (*ReadDirRequest)(nil), // 13: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirRequest + (*LinkRequest)(nil), // 14: temporal.server.chasm.lib.temporalzfs.proto.v1.LinkRequest + (*SymlinkRequest)(nil), // 15: temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkRequest + (*ReadlinkRequest)(nil), // 16: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkRequest + (*CreateFileRequest)(nil), // 17: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileRequest + (*MknodRequest)(nil), // 18: temporal.server.chasm.lib.temporalzfs.proto.v1.MknodRequest + (*StatfsRequest)(nil), // 19: temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsRequest + (*CreateSnapshotRequest)(nil), // 20: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotRequest + (*AttachWorkflowRequest)(nil), // 21: temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowRequest + (*DetachWorkflowRequest)(nil), // 22: temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowRequest + (*CreateFilesystemResponse)(nil), // 23: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemResponse + (*GetFilesystemInfoResponse)(nil), // 24: temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoResponse + (*ArchiveFilesystemResponse)(nil), // 25: temporal.server.chasm.lib.temporalzfs.proto.v1.ArchiveFilesystemResponse + (*LookupResponse)(nil), // 26: temporal.server.chasm.lib.temporalzfs.proto.v1.LookupResponse + (*GetattrResponse)(nil), // 27: temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrResponse + (*SetattrResponse)(nil), // 28: temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrResponse + (*ReadChunksResponse)(nil), // 29: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksResponse + (*WriteChunksResponse)(nil), // 30: temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksResponse + (*TruncateResponse)(nil), // 31: temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateResponse + (*MkdirResponse)(nil), // 32: temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirResponse + (*UnlinkResponse)(nil), // 33: temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkResponse + (*RmdirResponse)(nil), // 34: temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirResponse + (*RenameResponse)(nil), // 35: temporal.server.chasm.lib.temporalzfs.proto.v1.RenameResponse + (*ReadDirResponse)(nil), // 36: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirResponse + (*LinkResponse)(nil), // 37: temporal.server.chasm.lib.temporalzfs.proto.v1.LinkResponse + (*SymlinkResponse)(nil), // 38: temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkResponse + (*ReadlinkResponse)(nil), // 39: temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkResponse + (*CreateFileResponse)(nil), // 40: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileResponse + (*MknodResponse)(nil), // 41: temporal.server.chasm.lib.temporalzfs.proto.v1.MknodResponse + (*StatfsResponse)(nil), // 42: temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsResponse + (*CreateSnapshotResponse)(nil), // 43: temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotResponse + (*AttachWorkflowResponse)(nil), // 44: temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowResponse + (*DetachWorkflowResponse)(nil), // 45: temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowResponse +} +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.CreateFilesystem:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemRequest + 1, // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.GetFilesystemInfo:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoRequest + 2, // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.ArchiveFilesystem:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ArchiveFilesystemRequest + 3, // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Lookup:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.LookupRequest + 4, // 4: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Getattr:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrRequest + 5, // 5: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Setattr:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrRequest + 6, // 6: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.ReadChunks:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksRequest + 7, // 7: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.WriteChunks:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksRequest + 8, // 8: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Truncate:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateRequest + 9, // 9: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Mkdir:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirRequest + 10, // 10: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Unlink:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkRequest + 11, // 11: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Rmdir:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirRequest + 12, // 12: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Rename:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.RenameRequest + 13, // 13: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.ReadDir:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirRequest + 14, // 14: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Link:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.LinkRequest + 15, // 15: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Symlink:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkRequest + 16, // 16: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Readlink:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkRequest + 17, // 17: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.CreateFile:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileRequest + 18, // 18: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Mknod:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.MknodRequest + 19, // 19: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Statfs:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsRequest + 20, // 20: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.CreateSnapshot:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotRequest + 21, // 21: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.AttachWorkflow:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowRequest + 22, // 22: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.DetachWorkflow:input_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowRequest + 23, // 23: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.CreateFilesystem:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFilesystemResponse + 24, // 24: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.GetFilesystemInfo:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.GetFilesystemInfoResponse + 25, // 25: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.ArchiveFilesystem:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ArchiveFilesystemResponse + 26, // 26: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Lookup:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.LookupResponse + 27, // 27: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Getattr:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.GetattrResponse + 28, // 28: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Setattr:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.SetattrResponse + 29, // 29: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.ReadChunks:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ReadChunksResponse + 30, // 30: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.WriteChunks:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.WriteChunksResponse + 31, // 31: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Truncate:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.TruncateResponse + 32, // 32: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Mkdir:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.MkdirResponse + 33, // 33: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Unlink:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.UnlinkResponse + 34, // 34: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Rmdir:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.RmdirResponse + 35, // 35: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Rename:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.RenameResponse + 36, // 36: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.ReadDir:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ReadDirResponse + 37, // 37: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Link:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.LinkResponse + 38, // 38: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Symlink:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.SymlinkResponse + 39, // 39: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Readlink:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.ReadlinkResponse + 40, // 40: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.CreateFile:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.CreateFileResponse + 41, // 41: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Mknod:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.MknodResponse + 42, // 42: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.Statfs:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.StatfsResponse + 43, // 43: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.CreateSnapshot:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.CreateSnapshotResponse + 44, // 44: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.AttachWorkflow:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.AttachWorkflowResponse + 45, // 45: temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService.DetachWorkflow:output_type -> temporal.server.chasm.lib.temporalzfs.proto.v1.DetachWorkflowResponse + 23, // [23:46] is the sub-list for method output_type + 0, // [0:23] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_init() } +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_init() { + if File_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto != nil { + return + } + file_temporal_server_chasm_lib_temporalzfs_proto_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto = out.File + file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_goTypes = nil + file_temporal_server_chasm_lib_temporalzfs_proto_v1_service_proto_depIdxs = nil +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service_client.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service_client.pb.go new file mode 100644 index 0000000000..6391a30d69 --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service_client.pb.go @@ -0,0 +1,1049 @@ +// Code generated by protoc-gen-go-chasm. DO NOT EDIT. +package temporalzfspb + +import ( + "context" + "time" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "google.golang.org/grpc" +) + +// TemporalFSServiceLayeredClient is a client for TemporalFSService. +type TemporalFSServiceLayeredClient struct { + metricsHandler metrics.Handler + numShards int32 + redirector history.Redirector[TemporalFSServiceClient] + retryPolicy backoff.RetryPolicy +} + +// NewTemporalFSServiceLayeredClient initializes a new TemporalFSServiceLayeredClient. +func NewTemporalFSServiceLayeredClient( + dc *dynamicconfig.Collection, + rpcFactory common.RPCFactory, + monitor membership.Monitor, + config *config.Persistence, + logger log.Logger, + metricsHandler metrics.Handler, +) (TemporalFSServiceClient, error) { + resolver, err := monitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + connections := history.NewConnectionPool(resolver, rpcFactory, NewTemporalFSServiceClient) + var redirector history.Redirector[TemporalFSServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { + redirector = history.NewCachingRedirector( + connections, + resolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) + } else { + redirector = history.NewBasicRedirector(connections, resolver) + } + return &TemporalFSServiceLayeredClient{ + metricsHandler: metricsHandler, + redirector: redirector, + numShards: config.NumHistoryShards, + retryPolicy: common.CreateHistoryClientRetryPolicy(), + }, nil +} +func (c *TemporalFSServiceLayeredClient) callCreateFilesystemNoRetry( + ctx context.Context, + request *CreateFilesystemRequest, + opts ...grpc.CallOption, +) (*CreateFilesystemResponse, error) { + var response *CreateFilesystemResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.CreateFilesystem"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.CreateFilesystem(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) CreateFilesystem( + ctx context.Context, + request *CreateFilesystemRequest, + opts ...grpc.CallOption, +) (*CreateFilesystemResponse, error) { + call := func(ctx context.Context) (*CreateFilesystemResponse, error) { + return c.callCreateFilesystemNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callGetFilesystemInfoNoRetry( + ctx context.Context, + request *GetFilesystemInfoRequest, + opts ...grpc.CallOption, +) (*GetFilesystemInfoResponse, error) { + var response *GetFilesystemInfoResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.GetFilesystemInfo"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.GetFilesystemInfo(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) GetFilesystemInfo( + ctx context.Context, + request *GetFilesystemInfoRequest, + opts ...grpc.CallOption, +) (*GetFilesystemInfoResponse, error) { + call := func(ctx context.Context) (*GetFilesystemInfoResponse, error) { + return c.callGetFilesystemInfoNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callArchiveFilesystemNoRetry( + ctx context.Context, + request *ArchiveFilesystemRequest, + opts ...grpc.CallOption, +) (*ArchiveFilesystemResponse, error) { + var response *ArchiveFilesystemResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.ArchiveFilesystem"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.ArchiveFilesystem(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) ArchiveFilesystem( + ctx context.Context, + request *ArchiveFilesystemRequest, + opts ...grpc.CallOption, +) (*ArchiveFilesystemResponse, error) { + call := func(ctx context.Context) (*ArchiveFilesystemResponse, error) { + return c.callArchiveFilesystemNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callLookupNoRetry( + ctx context.Context, + request *LookupRequest, + opts ...grpc.CallOption, +) (*LookupResponse, error) { + var response *LookupResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Lookup"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Lookup(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Lookup( + ctx context.Context, + request *LookupRequest, + opts ...grpc.CallOption, +) (*LookupResponse, error) { + call := func(ctx context.Context) (*LookupResponse, error) { + return c.callLookupNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callGetattrNoRetry( + ctx context.Context, + request *GetattrRequest, + opts ...grpc.CallOption, +) (*GetattrResponse, error) { + var response *GetattrResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Getattr"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Getattr(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Getattr( + ctx context.Context, + request *GetattrRequest, + opts ...grpc.CallOption, +) (*GetattrResponse, error) { + call := func(ctx context.Context) (*GetattrResponse, error) { + return c.callGetattrNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callSetattrNoRetry( + ctx context.Context, + request *SetattrRequest, + opts ...grpc.CallOption, +) (*SetattrResponse, error) { + var response *SetattrResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Setattr"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Setattr(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Setattr( + ctx context.Context, + request *SetattrRequest, + opts ...grpc.CallOption, +) (*SetattrResponse, error) { + call := func(ctx context.Context) (*SetattrResponse, error) { + return c.callSetattrNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callReadChunksNoRetry( + ctx context.Context, + request *ReadChunksRequest, + opts ...grpc.CallOption, +) (*ReadChunksResponse, error) { + var response *ReadChunksResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.ReadChunks"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.ReadChunks(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) ReadChunks( + ctx context.Context, + request *ReadChunksRequest, + opts ...grpc.CallOption, +) (*ReadChunksResponse, error) { + call := func(ctx context.Context) (*ReadChunksResponse, error) { + return c.callReadChunksNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callWriteChunksNoRetry( + ctx context.Context, + request *WriteChunksRequest, + opts ...grpc.CallOption, +) (*WriteChunksResponse, error) { + var response *WriteChunksResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.WriteChunks"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.WriteChunks(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) WriteChunks( + ctx context.Context, + request *WriteChunksRequest, + opts ...grpc.CallOption, +) (*WriteChunksResponse, error) { + call := func(ctx context.Context) (*WriteChunksResponse, error) { + return c.callWriteChunksNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callTruncateNoRetry( + ctx context.Context, + request *TruncateRequest, + opts ...grpc.CallOption, +) (*TruncateResponse, error) { + var response *TruncateResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Truncate"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Truncate(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Truncate( + ctx context.Context, + request *TruncateRequest, + opts ...grpc.CallOption, +) (*TruncateResponse, error) { + call := func(ctx context.Context) (*TruncateResponse, error) { + return c.callTruncateNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callMkdirNoRetry( + ctx context.Context, + request *MkdirRequest, + opts ...grpc.CallOption, +) (*MkdirResponse, error) { + var response *MkdirResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Mkdir"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Mkdir(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Mkdir( + ctx context.Context, + request *MkdirRequest, + opts ...grpc.CallOption, +) (*MkdirResponse, error) { + call := func(ctx context.Context) (*MkdirResponse, error) { + return c.callMkdirNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callUnlinkNoRetry( + ctx context.Context, + request *UnlinkRequest, + opts ...grpc.CallOption, +) (*UnlinkResponse, error) { + var response *UnlinkResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Unlink"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Unlink(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Unlink( + ctx context.Context, + request *UnlinkRequest, + opts ...grpc.CallOption, +) (*UnlinkResponse, error) { + call := func(ctx context.Context) (*UnlinkResponse, error) { + return c.callUnlinkNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callRmdirNoRetry( + ctx context.Context, + request *RmdirRequest, + opts ...grpc.CallOption, +) (*RmdirResponse, error) { + var response *RmdirResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Rmdir"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Rmdir(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Rmdir( + ctx context.Context, + request *RmdirRequest, + opts ...grpc.CallOption, +) (*RmdirResponse, error) { + call := func(ctx context.Context) (*RmdirResponse, error) { + return c.callRmdirNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callRenameNoRetry( + ctx context.Context, + request *RenameRequest, + opts ...grpc.CallOption, +) (*RenameResponse, error) { + var response *RenameResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Rename"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Rename(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Rename( + ctx context.Context, + request *RenameRequest, + opts ...grpc.CallOption, +) (*RenameResponse, error) { + call := func(ctx context.Context) (*RenameResponse, error) { + return c.callRenameNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callReadDirNoRetry( + ctx context.Context, + request *ReadDirRequest, + opts ...grpc.CallOption, +) (*ReadDirResponse, error) { + var response *ReadDirResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.ReadDir"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.ReadDir(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) ReadDir( + ctx context.Context, + request *ReadDirRequest, + opts ...grpc.CallOption, +) (*ReadDirResponse, error) { + call := func(ctx context.Context) (*ReadDirResponse, error) { + return c.callReadDirNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callLinkNoRetry( + ctx context.Context, + request *LinkRequest, + opts ...grpc.CallOption, +) (*LinkResponse, error) { + var response *LinkResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Link"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Link(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Link( + ctx context.Context, + request *LinkRequest, + opts ...grpc.CallOption, +) (*LinkResponse, error) { + call := func(ctx context.Context) (*LinkResponse, error) { + return c.callLinkNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callSymlinkNoRetry( + ctx context.Context, + request *SymlinkRequest, + opts ...grpc.CallOption, +) (*SymlinkResponse, error) { + var response *SymlinkResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Symlink"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Symlink(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Symlink( + ctx context.Context, + request *SymlinkRequest, + opts ...grpc.CallOption, +) (*SymlinkResponse, error) { + call := func(ctx context.Context) (*SymlinkResponse, error) { + return c.callSymlinkNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callReadlinkNoRetry( + ctx context.Context, + request *ReadlinkRequest, + opts ...grpc.CallOption, +) (*ReadlinkResponse, error) { + var response *ReadlinkResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Readlink"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Readlink(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Readlink( + ctx context.Context, + request *ReadlinkRequest, + opts ...grpc.CallOption, +) (*ReadlinkResponse, error) { + call := func(ctx context.Context) (*ReadlinkResponse, error) { + return c.callReadlinkNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callCreateFileNoRetry( + ctx context.Context, + request *CreateFileRequest, + opts ...grpc.CallOption, +) (*CreateFileResponse, error) { + var response *CreateFileResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.CreateFile"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.CreateFile(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) CreateFile( + ctx context.Context, + request *CreateFileRequest, + opts ...grpc.CallOption, +) (*CreateFileResponse, error) { + call := func(ctx context.Context) (*CreateFileResponse, error) { + return c.callCreateFileNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callMknodNoRetry( + ctx context.Context, + request *MknodRequest, + opts ...grpc.CallOption, +) (*MknodResponse, error) { + var response *MknodResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Mknod"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Mknod(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Mknod( + ctx context.Context, + request *MknodRequest, + opts ...grpc.CallOption, +) (*MknodResponse, error) { + call := func(ctx context.Context) (*MknodResponse, error) { + return c.callMknodNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callStatfsNoRetry( + ctx context.Context, + request *StatfsRequest, + opts ...grpc.CallOption, +) (*StatfsResponse, error) { + var response *StatfsResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.Statfs"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Statfs(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) Statfs( + ctx context.Context, + request *StatfsRequest, + opts ...grpc.CallOption, +) (*StatfsResponse, error) { + call := func(ctx context.Context) (*StatfsResponse, error) { + return c.callStatfsNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callCreateSnapshotNoRetry( + ctx context.Context, + request *CreateSnapshotRequest, + opts ...grpc.CallOption, +) (*CreateSnapshotResponse, error) { + var response *CreateSnapshotResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.CreateSnapshot"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.CreateSnapshot(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) CreateSnapshot( + ctx context.Context, + request *CreateSnapshotRequest, + opts ...grpc.CallOption, +) (*CreateSnapshotResponse, error) { + call := func(ctx context.Context) (*CreateSnapshotResponse, error) { + return c.callCreateSnapshotNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callAttachWorkflowNoRetry( + ctx context.Context, + request *AttachWorkflowRequest, + opts ...grpc.CallOption, +) (*AttachWorkflowResponse, error) { + var response *AttachWorkflowResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.AttachWorkflow"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.AttachWorkflow(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) AttachWorkflow( + ctx context.Context, + request *AttachWorkflowRequest, + opts ...grpc.CallOption, +) (*AttachWorkflowResponse, error) { + call := func(ctx context.Context) (*AttachWorkflowResponse, error) { + return c.callAttachWorkflowNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *TemporalFSServiceLayeredClient) callDetachWorkflowNoRetry( + ctx context.Context, + request *DetachWorkflowRequest, + opts ...grpc.CallOption, +) (*DetachWorkflowResponse, error) { + var response *DetachWorkflowResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TemporalFSService.DetachWorkflow"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFilesystemId(), c.numShards) + op := func(ctx context.Context, client TemporalFSServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DetachWorkflow(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TemporalFSServiceLayeredClient) DetachWorkflow( + ctx context.Context, + request *DetachWorkflowRequest, + opts ...grpc.CallOption, +) (*DetachWorkflowResponse, error) { + call := func(ctx context.Context) (*DetachWorkflowResponse, error) { + return c.callDetachWorkflowNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service_grpc.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service_grpc.pb.go new file mode 100644 index 0000000000..f46e1bfe43 --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/service_grpc.pb.go @@ -0,0 +1,940 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/chasm/lib/temporalzfs/proto/v1/service.proto + +package temporalzfspb + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + TemporalFSService_CreateFilesystem_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/CreateFilesystem" + TemporalFSService_GetFilesystemInfo_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/GetFilesystemInfo" + TemporalFSService_ArchiveFilesystem_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/ArchiveFilesystem" + TemporalFSService_Lookup_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Lookup" + TemporalFSService_Getattr_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Getattr" + TemporalFSService_Setattr_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Setattr" + TemporalFSService_ReadChunks_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/ReadChunks" + TemporalFSService_WriteChunks_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/WriteChunks" + TemporalFSService_Truncate_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Truncate" + TemporalFSService_Mkdir_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Mkdir" + TemporalFSService_Unlink_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Unlink" + TemporalFSService_Rmdir_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Rmdir" + TemporalFSService_Rename_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Rename" + TemporalFSService_ReadDir_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/ReadDir" + TemporalFSService_Link_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Link" + TemporalFSService_Symlink_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Symlink" + TemporalFSService_Readlink_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Readlink" + TemporalFSService_CreateFile_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/CreateFile" + TemporalFSService_Mknod_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Mknod" + TemporalFSService_Statfs_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/Statfs" + TemporalFSService_CreateSnapshot_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/CreateSnapshot" + TemporalFSService_AttachWorkflow_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/AttachWorkflow" + TemporalFSService_DetachWorkflow_FullMethodName = "/temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService/DetachWorkflow" +) + +// TemporalFSServiceClient is the client API for TemporalFSService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TemporalFSServiceClient interface { + // Lifecycle + CreateFilesystem(ctx context.Context, in *CreateFilesystemRequest, opts ...grpc.CallOption) (*CreateFilesystemResponse, error) + GetFilesystemInfo(ctx context.Context, in *GetFilesystemInfoRequest, opts ...grpc.CallOption) (*GetFilesystemInfoResponse, error) + ArchiveFilesystem(ctx context.Context, in *ArchiveFilesystemRequest, opts ...grpc.CallOption) (*ArchiveFilesystemResponse, error) + // Inode operations + Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) + Getattr(ctx context.Context, in *GetattrRequest, opts ...grpc.CallOption) (*GetattrResponse, error) + Setattr(ctx context.Context, in *SetattrRequest, opts ...grpc.CallOption) (*SetattrResponse, error) + // File I/O + ReadChunks(ctx context.Context, in *ReadChunksRequest, opts ...grpc.CallOption) (*ReadChunksResponse, error) + WriteChunks(ctx context.Context, in *WriteChunksRequest, opts ...grpc.CallOption) (*WriteChunksResponse, error) + Truncate(ctx context.Context, in *TruncateRequest, opts ...grpc.CallOption) (*TruncateResponse, error) + // Directory operations + Mkdir(ctx context.Context, in *MkdirRequest, opts ...grpc.CallOption) (*MkdirResponse, error) + Unlink(ctx context.Context, in *UnlinkRequest, opts ...grpc.CallOption) (*UnlinkResponse, error) + Rmdir(ctx context.Context, in *RmdirRequest, opts ...grpc.CallOption) (*RmdirResponse, error) + Rename(ctx context.Context, in *RenameRequest, opts ...grpc.CallOption) (*RenameResponse, error) + ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) + // Links + Link(ctx context.Context, in *LinkRequest, opts ...grpc.CallOption) (*LinkResponse, error) + Symlink(ctx context.Context, in *SymlinkRequest, opts ...grpc.CallOption) (*SymlinkResponse, error) + Readlink(ctx context.Context, in *ReadlinkRequest, opts ...grpc.CallOption) (*ReadlinkResponse, error) + // Special + CreateFile(ctx context.Context, in *CreateFileRequest, opts ...grpc.CallOption) (*CreateFileResponse, error) + Mknod(ctx context.Context, in *MknodRequest, opts ...grpc.CallOption) (*MknodResponse, error) + Statfs(ctx context.Context, in *StatfsRequest, opts ...grpc.CallOption) (*StatfsResponse, error) + // Snapshots + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + // Owner management + AttachWorkflow(ctx context.Context, in *AttachWorkflowRequest, opts ...grpc.CallOption) (*AttachWorkflowResponse, error) + DetachWorkflow(ctx context.Context, in *DetachWorkflowRequest, opts ...grpc.CallOption) (*DetachWorkflowResponse, error) +} + +type temporalFSServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTemporalFSServiceClient(cc grpc.ClientConnInterface) TemporalFSServiceClient { + return &temporalFSServiceClient{cc} +} + +func (c *temporalFSServiceClient) CreateFilesystem(ctx context.Context, in *CreateFilesystemRequest, opts ...grpc.CallOption) (*CreateFilesystemResponse, error) { + out := new(CreateFilesystemResponse) + err := c.cc.Invoke(ctx, TemporalFSService_CreateFilesystem_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) GetFilesystemInfo(ctx context.Context, in *GetFilesystemInfoRequest, opts ...grpc.CallOption) (*GetFilesystemInfoResponse, error) { + out := new(GetFilesystemInfoResponse) + err := c.cc.Invoke(ctx, TemporalFSService_GetFilesystemInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) ArchiveFilesystem(ctx context.Context, in *ArchiveFilesystemRequest, opts ...grpc.CallOption) (*ArchiveFilesystemResponse, error) { + out := new(ArchiveFilesystemResponse) + err := c.cc.Invoke(ctx, TemporalFSService_ArchiveFilesystem_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Lookup(ctx context.Context, in *LookupRequest, opts ...grpc.CallOption) (*LookupResponse, error) { + out := new(LookupResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Lookup_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Getattr(ctx context.Context, in *GetattrRequest, opts ...grpc.CallOption) (*GetattrResponse, error) { + out := new(GetattrResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Getattr_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Setattr(ctx context.Context, in *SetattrRequest, opts ...grpc.CallOption) (*SetattrResponse, error) { + out := new(SetattrResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Setattr_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) ReadChunks(ctx context.Context, in *ReadChunksRequest, opts ...grpc.CallOption) (*ReadChunksResponse, error) { + out := new(ReadChunksResponse) + err := c.cc.Invoke(ctx, TemporalFSService_ReadChunks_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) WriteChunks(ctx context.Context, in *WriteChunksRequest, opts ...grpc.CallOption) (*WriteChunksResponse, error) { + out := new(WriteChunksResponse) + err := c.cc.Invoke(ctx, TemporalFSService_WriteChunks_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Truncate(ctx context.Context, in *TruncateRequest, opts ...grpc.CallOption) (*TruncateResponse, error) { + out := new(TruncateResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Truncate_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Mkdir(ctx context.Context, in *MkdirRequest, opts ...grpc.CallOption) (*MkdirResponse, error) { + out := new(MkdirResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Mkdir_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Unlink(ctx context.Context, in *UnlinkRequest, opts ...grpc.CallOption) (*UnlinkResponse, error) { + out := new(UnlinkResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Unlink_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Rmdir(ctx context.Context, in *RmdirRequest, opts ...grpc.CallOption) (*RmdirResponse, error) { + out := new(RmdirResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Rmdir_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Rename(ctx context.Context, in *RenameRequest, opts ...grpc.CallOption) (*RenameResponse, error) { + out := new(RenameResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Rename_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { + out := new(ReadDirResponse) + err := c.cc.Invoke(ctx, TemporalFSService_ReadDir_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Link(ctx context.Context, in *LinkRequest, opts ...grpc.CallOption) (*LinkResponse, error) { + out := new(LinkResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Link_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Symlink(ctx context.Context, in *SymlinkRequest, opts ...grpc.CallOption) (*SymlinkResponse, error) { + out := new(SymlinkResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Symlink_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Readlink(ctx context.Context, in *ReadlinkRequest, opts ...grpc.CallOption) (*ReadlinkResponse, error) { + out := new(ReadlinkResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Readlink_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) CreateFile(ctx context.Context, in *CreateFileRequest, opts ...grpc.CallOption) (*CreateFileResponse, error) { + out := new(CreateFileResponse) + err := c.cc.Invoke(ctx, TemporalFSService_CreateFile_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Mknod(ctx context.Context, in *MknodRequest, opts ...grpc.CallOption) (*MknodResponse, error) { + out := new(MknodResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Mknod_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) Statfs(ctx context.Context, in *StatfsRequest, opts ...grpc.CallOption) (*StatfsResponse, error) { + out := new(StatfsResponse) + err := c.cc.Invoke(ctx, TemporalFSService_Statfs_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := c.cc.Invoke(ctx, TemporalFSService_CreateSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) AttachWorkflow(ctx context.Context, in *AttachWorkflowRequest, opts ...grpc.CallOption) (*AttachWorkflowResponse, error) { + out := new(AttachWorkflowResponse) + err := c.cc.Invoke(ctx, TemporalFSService_AttachWorkflow_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *temporalFSServiceClient) DetachWorkflow(ctx context.Context, in *DetachWorkflowRequest, opts ...grpc.CallOption) (*DetachWorkflowResponse, error) { + out := new(DetachWorkflowResponse) + err := c.cc.Invoke(ctx, TemporalFSService_DetachWorkflow_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TemporalFSServiceServer is the server API for TemporalFSService service. +// All implementations must embed UnimplementedTemporalFSServiceServer +// for forward compatibility +type TemporalFSServiceServer interface { + // Lifecycle + CreateFilesystem(context.Context, *CreateFilesystemRequest) (*CreateFilesystemResponse, error) + GetFilesystemInfo(context.Context, *GetFilesystemInfoRequest) (*GetFilesystemInfoResponse, error) + ArchiveFilesystem(context.Context, *ArchiveFilesystemRequest) (*ArchiveFilesystemResponse, error) + // Inode operations + Lookup(context.Context, *LookupRequest) (*LookupResponse, error) + Getattr(context.Context, *GetattrRequest) (*GetattrResponse, error) + Setattr(context.Context, *SetattrRequest) (*SetattrResponse, error) + // File I/O + ReadChunks(context.Context, *ReadChunksRequest) (*ReadChunksResponse, error) + WriteChunks(context.Context, *WriteChunksRequest) (*WriteChunksResponse, error) + Truncate(context.Context, *TruncateRequest) (*TruncateResponse, error) + // Directory operations + Mkdir(context.Context, *MkdirRequest) (*MkdirResponse, error) + Unlink(context.Context, *UnlinkRequest) (*UnlinkResponse, error) + Rmdir(context.Context, *RmdirRequest) (*RmdirResponse, error) + Rename(context.Context, *RenameRequest) (*RenameResponse, error) + ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) + // Links + Link(context.Context, *LinkRequest) (*LinkResponse, error) + Symlink(context.Context, *SymlinkRequest) (*SymlinkResponse, error) + Readlink(context.Context, *ReadlinkRequest) (*ReadlinkResponse, error) + // Special + CreateFile(context.Context, *CreateFileRequest) (*CreateFileResponse, error) + Mknod(context.Context, *MknodRequest) (*MknodResponse, error) + Statfs(context.Context, *StatfsRequest) (*StatfsResponse, error) + // Snapshots + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + // Owner management + AttachWorkflow(context.Context, *AttachWorkflowRequest) (*AttachWorkflowResponse, error) + DetachWorkflow(context.Context, *DetachWorkflowRequest) (*DetachWorkflowResponse, error) + mustEmbedUnimplementedTemporalFSServiceServer() +} + +// UnimplementedTemporalFSServiceServer must be embedded to have forward compatible implementations. +type UnimplementedTemporalFSServiceServer struct { +} + +func (UnimplementedTemporalFSServiceServer) CreateFilesystem(context.Context, *CreateFilesystemRequest) (*CreateFilesystemResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateFilesystem not implemented") +} +func (UnimplementedTemporalFSServiceServer) GetFilesystemInfo(context.Context, *GetFilesystemInfoRequest) (*GetFilesystemInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFilesystemInfo not implemented") +} +func (UnimplementedTemporalFSServiceServer) ArchiveFilesystem(context.Context, *ArchiveFilesystemRequest) (*ArchiveFilesystemResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ArchiveFilesystem not implemented") +} +func (UnimplementedTemporalFSServiceServer) Lookup(context.Context, *LookupRequest) (*LookupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Lookup not implemented") +} +func (UnimplementedTemporalFSServiceServer) Getattr(context.Context, *GetattrRequest) (*GetattrResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Getattr not implemented") +} +func (UnimplementedTemporalFSServiceServer) Setattr(context.Context, *SetattrRequest) (*SetattrResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Setattr not implemented") +} +func (UnimplementedTemporalFSServiceServer) ReadChunks(context.Context, *ReadChunksRequest) (*ReadChunksResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadChunks not implemented") +} +func (UnimplementedTemporalFSServiceServer) WriteChunks(context.Context, *WriteChunksRequest) (*WriteChunksResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WriteChunks not implemented") +} +func (UnimplementedTemporalFSServiceServer) Truncate(context.Context, *TruncateRequest) (*TruncateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Truncate not implemented") +} +func (UnimplementedTemporalFSServiceServer) Mkdir(context.Context, *MkdirRequest) (*MkdirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mkdir not implemented") +} +func (UnimplementedTemporalFSServiceServer) Unlink(context.Context, *UnlinkRequest) (*UnlinkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Unlink not implemented") +} +func (UnimplementedTemporalFSServiceServer) Rmdir(context.Context, *RmdirRequest) (*RmdirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Rmdir not implemented") +} +func (UnimplementedTemporalFSServiceServer) Rename(context.Context, *RenameRequest) (*RenameResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Rename not implemented") +} +func (UnimplementedTemporalFSServiceServer) ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDir not implemented") +} +func (UnimplementedTemporalFSServiceServer) Link(context.Context, *LinkRequest) (*LinkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Link not implemented") +} +func (UnimplementedTemporalFSServiceServer) Symlink(context.Context, *SymlinkRequest) (*SymlinkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Symlink not implemented") +} +func (UnimplementedTemporalFSServiceServer) Readlink(context.Context, *ReadlinkRequest) (*ReadlinkResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Readlink not implemented") +} +func (UnimplementedTemporalFSServiceServer) CreateFile(context.Context, *CreateFileRequest) (*CreateFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateFile not implemented") +} +func (UnimplementedTemporalFSServiceServer) Mknod(context.Context, *MknodRequest) (*MknodResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mknod not implemented") +} +func (UnimplementedTemporalFSServiceServer) Statfs(context.Context, *StatfsRequest) (*StatfsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statfs not implemented") +} +func (UnimplementedTemporalFSServiceServer) CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (UnimplementedTemporalFSServiceServer) AttachWorkflow(context.Context, *AttachWorkflowRequest) (*AttachWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AttachWorkflow not implemented") +} +func (UnimplementedTemporalFSServiceServer) DetachWorkflow(context.Context, *DetachWorkflowRequest) (*DetachWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DetachWorkflow not implemented") +} +func (UnimplementedTemporalFSServiceServer) mustEmbedUnimplementedTemporalFSServiceServer() {} + +// UnsafeTemporalFSServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TemporalFSServiceServer will +// result in compilation errors. +type UnsafeTemporalFSServiceServer interface { + mustEmbedUnimplementedTemporalFSServiceServer() +} + +func RegisterTemporalFSServiceServer(s grpc.ServiceRegistrar, srv TemporalFSServiceServer) { + s.RegisterService(&TemporalFSService_ServiceDesc, srv) +} + +func _TemporalFSService_CreateFilesystem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateFilesystemRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).CreateFilesystem(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_CreateFilesystem_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).CreateFilesystem(ctx, req.(*CreateFilesystemRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_GetFilesystemInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFilesystemInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).GetFilesystemInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_GetFilesystemInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).GetFilesystemInfo(ctx, req.(*GetFilesystemInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_ArchiveFilesystem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ArchiveFilesystemRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).ArchiveFilesystem(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_ArchiveFilesystem_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).ArchiveFilesystem(ctx, req.(*ArchiveFilesystemRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Lookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LookupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Lookup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Lookup_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Lookup(ctx, req.(*LookupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Getattr_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetattrRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Getattr(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Getattr_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Getattr(ctx, req.(*GetattrRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Setattr_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetattrRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Setattr(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Setattr_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Setattr(ctx, req.(*SetattrRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_ReadChunks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadChunksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).ReadChunks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_ReadChunks_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).ReadChunks(ctx, req.(*ReadChunksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_WriteChunks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteChunksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).WriteChunks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_WriteChunks_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).WriteChunks(ctx, req.(*WriteChunksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Truncate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TruncateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Truncate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Truncate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Truncate(ctx, req.(*TruncateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Mkdir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MkdirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Mkdir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Mkdir_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Mkdir(ctx, req.(*MkdirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Unlink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnlinkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Unlink(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Unlink_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Unlink(ctx, req.(*UnlinkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Rmdir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RmdirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Rmdir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Rmdir_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Rmdir(ctx, req.(*RmdirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Rename_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RenameRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Rename(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Rename_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Rename(ctx, req.(*RenameRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).ReadDir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_ReadDir_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).ReadDir(ctx, req.(*ReadDirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Link_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LinkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Link(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Link_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Link(ctx, req.(*LinkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Symlink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SymlinkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Symlink(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Symlink_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Symlink(ctx, req.(*SymlinkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Readlink_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadlinkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Readlink(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Readlink_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Readlink(ctx, req.(*ReadlinkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_CreateFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).CreateFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_CreateFile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).CreateFile(ctx, req.(*CreateFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Mknod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MknodRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Mknod(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Mknod_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Mknod(ctx, req.(*MknodRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_Statfs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatfsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).Statfs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_Statfs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).Statfs(ctx, req.(*StatfsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_CreateSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_AttachWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).AttachWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_AttachWorkflow_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).AttachWorkflow(ctx, req.(*AttachWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TemporalFSService_DetachWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemporalFSServiceServer).DetachWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TemporalFSService_DetachWorkflow_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemporalFSServiceServer).DetachWorkflow(ctx, req.(*DetachWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// TemporalFSService_ServiceDesc is the grpc.ServiceDesc for TemporalFSService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TemporalFSService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.chasm.lib.temporalzfs.proto.v1.TemporalFSService", + HandlerType: (*TemporalFSServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateFilesystem", + Handler: _TemporalFSService_CreateFilesystem_Handler, + }, + { + MethodName: "GetFilesystemInfo", + Handler: _TemporalFSService_GetFilesystemInfo_Handler, + }, + { + MethodName: "ArchiveFilesystem", + Handler: _TemporalFSService_ArchiveFilesystem_Handler, + }, + { + MethodName: "Lookup", + Handler: _TemporalFSService_Lookup_Handler, + }, + { + MethodName: "Getattr", + Handler: _TemporalFSService_Getattr_Handler, + }, + { + MethodName: "Setattr", + Handler: _TemporalFSService_Setattr_Handler, + }, + { + MethodName: "ReadChunks", + Handler: _TemporalFSService_ReadChunks_Handler, + }, + { + MethodName: "WriteChunks", + Handler: _TemporalFSService_WriteChunks_Handler, + }, + { + MethodName: "Truncate", + Handler: _TemporalFSService_Truncate_Handler, + }, + { + MethodName: "Mkdir", + Handler: _TemporalFSService_Mkdir_Handler, + }, + { + MethodName: "Unlink", + Handler: _TemporalFSService_Unlink_Handler, + }, + { + MethodName: "Rmdir", + Handler: _TemporalFSService_Rmdir_Handler, + }, + { + MethodName: "Rename", + Handler: _TemporalFSService_Rename_Handler, + }, + { + MethodName: "ReadDir", + Handler: _TemporalFSService_ReadDir_Handler, + }, + { + MethodName: "Link", + Handler: _TemporalFSService_Link_Handler, + }, + { + MethodName: "Symlink", + Handler: _TemporalFSService_Symlink_Handler, + }, + { + MethodName: "Readlink", + Handler: _TemporalFSService_Readlink_Handler, + }, + { + MethodName: "CreateFile", + Handler: _TemporalFSService_CreateFile_Handler, + }, + { + MethodName: "Mknod", + Handler: _TemporalFSService_Mknod_Handler, + }, + { + MethodName: "Statfs", + Handler: _TemporalFSService_Statfs_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _TemporalFSService_CreateSnapshot_Handler, + }, + { + MethodName: "AttachWorkflow", + Handler: _TemporalFSService_AttachWorkflow_Handler, + }, + { + MethodName: "DetachWorkflow", + Handler: _TemporalFSService_DetachWorkflow_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/chasm/lib/temporalzfs/proto/v1/service.proto", +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/state.go-helpers.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/state.go-helpers.pb.go new file mode 100644 index 0000000000..6c01c540b8 --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/state.go-helpers.pb.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package temporalzfspb + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type FilesystemState to the protobuf v3 wire format +func (val *FilesystemState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type FilesystemState from the protobuf v3 wire format +func (val *FilesystemState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *FilesystemState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two FilesystemState values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *FilesystemState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *FilesystemState + switch t := that.(type) { + case *FilesystemState: + that1 = t + case FilesystemState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type FilesystemConfig to the protobuf v3 wire format +func (val *FilesystemConfig) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type FilesystemConfig from the protobuf v3 wire format +func (val *FilesystemConfig) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *FilesystemConfig) Size() int { + return proto.Size(val) +} + +// Equal returns whether two FilesystemConfig values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *FilesystemConfig) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *FilesystemConfig + switch t := that.(type) { + case *FilesystemConfig: + that1 = t + case FilesystemConfig: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type FSStats to the protobuf v3 wire format +func (val *FSStats) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type FSStats from the protobuf v3 wire format +func (val *FSStats) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *FSStats) Size() int { + return proto.Size(val) +} + +// Equal returns whether two FSStats values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *FSStats) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *FSStats + switch t := that.(type) { + case *FSStats: + that1 = t + case FSStats: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +var ( + FilesystemStatus_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Running": 1, + "Archived": 2, + "Deleted": 3, + } +) + +// FilesystemStatusFromString parses a FilesystemStatus value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to FilesystemStatus +func FilesystemStatusFromString(s string) (FilesystemStatus, error) { + if v, ok := FilesystemStatus_value[s]; ok { + return FilesystemStatus(v), nil + } else if v, ok := FilesystemStatus_shorthandValue[s]; ok { + return FilesystemStatus(v), nil + } + return FilesystemStatus(0), fmt.Errorf("%s is not a valid FilesystemStatus", s) +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/state.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/state.pb.go new file mode 100644 index 0000000000..94d9142c5f --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/state.pb.go @@ -0,0 +1,447 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/temporalzfs/proto/v1/state.proto + +package temporalzfspb + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FilesystemStatus int32 + +const ( + FILESYSTEM_STATUS_UNSPECIFIED FilesystemStatus = 0 + FILESYSTEM_STATUS_RUNNING FilesystemStatus = 1 + FILESYSTEM_STATUS_ARCHIVED FilesystemStatus = 2 + FILESYSTEM_STATUS_DELETED FilesystemStatus = 3 +) + +// Enum value maps for FilesystemStatus. +var ( + FilesystemStatus_name = map[int32]string{ + 0: "FILESYSTEM_STATUS_UNSPECIFIED", + 1: "FILESYSTEM_STATUS_RUNNING", + 2: "FILESYSTEM_STATUS_ARCHIVED", + 3: "FILESYSTEM_STATUS_DELETED", + } + FilesystemStatus_value = map[string]int32{ + "FILESYSTEM_STATUS_UNSPECIFIED": 0, + "FILESYSTEM_STATUS_RUNNING": 1, + "FILESYSTEM_STATUS_ARCHIVED": 2, + "FILESYSTEM_STATUS_DELETED": 3, + } +) + +func (x FilesystemStatus) Enum() *FilesystemStatus { + p := new(FilesystemStatus) + *p = x + return p +} + +func (x FilesystemStatus) String() string { + switch x { + case FILESYSTEM_STATUS_UNSPECIFIED: + return "Unspecified" + case FILESYSTEM_STATUS_RUNNING: + return "Running" + case FILESYSTEM_STATUS_ARCHIVED: + return "Archived" + case FILESYSTEM_STATUS_DELETED: + return "Deleted" + default: + return strconv.Itoa(int(x)) + } + +} + +func (FilesystemStatus) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_enumTypes[0].Descriptor() +} + +func (FilesystemStatus) Type() protoreflect.EnumType { + return &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_enumTypes[0] +} + +func (x FilesystemStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FilesystemStatus.Descriptor instead. +func (FilesystemStatus) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescGZIP(), []int{0} +} + +type FilesystemState struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status FilesystemStatus `protobuf:"varint,1,opt,name=status,proto3,enum=temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemStatus" json:"status,omitempty"` + Config *FilesystemConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Stats *FSStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` + NextInodeId uint64 `protobuf:"varint,4,opt,name=next_inode_id,json=nextInodeId,proto3" json:"next_inode_id,omitempty"` + NextTxnId uint64 `protobuf:"varint,5,opt,name=next_txn_id,json=nextTxnId,proto3" json:"next_txn_id,omitempty"` + // Set of workflow IDs that own this filesystem. + // TFS is eligible for GC only when this set is empty. + OwnerWorkflowIds []string `protobuf:"bytes,7,rep,name=owner_workflow_ids,json=ownerWorkflowIds,proto3" json:"owner_workflow_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FilesystemState) Reset() { + *x = FilesystemState{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FilesystemState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilesystemState) ProtoMessage() {} + +func (x *FilesystemState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilesystemState.ProtoReflect.Descriptor instead. +func (*FilesystemState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescGZIP(), []int{0} +} + +func (x *FilesystemState) GetStatus() FilesystemStatus { + if x != nil { + return x.Status + } + return FILESYSTEM_STATUS_UNSPECIFIED +} + +func (x *FilesystemState) GetConfig() *FilesystemConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *FilesystemState) GetStats() *FSStats { + if x != nil { + return x.Stats + } + return nil +} + +func (x *FilesystemState) GetNextInodeId() uint64 { + if x != nil { + return x.NextInodeId + } + return 0 +} + +func (x *FilesystemState) GetNextTxnId() uint64 { + if x != nil { + return x.NextTxnId + } + return 0 +} + +func (x *FilesystemState) GetOwnerWorkflowIds() []string { + if x != nil { + return x.OwnerWorkflowIds + } + return nil +} + +type FilesystemConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Default chunk size in bytes (default: 256KB). + ChunkSize uint32 `protobuf:"varint,1,opt,name=chunk_size,json=chunkSize,proto3" json:"chunk_size,omitempty"` + // Maximum total size quota in bytes. + MaxSize uint64 `protobuf:"varint,2,opt,name=max_size,json=maxSize,proto3" json:"max_size,omitempty"` + // Maximum inode count. + MaxFiles uint64 `protobuf:"varint,3,opt,name=max_files,json=maxFiles,proto3" json:"max_files,omitempty"` + // Interval between GC runs. + GcInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=gc_interval,json=gcInterval,proto3" json:"gc_interval,omitempty"` + // How long to retain snapshots. + SnapshotRetention *durationpb.Duration `protobuf:"bytes,5,opt,name=snapshot_retention,json=snapshotRetention,proto3" json:"snapshot_retention,omitempty"` + // Interval between owner liveness checks (default: 10m). + OwnerCheckInterval *durationpb.Duration `protobuf:"bytes,6,opt,name=owner_check_interval,json=ownerCheckInterval,proto3" json:"owner_check_interval,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FilesystemConfig) Reset() { + *x = FilesystemConfig{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FilesystemConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilesystemConfig) ProtoMessage() {} + +func (x *FilesystemConfig) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilesystemConfig.ProtoReflect.Descriptor instead. +func (*FilesystemConfig) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescGZIP(), []int{1} +} + +func (x *FilesystemConfig) GetChunkSize() uint32 { + if x != nil { + return x.ChunkSize + } + return 0 +} + +func (x *FilesystemConfig) GetMaxSize() uint64 { + if x != nil { + return x.MaxSize + } + return 0 +} + +func (x *FilesystemConfig) GetMaxFiles() uint64 { + if x != nil { + return x.MaxFiles + } + return 0 +} + +func (x *FilesystemConfig) GetGcInterval() *durationpb.Duration { + if x != nil { + return x.GcInterval + } + return nil +} + +func (x *FilesystemConfig) GetSnapshotRetention() *durationpb.Duration { + if x != nil { + return x.SnapshotRetention + } + return nil +} + +func (x *FilesystemConfig) GetOwnerCheckInterval() *durationpb.Duration { + if x != nil { + return x.OwnerCheckInterval + } + return nil +} + +type FSStats struct { + state protoimpl.MessageState `protogen:"open.v1"` + TotalSize uint64 `protobuf:"varint,1,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + FileCount uint64 `protobuf:"varint,2,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + DirCount uint64 `protobuf:"varint,3,opt,name=dir_count,json=dirCount,proto3" json:"dir_count,omitempty"` + InodeCount uint64 `protobuf:"varint,4,opt,name=inode_count,json=inodeCount,proto3" json:"inode_count,omitempty"` + ChunkCount uint64 `protobuf:"varint,5,opt,name=chunk_count,json=chunkCount,proto3" json:"chunk_count,omitempty"` + TransitionCount uint64 `protobuf:"varint,6,opt,name=transition_count,json=transitionCount,proto3" json:"transition_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FSStats) Reset() { + *x = FSStats{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FSStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FSStats) ProtoMessage() {} + +func (x *FSStats) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FSStats.ProtoReflect.Descriptor instead. +func (*FSStats) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescGZIP(), []int{2} +} + +func (x *FSStats) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *FSStats) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +func (x *FSStats) GetDirCount() uint64 { + if x != nil { + return x.DirCount + } + return 0 +} + +func (x *FSStats) GetInodeCount() uint64 { + if x != nil { + return x.InodeCount + } + return 0 +} + +func (x *FSStats) GetChunkCount() uint64 { + if x != nil { + return x.ChunkCount + } + return 0 +} + +func (x *FSStats) GetTransitionCount() uint64 { + if x != nil { + return x.TransitionCount + } + return 0 +} + +var File_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDesc = "" + + "\n" + + ":temporal/server/chasm/lib/temporalzfs/proto/v1/state.proto\x12.temporal.server.chasm.lib.temporalzfs.proto.v1\x1a\x1egoogle/protobuf/duration.proto\"\x86\x03\n" + + "\x0fFilesystemState\x12X\n" + + "\x06status\x18\x01 \x01(\x0e2@.temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemStatusR\x06status\x12X\n" + + "\x06config\x18\x02 \x01(\v2@.temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfigR\x06config\x12M\n" + + "\x05stats\x18\x03 \x01(\v27.temporal.server.chasm.lib.temporalzfs.proto.v1.FSStatsR\x05stats\x12\"\n" + + "\rnext_inode_id\x18\x04 \x01(\x04R\vnextInodeId\x12\x1e\n" + + "\vnext_txn_id\x18\x05 \x01(\x04R\tnextTxnId\x12,\n" + + "\x12owner_workflow_ids\x18\a \x03(\tR\x10ownerWorkflowIds\"\xbc\x02\n" + + "\x10FilesystemConfig\x12\x1d\n" + + "\n" + + "chunk_size\x18\x01 \x01(\rR\tchunkSize\x12\x19\n" + + "\bmax_size\x18\x02 \x01(\x04R\amaxSize\x12\x1b\n" + + "\tmax_files\x18\x03 \x01(\x04R\bmaxFiles\x12:\n" + + "\vgc_interval\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\n" + + "gcInterval\x12H\n" + + "\x12snapshot_retention\x18\x05 \x01(\v2\x19.google.protobuf.DurationR\x11snapshotRetention\x12K\n" + + "\x14owner_check_interval\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x12ownerCheckInterval\"\xd1\x01\n" + + "\aFSStats\x12\x1d\n" + + "\n" + + "total_size\x18\x01 \x01(\x04R\ttotalSize\x12\x1d\n" + + "\n" + + "file_count\x18\x02 \x01(\x04R\tfileCount\x12\x1b\n" + + "\tdir_count\x18\x03 \x01(\x04R\bdirCount\x12\x1f\n" + + "\vinode_count\x18\x04 \x01(\x04R\n" + + "inodeCount\x12\x1f\n" + + "\vchunk_count\x18\x05 \x01(\x04R\n" + + "chunkCount\x12)\n" + + "\x10transition_count\x18\x06 \x01(\x04R\x0ftransitionCount*\x93\x01\n" + + "\x10FilesystemStatus\x12!\n" + + "\x1dFILESYSTEM_STATUS_UNSPECIFIED\x10\x00\x12\x1d\n" + + "\x19FILESYSTEM_STATUS_RUNNING\x10\x01\x12\x1e\n" + + "\x1aFILESYSTEM_STATUS_ARCHIVED\x10\x02\x12\x1d\n" + + "\x19FILESYSTEM_STATUS_DELETED\x10\x03BMZKgo.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDescData +} + +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_goTypes = []any{ + (FilesystemStatus)(0), // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemStatus + (*FilesystemState)(nil), // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemState + (*FilesystemConfig)(nil), // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig + (*FSStats)(nil), // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.FSStats + (*durationpb.Duration)(nil), // 4: google.protobuf.Duration +} +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemState.status:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemStatus + 2, // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemState.config:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig + 3, // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemState.stats:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.FSStats + 4, // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig.gc_interval:type_name -> google.protobuf.Duration + 4, // 4: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig.snapshot_retention:type_name -> google.protobuf.Duration + 4, // 5: temporal.server.chasm.lib.temporalzfs.proto.v1.FilesystemConfig.owner_check_interval:type_name -> google.protobuf.Duration + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_init() } +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_init() { + if File_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_rawDesc)), + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_depIdxs, + EnumInfos: file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_enumTypes, + MessageInfos: file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto = out.File + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_goTypes = nil + file_temporal_server_chasm_lib_temporalzfs_proto_v1_state_proto_depIdxs = nil +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/tasks.go-helpers.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/tasks.go-helpers.pb.go new file mode 100644 index 0000000000..31f55d7947 --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/tasks.go-helpers.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package temporalzfspb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ChunkGCTask to the protobuf v3 wire format +func (val *ChunkGCTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChunkGCTask from the protobuf v3 wire format +func (val *ChunkGCTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChunkGCTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChunkGCTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChunkGCTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChunkGCTask + switch t := that.(type) { + case *ChunkGCTask: + that1 = t + case ChunkGCTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ManifestCompactTask to the protobuf v3 wire format +func (val *ManifestCompactTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ManifestCompactTask from the protobuf v3 wire format +func (val *ManifestCompactTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ManifestCompactTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ManifestCompactTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ManifestCompactTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ManifestCompactTask + switch t := that.(type) { + case *ManifestCompactTask: + that1 = t + case ManifestCompactTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type QuotaCheckTask to the protobuf v3 wire format +func (val *QuotaCheckTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type QuotaCheckTask from the protobuf v3 wire format +func (val *QuotaCheckTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *QuotaCheckTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two QuotaCheckTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *QuotaCheckTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *QuotaCheckTask + switch t := that.(type) { + case *QuotaCheckTask: + that1 = t + case QuotaCheckTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type OwnerCheckTask to the protobuf v3 wire format +func (val *OwnerCheckTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type OwnerCheckTask from the protobuf v3 wire format +func (val *OwnerCheckTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *OwnerCheckTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two OwnerCheckTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *OwnerCheckTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *OwnerCheckTask + switch t := that.(type) { + case *OwnerCheckTask: + that1 = t + case OwnerCheckTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DataCleanupTask to the protobuf v3 wire format +func (val *DataCleanupTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DataCleanupTask from the protobuf v3 wire format +func (val *DataCleanupTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DataCleanupTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DataCleanupTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DataCleanupTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DataCleanupTask + switch t := that.(type) { + case *DataCleanupTask: + that1 = t + case DataCleanupTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/temporalzfs/gen/temporalzfspb/v1/tasks.pb.go b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/tasks.pb.go new file mode 100644 index 0000000000..76c834d24b --- /dev/null +++ b/chasm/lib/temporalzfs/gen/temporalzfspb/v1/tasks.pb.go @@ -0,0 +1,312 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/temporalzfs/proto/v1/tasks.proto + +package temporalzfspb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ChunkGCTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Tombstone-based GC: scan tombstone prefix, delete orphaned chunks. + LastProcessedTxnId uint64 `protobuf:"varint,1,opt,name=last_processed_txn_id,json=lastProcessedTxnId,proto3" json:"last_processed_txn_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChunkGCTask) Reset() { + *x = ChunkGCTask{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChunkGCTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChunkGCTask) ProtoMessage() {} + +func (x *ChunkGCTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChunkGCTask.ProtoReflect.Descriptor instead. +func (*ChunkGCTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *ChunkGCTask) GetLastProcessedTxnId() uint64 { + if x != nil { + return x.LastProcessedTxnId + } + return 0 +} + +type ManifestCompactTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Flatten manifest diff chain from last checkpoint to current. + CheckpointTxnId uint64 `protobuf:"varint,1,opt,name=checkpoint_txn_id,json=checkpointTxnId,proto3" json:"checkpoint_txn_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ManifestCompactTask) Reset() { + *x = ManifestCompactTask{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ManifestCompactTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ManifestCompactTask) ProtoMessage() {} + +func (x *ManifestCompactTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ManifestCompactTask.ProtoReflect.Descriptor instead. +func (*ManifestCompactTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +func (x *ManifestCompactTask) GetCheckpointTxnId() uint64 { + if x != nil { + return x.CheckpointTxnId + } + return 0 +} + +type QuotaCheckTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QuotaCheckTask) Reset() { + *x = QuotaCheckTask{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QuotaCheckTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaCheckTask) ProtoMessage() {} + +func (x *QuotaCheckTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaCheckTask.ProtoReflect.Descriptor instead. +func (*QuotaCheckTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescGZIP(), []int{2} +} + +type OwnerCheckTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Per-workflow consecutive not-found counts, keyed by workflow ID. + // Guards against transient NotFound from history service. + NotFoundCounts map[string]int32 `protobuf:"bytes,1,rep,name=not_found_counts,json=notFoundCounts,proto3" json:"not_found_counts,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OwnerCheckTask) Reset() { + *x = OwnerCheckTask{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OwnerCheckTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OwnerCheckTask) ProtoMessage() {} + +func (x *OwnerCheckTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OwnerCheckTask.ProtoReflect.Descriptor instead. +func (*OwnerCheckTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +func (x *OwnerCheckTask) GetNotFoundCounts() map[string]int32 { + if x != nil { + return x.NotFoundCounts + } + return nil +} + +type DataCleanupTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Retry attempt count for exponential backoff on failure. + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DataCleanupTask) Reset() { + *x = DataCleanupTask{} + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DataCleanupTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataCleanupTask) ProtoMessage() {} + +func (x *DataCleanupTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataCleanupTask.ProtoReflect.Descriptor instead. +func (*DataCleanupTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +func (x *DataCleanupTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +var File_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDesc = "" + + "\n" + + ":temporal/server/chasm/lib/temporalzfs/proto/v1/tasks.proto\x12.temporal.server.chasm.lib.temporalzfs.proto.v1\"@\n" + + "\vChunkGCTask\x121\n" + + "\x15last_processed_txn_id\x18\x01 \x01(\x04R\x12lastProcessedTxnId\"A\n" + + "\x13ManifestCompactTask\x12*\n" + + "\x11checkpoint_txn_id\x18\x01 \x01(\x04R\x0fcheckpointTxnId\"\x10\n" + + "\x0eQuotaCheckTask\"\xd1\x01\n" + + "\x0eOwnerCheckTask\x12|\n" + + "\x10not_found_counts\x18\x01 \x03(\v2R.temporal.server.chasm.lib.temporalzfs.proto.v1.OwnerCheckTask.NotFoundCountsEntryR\x0enotFoundCounts\x1aA\n" + + "\x13NotFoundCountsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x05R\x05value:\x028\x01\"+\n" + + "\x0fDataCleanupTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattemptBMZKgo.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDescData +} + +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_goTypes = []any{ + (*ChunkGCTask)(nil), // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.ChunkGCTask + (*ManifestCompactTask)(nil), // 1: temporal.server.chasm.lib.temporalzfs.proto.v1.ManifestCompactTask + (*QuotaCheckTask)(nil), // 2: temporal.server.chasm.lib.temporalzfs.proto.v1.QuotaCheckTask + (*OwnerCheckTask)(nil), // 3: temporal.server.chasm.lib.temporalzfs.proto.v1.OwnerCheckTask + (*DataCleanupTask)(nil), // 4: temporal.server.chasm.lib.temporalzfs.proto.v1.DataCleanupTask + nil, // 5: temporal.server.chasm.lib.temporalzfs.proto.v1.OwnerCheckTask.NotFoundCountsEntry +} +var file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_depIdxs = []int32{ + 5, // 0: temporal.server.chasm.lib.temporalzfs.proto.v1.OwnerCheckTask.not_found_counts:type_name -> temporal.server.chasm.lib.temporalzfs.proto.v1.OwnerCheckTask.NotFoundCountsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_init() } +func file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_init() { + if File_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto = out.File + file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_goTypes = nil + file_temporal_server_chasm_lib_temporalzfs_proto_v1_tasks_proto_depIdxs = nil +} diff --git a/chasm/lib/temporalzfs/handler.go b/chasm/lib/temporalzfs/handler.go new file mode 100644 index 0000000000..c5311c2db7 --- /dev/null +++ b/chasm/lib/temporalzfs/handler.go @@ -0,0 +1,697 @@ +package temporalzfs + +import ( + "context" + "errors" + "math" + "time" + + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "go.temporal.io/server/common/log" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Setattr valid bitmask values (matching FUSE FATTR_* constants). +const ( + setattrMode = 1 << 0 + setattrUID = 1 << 1 + setattrGID = 1 << 2 + setattrSize = 1 << 3 // truncate + setattrAtime = 1 << 4 + setattrMtime = 1 << 5 +) + +// Statfs virtual capacity defaults when no quota is configured. +const ( + statfsVirtualBytes = 1 << 40 // 1 TiB + statfsVirtualInodes = 1 << 20 // ~1M inodes +) + +type handler struct { + temporalzfspb.UnimplementedTemporalFSServiceServer + + config *Config + logger log.Logger + storeProvider FSStoreProvider +} + +func newHandler(config *Config, logger log.Logger, storeProvider FSStoreProvider) *handler { + return &handler{ + config: config, + logger: logger, + storeProvider: storeProvider, + } +} + +// openFS obtains a store for the given filesystem and opens an fs.FS on it. +// The caller owns the returned *tzfs.FS and must call f.Close() which also +// closes the underlying store. On error, all resources are cleaned up internally. +func (h *handler) openFS(shardID int32, namespaceID, filesystemID string) (*tzfs.FS, error) { + s, err := h.storeProvider.GetStore(shardID, namespaceID, filesystemID) + if err != nil { + return nil, mapFSError(err) + } + f, err := tzfs.Open(s) + if err != nil { + _ = s.Close() + return nil, mapFSError(err) + } + return f, nil +} + +// createFS initializes a new filesystem in the store. +// The caller owns the returned *tzfs.FS and must call f.Close() which also +// closes the underlying store. On error, all resources are cleaned up internally. +func (h *handler) createFS(shardID int32, namespaceID, filesystemID string, config *temporalzfspb.FilesystemConfig) (*tzfs.FS, error) { + s, err := h.storeProvider.GetStore(shardID, namespaceID, filesystemID) + if err != nil { + return nil, err + } + + chunkSize := uint32(defaultChunkSize) + if config.GetChunkSize() > 0 { + chunkSize = config.GetChunkSize() + } + + f, err := tzfs.Create(s, tzfs.Options{ChunkSize: chunkSize}) + if err != nil { + _ = s.Close() + return nil, err + } + return f, nil +} + +func (h *handler) CreateFilesystem( + ctx context.Context, + req *temporalzfspb.CreateFilesystemRequest, +) (*temporalzfspb.CreateFilesystemResponse, error) { + result, err := chasm.StartExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFilesystemId(), + }, + func(mCtx chasm.MutableContext, req *temporalzfspb.CreateFilesystemRequest) (*Filesystem, error) { + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{}, + Visibility: chasm.NewComponentField(mCtx, chasm.NewVisibilityWithData(mCtx, nil, nil)), + } + + err := TransitionCreate.Apply(fs, mCtx, CreateEvent{ + Config: req.GetConfig(), + OwnerWorkflowIDs: req.GetOwnerWorkflowIds(), + }) + if err != nil { + return nil, err + } + + // Initialize the underlying FS store. + f, createErr := h.createFS(0, req.GetNamespaceId(), req.GetFilesystemId(), fs.Config) + if createErr != nil { + return nil, createErr + } + _ = f.Close() + + return fs, nil + }, + req, + chasm.WithRequestID(req.GetRequestId()), + ) + if err != nil { + return nil, err + } + + return &temporalzfspb.CreateFilesystemResponse{ + RunId: result.ExecutionKey.RunID, + }, nil +} + +func (h *handler) GetFilesystemInfo( + ctx context.Context, + req *temporalzfspb.GetFilesystemInfoRequest, +) (*temporalzfspb.GetFilesystemInfoResponse, error) { + ref := chasm.NewComponentRef[*Filesystem](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFilesystemId(), + }) + + return chasm.ReadComponent( + ctx, + ref, + func(fs *Filesystem, ctx chasm.Context, _ *temporalzfspb.GetFilesystemInfoRequest) (*temporalzfspb.GetFilesystemInfoResponse, error) { + return &temporalzfspb.GetFilesystemInfoResponse{ + State: fs.FilesystemState, + RunId: ctx.ExecutionKey().RunID, + }, nil + }, + req, + nil, + ) +} + +func (h *handler) ArchiveFilesystem( + ctx context.Context, + req *temporalzfspb.ArchiveFilesystemRequest, +) (*temporalzfspb.ArchiveFilesystemResponse, error) { + ref := chasm.NewComponentRef[*Filesystem](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFilesystemId(), + }) + + _, _, err := chasm.UpdateComponent( + ctx, + ref, + func(fs *Filesystem, ctx chasm.MutableContext, _ any) (*temporalzfspb.ArchiveFilesystemResponse, error) { + if err := TransitionArchive.Apply(fs, ctx, nil); err != nil { + return nil, err + } + return &temporalzfspb.ArchiveFilesystemResponse{}, nil + }, + nil, + ) + if err != nil { + return nil, err + } + return &temporalzfspb.ArchiveFilesystemResponse{}, nil +} + +func (h *handler) AttachWorkflow( + ctx context.Context, + req *temporalzfspb.AttachWorkflowRequest, +) (*temporalzfspb.AttachWorkflowResponse, error) { + ref := chasm.NewComponentRef[*Filesystem](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFilesystemId(), + }) + + _, _, err := chasm.UpdateComponent( + ctx, + ref, + func(fs *Filesystem, _ chasm.MutableContext, _ any) (*temporalzfspb.AttachWorkflowResponse, error) { + wfID := req.GetWorkflowId() + for _, id := range fs.OwnerWorkflowIds { + if id == wfID { + return &temporalzfspb.AttachWorkflowResponse{}, nil + } + } + fs.OwnerWorkflowIds = append(fs.OwnerWorkflowIds, wfID) + return &temporalzfspb.AttachWorkflowResponse{}, nil + }, + nil, + ) + if err != nil { + return nil, err + } + return &temporalzfspb.AttachWorkflowResponse{}, nil +} + +func (h *handler) DetachWorkflow( + ctx context.Context, + req *temporalzfspb.DetachWorkflowRequest, +) (*temporalzfspb.DetachWorkflowResponse, error) { + ref := chasm.NewComponentRef[*Filesystem](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFilesystemId(), + }) + + _, _, err := chasm.UpdateComponent( + ctx, + ref, + func(fs *Filesystem, mCtx chasm.MutableContext, _ any) (*temporalzfspb.DetachWorkflowResponse, error) { + wfID := req.GetWorkflowId() + filtered := fs.OwnerWorkflowIds[:0] + for _, id := range fs.OwnerWorkflowIds { + if id != wfID { + filtered = append(filtered, id) + } + } + fs.OwnerWorkflowIds = filtered + + // If all owners are gone, transition to DELETED. + if len(fs.OwnerWorkflowIds) == 0 { + if err := TransitionDelete.Apply(fs, mCtx, nil); err != nil { + return nil, err + } + } + return &temporalzfspb.DetachWorkflowResponse{}, nil + }, + nil, + ) + if err != nil { + return nil, err + } + return &temporalzfspb.DetachWorkflowResponse{}, nil +} + +// FS operations — these use temporal-zfs inode-based APIs. + +func (h *handler) Lookup(_ context.Context, req *temporalzfspb.LookupRequest) (*temporalzfspb.LookupResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inode, err := f.LookupByID(req.GetParentInodeId(), req.GetName()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.LookupResponse{ + InodeId: inode.ID, + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Getattr(_ context.Context, req *temporalzfspb.GetattrRequest) (*temporalzfspb.GetattrResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inode, err := f.StatByID(req.GetInodeId()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.GetattrResponse{ + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Setattr(_ context.Context, req *temporalzfspb.SetattrRequest) (*temporalzfspb.SetattrResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inodeID := req.GetInodeId() + valid := req.GetValid() + attr := req.GetAttr() + + if valid&setattrMode != 0 { + if err := f.ChmodByID(inodeID, uint16(attr.GetMode())); err != nil { + return nil, mapFSError(err) + } + } + if valid&setattrUID != 0 || valid&setattrGID != 0 { + uid := uint32(math.MaxUint32) // unchanged + gid := uint32(math.MaxUint32) + if valid&setattrUID != 0 { + uid = attr.GetUid() + } + if valid&setattrGID != 0 { + gid = attr.GetGid() + } + if err := f.ChownByID(inodeID, uid, gid); err != nil { + return nil, mapFSError(err) + } + } + if valid&setattrSize != 0 { + if err := f.TruncateByID(inodeID, int64(attr.GetFileSize())); err != nil { + return nil, mapFSError(err) + } + } + if err := h.applyUtimens(f, inodeID, valid, attr); err != nil { + return nil, err + } + + // Re-read the inode to return updated attributes. + inode, err := f.StatByID(inodeID) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.SetattrResponse{ + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) applyUtimens(f *tzfs.FS, inodeID uint64, valid uint32, attr *temporalzfspb.InodeAttr) error { + if valid&setattrAtime == 0 && valid&setattrMtime == 0 { + return nil + } + var atime, mtime time.Time + if valid&setattrAtime != 0 && attr.GetAtime() != nil { + atime = attr.GetAtime().AsTime() + } + if valid&setattrMtime != 0 && attr.GetMtime() != nil { + mtime = attr.GetMtime().AsTime() + } + return mapFSError(f.UtimensByID(inodeID, atime, mtime)) +} + +func (h *handler) ReadChunks(_ context.Context, req *temporalzfspb.ReadChunksRequest) (*temporalzfspb.ReadChunksResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + data, err := f.ReadAtByID(req.GetInodeId(), req.GetOffset(), int(req.GetReadSize())) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.ReadChunksResponse{ + Data: data, + }, nil +} + +func (h *handler) WriteChunks(_ context.Context, req *temporalzfspb.WriteChunksRequest) (*temporalzfspb.WriteChunksResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + err = f.WriteAtByID(req.GetInodeId(), req.GetOffset(), req.GetData()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.WriteChunksResponse{ + BytesWritten: int64(len(req.GetData())), + }, nil +} + +func (h *handler) Truncate(_ context.Context, req *temporalzfspb.TruncateRequest) (*temporalzfspb.TruncateResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + if err := f.TruncateByID(req.GetInodeId(), req.GetNewSize()); err != nil { + return nil, mapFSError(err) + } + return &temporalzfspb.TruncateResponse{}, nil +} + +func (h *handler) Mkdir(_ context.Context, req *temporalzfspb.MkdirRequest) (*temporalzfspb.MkdirResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inode, err := f.MkdirByID(req.GetParentInodeId(), req.GetName(), uint16(req.GetMode())) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.MkdirResponse{ + InodeId: inode.ID, + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Unlink(_ context.Context, req *temporalzfspb.UnlinkRequest) (*temporalzfspb.UnlinkResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + if err := f.UnlinkByID(req.GetParentInodeId(), req.GetName()); err != nil { + return nil, mapFSError(err) + } + return &temporalzfspb.UnlinkResponse{}, nil +} + +func (h *handler) Rmdir(_ context.Context, req *temporalzfspb.RmdirRequest) (*temporalzfspb.RmdirResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + if err := f.RmdirByID(req.GetParentInodeId(), req.GetName()); err != nil { + return nil, mapFSError(err) + } + return &temporalzfspb.RmdirResponse{}, nil +} + +func (h *handler) Rename(_ context.Context, req *temporalzfspb.RenameRequest) (*temporalzfspb.RenameResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + if err := f.RenameByID( + req.GetOldParentInodeId(), req.GetOldName(), + req.GetNewParentInodeId(), req.GetNewName(), + ); err != nil { + return nil, mapFSError(err) + } + return &temporalzfspb.RenameResponse{}, nil +} + +func (h *handler) ReadDir(_ context.Context, req *temporalzfspb.ReadDirRequest) (*temporalzfspb.ReadDirResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + entries, err := f.ReadDirPlusByID(req.GetInodeId()) + if err != nil { + return nil, mapFSError(err) + } + + protoEntries := make([]*temporalzfspb.DirEntry, len(entries)) + for i, e := range entries { + inode := e.Inode + if inode == nil { + // Embedded inode unavailable (e.g., hardlinked file) — fetch it. + inode, err = f.StatByID(e.InodeID) + if err != nil { + return nil, mapFSError(err) + } + } + protoEntries[i] = &temporalzfspb.DirEntry{ + Name: e.Name, + InodeId: e.InodeID, + Mode: uint32(inode.Mode), + } + } + + return &temporalzfspb.ReadDirResponse{ + Entries: protoEntries, + }, nil +} + +func (h *handler) Link(_ context.Context, req *temporalzfspb.LinkRequest) (*temporalzfspb.LinkResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inode, err := f.LinkByID(req.GetInodeId(), req.GetNewParentInodeId(), req.GetNewName()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.LinkResponse{ + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Symlink(_ context.Context, req *temporalzfspb.SymlinkRequest) (*temporalzfspb.SymlinkResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inode, err := f.SymlinkByID(req.GetParentInodeId(), req.GetName(), req.GetTarget()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.SymlinkResponse{ + InodeId: inode.ID, + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Readlink(_ context.Context, req *temporalzfspb.ReadlinkRequest) (*temporalzfspb.ReadlinkResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + target, err := f.ReadlinkByID(req.GetInodeId()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.ReadlinkResponse{ + Target: target, + }, nil +} + +func (h *handler) CreateFile(_ context.Context, req *temporalzfspb.CreateFileRequest) (*temporalzfspb.CreateFileResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + inode, err := f.CreateFileByID(req.GetParentInodeId(), req.GetName(), uint16(req.GetMode())) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.CreateFileResponse{ + InodeId: inode.ID, + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Mknod(_ context.Context, req *temporalzfspb.MknodRequest) (*temporalzfspb.MknodResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + typ := modeToInodeType(req.GetMode()) + inode, err := f.MknodByID(req.GetParentInodeId(), req.GetName(), uint16(req.GetMode()&0xFFF), typ, uint64(req.GetDev())) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.MknodResponse{ + InodeId: inode.ID, + Attr: inodeToAttr(inode), + }, nil +} + +func (h *handler) Statfs(_ context.Context, req *temporalzfspb.StatfsRequest) (*temporalzfspb.StatfsResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + quota := f.GetQuota() + + bsize := uint32(f.ChunkSize()) + if bsize == 0 { + bsize = 4096 + } + + var blocks, bfree, files, ffree uint64 + if quota.MaxBytes > 0 { + blocks = uint64(quota.MaxBytes) / uint64(bsize) + used := min(uint64(quota.UsedBytes)/uint64(bsize), blocks) + bfree = blocks - used + } else { + blocks = statfsVirtualBytes / uint64(bsize) + bfree = blocks + } + if quota.MaxInodes > 0 { + files = uint64(quota.MaxInodes) + used := min(uint64(quota.UsedInodes), files) + ffree = files - used + } else { + files = statfsVirtualInodes + ffree = files + } + + return &temporalzfspb.StatfsResponse{ + Blocks: blocks, + Bfree: bfree, + Bavail: bfree, + Files: files, + Ffree: ffree, + Bsize: bsize, + Namelen: 255, + Frsize: bsize, + }, nil +} + +func (h *handler) CreateSnapshot(_ context.Context, req *temporalzfspb.CreateSnapshotRequest) (*temporalzfspb.CreateSnapshotResponse, error) { + f, err := h.openFS(0, req.GetNamespaceId(), req.GetFilesystemId()) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + snap, err := f.CreateSnapshot(req.GetSnapshotName()) + if err != nil { + return nil, mapFSError(err) + } + + return &temporalzfspb.CreateSnapshotResponse{ + SnapshotTxnId: snap.TxnID, + }, nil +} + +// modeToInodeType extracts the inode type from POSIX mode bits. +func modeToInodeType(mode uint32) tzfs.InodeType { + switch mode & 0xF000 { + case 0x1000: + return tzfs.InodeTypeFIFO + case 0x2000: + return tzfs.InodeTypeCharDev + case 0x6000: + return tzfs.InodeTypeBlockDev + case 0xC000: + return tzfs.InodeTypeSocket + default: + return tzfs.InodeTypeFile + } +} + +// inodeToAttr converts a temporal-zfs Inode to the proto InodeAttr. +func inodeToAttr(inode *tzfs.Inode) *temporalzfspb.InodeAttr { + return &temporalzfspb.InodeAttr{ + InodeId: inode.ID, + FileSize: inode.Size, + Mode: uint32(inode.Mode), + Nlink: inode.LinkCount, + Uid: inode.UID, + Gid: inode.GID, + Atime: timestamppb.New(inode.Atime), + Mtime: timestamppb.New(inode.Mtime), + Ctime: timestamppb.New(inode.Ctime), + } +} + +// mapFSError converts temporal-zfs errors to appropriate gRPC service errors. +func mapFSError(err error) error { + if err == nil { + return nil + } + switch { + case errors.Is(err, tzfs.ErrNotFound), errors.Is(err, tzfs.ErrSnapshotNotFound): + return serviceerror.NewNotFound(err.Error()) + case errors.Is(err, tzfs.ErrExist): + return serviceerror.NewAlreadyExists(err.Error()) + case errors.Is(err, tzfs.ErrPermission), errors.Is(err, tzfs.ErrNotPermitted): + return serviceerror.NewPermissionDenied(err.Error(), "") + case errors.Is(err, tzfs.ErrInvalidPath), errors.Is(err, tzfs.ErrInvalidRename), errors.Is(err, tzfs.ErrNameTooLong): + return serviceerror.NewInvalidArgument(err.Error()) + case errors.Is(err, tzfs.ErrNoSpace), errors.Is(err, tzfs.ErrTooManyLinks): + return serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_STORAGE_LIMIT, err.Error()) + case errors.Is(err, tzfs.ErrNotDir), errors.Is(err, tzfs.ErrIsDir), + errors.Is(err, tzfs.ErrNotEmpty), errors.Is(err, tzfs.ErrNotSymlink), + errors.Is(err, tzfs.ErrReadOnly), errors.Is(err, tzfs.ErrLockConflict): + return serviceerror.NewFailedPrecondition(err.Error()) + case errors.Is(err, tzfs.ErrClosed), errors.Is(err, tzfs.ErrVersionMismatch): + return serviceerror.NewUnavailable(err.Error()) + default: + return err + } +} diff --git a/chasm/lib/temporalzfs/handler_test.go b/chasm/lib/temporalzfs/handler_test.go new file mode 100644 index 0000000000..65eb40fa2a --- /dev/null +++ b/chasm/lib/temporalzfs/handler_test.go @@ -0,0 +1,618 @@ +package temporalzfs + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "go.temporal.io/server/common/log" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const rootInodeID = uint64(1) + +func newTestHandler(t *testing.T) (*handler, *PebbleStoreProvider) { + t.Helper() + provider := newTestStoreProvider(t) + h := newHandler(nil, log.NewTestLogger(), provider) + return h, provider +} + +// initHandlerFS creates an FS in the store provider. +func initHandlerFS(t *testing.T, h *handler, nsID, fsID string) { + t.Helper() + f, err := h.createFS(0, nsID, fsID, &temporalzfspb.FilesystemConfig{ChunkSize: 256 * 1024}) + require.NoError(t, err) + _ = f.Close() +} + +func TestOpenFS(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + f, err := h.openFS(0, nsID, fsID) + require.NoError(t, err) + require.NotNil(t, f) + _ = f.Close() +} + +func TestCreateFS(t *testing.T) { + h, _ := newTestHandler(t) + + config := &temporalzfspb.FilesystemConfig{ChunkSize: 512 * 1024} + f, err := h.createFS(0, "ns-1", "fs-1", config) + require.NoError(t, err) + require.NotNil(t, f) + require.EqualValues(t, 512*1024, f.ChunkSize()) + _ = f.Close() +} + +func TestCreateFS_DefaultChunkSize(t *testing.T) { + h, _ := newTestHandler(t) + + // Zero chunk size should use the default. + config := &temporalzfspb.FilesystemConfig{ChunkSize: 0} + f, err := h.createFS(0, "ns-1", "fs-1", config) + require.NoError(t, err) + require.NotNil(t, f) + require.EqualValues(t, defaultChunkSize, f.ChunkSize()) + _ = f.Close() +} + +func TestInodeToAttr(t *testing.T) { + now := time.Now() + inode := &tzfs.Inode{ + ID: 42, + Size: 1024, + Mode: 0o644, + LinkCount: 1, + UID: 1000, + GID: 1000, + Atime: now, + Mtime: now, + Ctime: now, + } + + attr := inodeToAttr(inode) + require.EqualValues(t, 42, attr.InodeId) + require.EqualValues(t, 1024, attr.FileSize) + require.EqualValues(t, 0o644, attr.Mode) + require.EqualValues(t, 1, attr.Nlink) + require.EqualValues(t, 1000, attr.Uid) + require.EqualValues(t, 1000, attr.Gid) + require.NotNil(t, attr.Atime) + require.NotNil(t, attr.Mtime) + require.NotNil(t, attr.Ctime) +} + +func TestMapFSError(t *testing.T) { + require.NoError(t, mapFSError(nil)) + require.Error(t, mapFSError(tzfs.ErrNotFound)) +} + +func TestGetattr(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + resp, err := h.Getattr(context.Background(), &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: rootInodeID, + }) + require.NoError(t, err) + require.NotNil(t, resp.Attr) + require.Equal(t, rootInodeID, resp.Attr.InodeId) + require.Positive(t, resp.Attr.Mode) +} + +func TestReadWriteChunks(t *testing.T) { + h, provider := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a file via temporal-zfs directly so we have an inode to read/write. + s, err := provider.GetStore(0, nsID, fsID) + require.NoError(t, err) + f, err := tzfs.Open(s) + require.NoError(t, err) + err = f.WriteFile("/test.txt", []byte("initial"), 0o644) + require.NoError(t, err) + inode, err := f.Stat("/test.txt") + require.NoError(t, err) + inodeID := inode.ID + _ = f.Close() + + // Write via handler. + data := []byte("hello temporalzfs") + writeResp, err := h.WriteChunks(context.Background(), &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + Offset: 0, + Data: data, + }) + require.NoError(t, err) + require.EqualValues(t, len(data), writeResp.BytesWritten) + + // Read back via handler. + readResp, err := h.ReadChunks(context.Background(), &temporalzfspb.ReadChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + Offset: 0, + ReadSize: int64(len(data)), + }) + require.NoError(t, err) + require.Equal(t, data, readResp.Data) +} + +func TestCreateSnapshot(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + resp, err := h.CreateSnapshot(context.Background(), &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + SnapshotName: "snap-1", + }) + require.NoError(t, err) + require.Positive(t, resp.SnapshotTxnId) +} + +func TestLookup(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a directory via handler so it shows up under root. + mkdirResp, err := h.Mkdir(context.Background(), &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "testdir", + Mode: 0o755, + }) + require.NoError(t, err) + require.NotZero(t, mkdirResp.InodeId) + + // Lookup the directory by name. + resp, err := h.Lookup(context.Background(), &temporalzfspb.LookupRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "testdir", + }) + require.NoError(t, err) + require.Equal(t, mkdirResp.InodeId, resp.InodeId) + require.NotNil(t, resp.Attr) +} + +func TestSetattr(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a file via handler. + createResp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "setattr.txt", + Mode: 0o644, + }) + require.NoError(t, err) + inodeID := createResp.InodeId + + // Change mode via setattr. + setattrResp, err := h.Setattr(context.Background(), &temporalzfspb.SetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + Valid: setattrMode, + Attr: &temporalzfspb.InodeAttr{ + Mode: 0o600, + }, + }) + require.NoError(t, err) + require.NotNil(t, setattrResp.Attr) + require.EqualValues(t, 0o600, setattrResp.Attr.Mode) +} + +func TestSetattr_Utimens(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + createResp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "utimens.txt", + Mode: 0o644, + }) + require.NoError(t, err) + + newTime := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) + setattrResp, err := h.Setattr(context.Background(), &temporalzfspb.SetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: createResp.InodeId, + Valid: setattrMtime, + Attr: &temporalzfspb.InodeAttr{ + Mtime: timestamppb.New(newTime), + }, + }) + require.NoError(t, err) + require.NotNil(t, setattrResp.Attr) + require.Equal(t, newTime.Unix(), setattrResp.Attr.Mtime.AsTime().Unix()) +} + +func TestTruncate(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a file and write some data. + createResp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "truncate.txt", + Mode: 0o644, + }) + require.NoError(t, err) + inodeID := createResp.InodeId + + _, err = h.WriteChunks(context.Background(), &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + Offset: 0, + Data: []byte("hello world"), + }) + require.NoError(t, err) + + // Truncate to 5 bytes. + _, err = h.Truncate(context.Background(), &temporalzfspb.TruncateRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + NewSize: 5, + }) + require.NoError(t, err) + + // Verify size via getattr. + getattrResp, err := h.Getattr(context.Background(), &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + }) + require.NoError(t, err) + require.EqualValues(t, 5, getattrResp.Attr.FileSize) +} + +func TestMkdir(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + resp, err := h.Mkdir(context.Background(), &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "newdir", + Mode: 0o755, + }) + require.NoError(t, err) + require.NotZero(t, resp.InodeId) + require.NotNil(t, resp.Attr) + + // Verify via getattr. + getattrResp, err := h.Getattr(context.Background(), &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: resp.InodeId, + }) + require.NoError(t, err) + require.Equal(t, resp.InodeId, getattrResp.Attr.InodeId) +} + +func TestUnlink(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a file. + createResp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "todelete.txt", + Mode: 0o644, + }) + require.NoError(t, err) + inodeID := createResp.InodeId + + // Unlink it. + _, err = h.Unlink(context.Background(), &temporalzfspb.UnlinkRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "todelete.txt", + }) + require.NoError(t, err) + + // Verify it no longer exists via lookup. + _, err = h.Lookup(context.Background(), &temporalzfspb.LookupRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "todelete.txt", + }) + require.Error(t, err) + _ = inodeID +} + +func TestRmdir(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a directory. + mkdirResp, err := h.Mkdir(context.Background(), &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "rmme", + Mode: 0o755, + }) + require.NoError(t, err) + require.NotZero(t, mkdirResp.InodeId) + + // Rmdir it. + _, err = h.Rmdir(context.Background(), &temporalzfspb.RmdirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "rmme", + }) + require.NoError(t, err) + + // Verify it no longer exists. + _, err = h.Lookup(context.Background(), &temporalzfspb.LookupRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "rmme", + }) + require.Error(t, err) +} + +func TestRename(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a file. + createResp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "original.txt", + Mode: 0o644, + }) + require.NoError(t, err) + + // Rename it. + _, err = h.Rename(context.Background(), &temporalzfspb.RenameRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + OldParentInodeId: rootInodeID, + OldName: "original.txt", + NewParentInodeId: rootInodeID, + NewName: "renamed.txt", + }) + require.NoError(t, err) + + // Old name should not exist. + _, err = h.Lookup(context.Background(), &temporalzfspb.LookupRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "original.txt", + }) + require.Error(t, err) + + // New name should exist with the same inode ID. + lookupResp, err := h.Lookup(context.Background(), &temporalzfspb.LookupRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "renamed.txt", + }) + require.NoError(t, err) + require.Equal(t, createResp.InodeId, lookupResp.InodeId) +} + +func TestReadDir(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create two files under root. + _, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "file-a.txt", + Mode: 0o644, + }) + require.NoError(t, err) + + _, err = h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "file-b.txt", + Mode: 0o644, + }) + require.NoError(t, err) + + // ReadDir on root. + resp, err := h.ReadDir(context.Background(), &temporalzfspb.ReadDirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: rootInodeID, + }) + require.NoError(t, err) + require.Len(t, resp.Entries, 2) + + names := make(map[string]bool) + for _, e := range resp.Entries { + names[e.Name] = true + } + require.True(t, names["file-a.txt"]) + require.True(t, names["file-b.txt"]) +} + +func TestLink(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a file. + createResp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "original.txt", + Mode: 0o644, + }) + require.NoError(t, err) + + // Create a hard link. + linkResp, err := h.Link(context.Background(), &temporalzfspb.LinkRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: createResp.InodeId, + NewParentInodeId: rootInodeID, + NewName: "hardlink.txt", + }) + require.NoError(t, err) + require.NotNil(t, linkResp.Attr) + // Hard link should point to the same inode. + require.Equal(t, createResp.InodeId, linkResp.Attr.InodeId) + require.EqualValues(t, 2, linkResp.Attr.Nlink) +} + +func TestSymlink(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + resp, err := h.Symlink(context.Background(), &temporalzfspb.SymlinkRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "mylink", + Target: "/some/target", + }) + require.NoError(t, err) + require.NotZero(t, resp.InodeId) + require.NotNil(t, resp.Attr) +} + +func TestReadlink(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create symlink. + symlinkResp, err := h.Symlink(context.Background(), &temporalzfspb.SymlinkRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "mylink", + Target: "/some/target", + }) + require.NoError(t, err) + + // Readlink it back. + readlinkResp, err := h.Readlink(context.Background(), &temporalzfspb.ReadlinkRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: symlinkResp.InodeId, + }) + require.NoError(t, err) + require.Equal(t, "/some/target", readlinkResp.Target) +} + +func TestCreateFile(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + resp, err := h.CreateFile(context.Background(), &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "newfile.txt", + Mode: 0o644, + }) + require.NoError(t, err) + require.NotZero(t, resp.InodeId) + require.NotNil(t, resp.Attr) + require.EqualValues(t, 0o644, resp.Attr.Mode) + + // Verify via getattr. + getattrResp, err := h.Getattr(context.Background(), &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: resp.InodeId, + }) + require.NoError(t, err) + require.Equal(t, resp.InodeId, getattrResp.Attr.InodeId) +} + +func TestMknod(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + // Create a FIFO (0x1000 = S_IFIFO in POSIX). + fifoMode := uint32(0x1000 | 0o644) + resp, err := h.Mknod(context.Background(), &temporalzfspb.MknodRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInodeID, + Name: "myfifo", + Mode: fifoMode, + Dev: 0, + }) + require.NoError(t, err) + require.NotZero(t, resp.InodeId) + require.NotNil(t, resp.Attr) +} + +func TestStatfs(t *testing.T) { + h, _ := newTestHandler(t) + nsID, fsID := "ns-1", "fs-1" + initHandlerFS(t, h, nsID, fsID) + + resp, err := h.Statfs(context.Background(), &temporalzfspb.StatfsRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.Positive(t, resp.Blocks) + require.Positive(t, resp.Files) + require.Positive(t, resp.Bsize) + require.EqualValues(t, 255, resp.Namelen) +} diff --git a/chasm/lib/temporalzfs/integration_test.go b/chasm/lib/temporalzfs/integration_test.go new file mode 100644 index 0000000000..368252d198 --- /dev/null +++ b/chasm/lib/temporalzfs/integration_test.go @@ -0,0 +1,153 @@ +package temporalzfs + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "go.temporal.io/server/common/log" +) + +// TestFilesystemLifecycle_EndToEnd tests a full FS lifecycle: +// create → write → read → getattr → snapshot → archive. +func TestFilesystemLifecycle_EndToEnd(t *testing.T) { + provider := newTestStoreProvider(t) + h := newHandler(nil, log.NewTestLogger(), provider) + nsID, fsID := "ns-e2e", "fs-e2e" + + // 1. Create the filesystem. + initHandlerFS(t, h, nsID, fsID) + + // 2. Getattr on root inode. + attrResp, err := h.Getattr(context.Background(), &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: 1, + }) + require.NoError(t, err) + require.EqualValues(t, 1, attrResp.Attr.InodeId) + require.Positive(t, attrResp.Attr.Mode, "root inode should have a mode set") + + // 3. Create a file via temporal-zfs, then write/read via handler. + // (WriteChunks requires an existing inode, so we create a file first.) + s, err := provider.GetStore(0, nsID, fsID) + require.NoError(t, err) + f, openErr := tzfs.Open(s) + require.NoError(t, openErr) + err = f.WriteFile("/hello.txt", []byte("seed"), 0o644) + require.NoError(t, err) + inode, err := f.Stat("/hello.txt") + require.NoError(t, err) + inodeID := inode.ID + _ = f.Close() + + // 4. Write via handler. + payload := []byte("hello from integration test!") + writeResp, err := h.WriteChunks(context.Background(), &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + Offset: 0, + Data: payload, + }) + require.NoError(t, err) + require.EqualValues(t, len(payload), writeResp.BytesWritten) + + // 5. Read back via handler. + readResp, err := h.ReadChunks(context.Background(), &temporalzfspb.ReadChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + Offset: 0, + ReadSize: int64(len(payload)), + }) + require.NoError(t, err) + require.Equal(t, payload, readResp.Data) + + // 6. Getattr on the file. + fileAttr, err := h.Getattr(context.Background(), &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: inodeID, + }) + require.NoError(t, err) + require.Equal(t, inodeID, fileAttr.Attr.InodeId) + require.Positive(t, fileAttr.Attr.FileSize) + + // 7. Create a snapshot. + snapResp, err := h.CreateSnapshot(context.Background(), &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + SnapshotName: "e2e-snap", + }) + require.NoError(t, err) + require.Positive(t, snapResp.SnapshotTxnId) +} + +// TestPebbleStoreProvider_Isolation tests that different filesystem IDs get +// different partition IDs and data isolation. +func TestPebbleStoreProvider_Isolation(t *testing.T) { + provider := newTestStoreProvider(t) + + // Same namespace+filesystem returns consistent partition. + s1, err := provider.GetStore(0, "ns-a", "fs-1") + require.NoError(t, err) + s2, err := provider.GetStore(0, "ns-a", "fs-1") + require.NoError(t, err) + // Both should point to the same underlying store with the same partition. + _ = s1 + _ = s2 + + // Different filesystem gets a different partition. + s3, err := provider.GetStore(0, "ns-a", "fs-2") + require.NoError(t, err) + _ = s3 + + // Verify internal partition IDs are different. + p1 := provider.getPartitionID("ns-a", "fs-1") + p2 := provider.getPartitionID("ns-a", "fs-2") + require.NotEqual(t, p1, p2, "different filesystems should have different partition IDs") + + // Same key returns same partition (idempotent). + p1Again := provider.getPartitionID("ns-a", "fs-1") + require.Equal(t, p1, p1Again) +} + +// TestPebbleStoreProvider_Close tests that closing releases all resources. +func TestPebbleStoreProvider_Close(t *testing.T) { + dataDir := t.TempDir() + provider := NewPebbleStoreProvider(dataDir, log.NewTestLogger()) + + // Open a shard. + _, err := provider.GetStore(0, "ns", "fs") + require.NoError(t, err) + + // Close should succeed. + err = provider.Close() + require.NoError(t, err) + + // After close, internal state should be cleared. + require.Nil(t, provider.db) +} + +// TestPebbleStoreProvider_PartitionIDStability tests that partition IDs are +// deterministic and stable across provider instances (i.e., across restarts). +func TestPebbleStoreProvider_PartitionIDStability(t *testing.T) { + p1 := NewPebbleStoreProvider(t.TempDir(), log.NewTestLogger()) + p2 := NewPebbleStoreProvider(t.TempDir(), log.NewTestLogger()) + + // Same inputs must produce the same partition ID across instances. + id1 := p1.getPartitionID("ns-a", "fs-1") + id2 := p2.getPartitionID("ns-a", "fs-1") + require.Equal(t, id1, id2, "partition ID must be deterministic across instances") + + // Different inputs must produce different partition IDs. + id3 := p1.getPartitionID("ns-a", "fs-2") + require.NotEqual(t, id1, id3, "different filesystems should have different partition IDs") + + // Calling again returns the same value (idempotent). + id4 := p1.getPartitionID("ns-a", "fs-1") + require.Equal(t, id1, id4) +} diff --git a/chasm/lib/temporalzfs/library.go b/chasm/lib/temporalzfs/library.go new file mode 100644 index 0000000000..0a4c868593 --- /dev/null +++ b/chasm/lib/temporalzfs/library.go @@ -0,0 +1,96 @@ +package temporalzfs + +import ( + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "google.golang.org/grpc" +) + +const ( + libraryName = "temporalzfs" + componentName = "filesystem" +) + +var ( + Archetype = chasm.FullyQualifiedName(libraryName, componentName) + ArchetypeID = chasm.GenerateTypeID(Archetype) +) + +type library struct { + chasm.UnimplementedLibrary + + handler *handler + chunkGCTaskExecutor *chunkGCTaskExecutor + manifestCompactTaskExecutor *manifestCompactTaskExecutor + quotaCheckTaskExecutor *quotaCheckTaskExecutor + ownerCheckTaskExecutor *ownerCheckTaskExecutor + dataCleanupTaskExecutor *dataCleanupTaskExecutor +} + +func newLibrary( + handler *handler, + chunkGCTaskExecutor *chunkGCTaskExecutor, + manifestCompactTaskExecutor *manifestCompactTaskExecutor, + quotaCheckTaskExecutor *quotaCheckTaskExecutor, + ownerCheckTaskExecutor *ownerCheckTaskExecutor, + dataCleanupTaskExecutor *dataCleanupTaskExecutor, +) *library { + return &library{ + handler: handler, + chunkGCTaskExecutor: chunkGCTaskExecutor, + manifestCompactTaskExecutor: manifestCompactTaskExecutor, + quotaCheckTaskExecutor: quotaCheckTaskExecutor, + ownerCheckTaskExecutor: ownerCheckTaskExecutor, + dataCleanupTaskExecutor: dataCleanupTaskExecutor, + } +} + +func (l *library) Name() string { + return libraryName +} + +func (l *library) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*Filesystem]( + componentName, + chasm.WithSearchAttributes( + statusSearchAttribute, + ), + chasm.WithBusinessIDAlias("FilesystemId"), + ), + } +} + +func (l *library) Tasks() []*chasm.RegistrableTask { + return []*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask( + "chunkGC", + l.chunkGCTaskExecutor, + l.chunkGCTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "manifestCompact", + l.manifestCompactTaskExecutor, + l.manifestCompactTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "quotaCheck", + l.quotaCheckTaskExecutor, + l.quotaCheckTaskExecutor, + ), + chasm.NewRegistrablePureTask( + "ownerCheck", + l.ownerCheckTaskExecutor, + l.ownerCheckTaskExecutor, + ), + chasm.NewRegistrableSideEffectTask( + "dataCleanup", + l.dataCleanupTaskExecutor, + l.dataCleanupTaskExecutor, + ), + } +} + +func (l *library) RegisterServices(server *grpc.Server) { + server.RegisterService(&temporalzfspb.TemporalFSService_ServiceDesc, l.handler) +} diff --git a/chasm/lib/temporalzfs/pebble_store_provider.go b/chasm/lib/temporalzfs/pebble_store_provider.go new file mode 100644 index 0000000000..8c13790e1c --- /dev/null +++ b/chasm/lib/temporalzfs/pebble_store_provider.go @@ -0,0 +1,108 @@ +package temporalzfs + +import ( + "encoding/binary" + "fmt" + "hash/fnv" + "os" + "path/filepath" + "sync" + + "github.com/temporalio/temporal-zfs/pkg/store" + pebblestore "github.com/temporalio/temporal-zfs/pkg/store/pebble" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" +) + +// PebbleStoreProvider implements FSStoreProvider using PebbleDB via temporal-zfs. +// A single PebbleDB instance is used for all filesystem storage (lazy-created). +// Individual filesystem executions are isolated via PrefixedStore. +type PebbleStoreProvider struct { + dataDir string + logger log.Logger + + mu sync.Mutex + db *pebblestore.Store +} + +// NewPebbleStoreProvider creates a new PebbleStoreProvider. +// dataDir is the root directory for TemporalZFS PebbleDB data. +func NewPebbleStoreProvider(dataDir string, logger log.Logger) *PebbleStoreProvider { + return &PebbleStoreProvider{ + dataDir: dataDir, + logger: logger, + } +} + +func (p *PebbleStoreProvider) GetStore(_ int32, namespaceID string, filesystemID string) (store.Store, error) { + db, err := p.getOrCreateDB() + if err != nil { + return nil, err + } + + partitionID := p.getPartitionID(namespaceID, filesystemID) + return store.NewPrefixedStore(db, partitionID), nil +} + +func (p *PebbleStoreProvider) DeleteStore(_ int32, namespaceID string, filesystemID string) error { + db, err := p.getOrCreateDB() + if err != nil { + return err + } + + partitionID := p.getPartitionID(namespaceID, filesystemID) + // Delete all keys with this partition's 8-byte prefix by constructing + // a range [prefix, prefixEnd) where prefixEnd is prefix+1. + prefix := make([]byte, 8) + binary.BigEndian.PutUint64(prefix, partitionID) + prefixEnd := make([]byte, 8) + binary.BigEndian.PutUint64(prefixEnd, partitionID+1) + return db.DeleteRange(prefix, prefixEnd) +} + +func (p *PebbleStoreProvider) Close() error { + p.mu.Lock() + defer p.mu.Unlock() + + var err error + if p.db != nil { + err = p.db.Close() + if err != nil { + p.logger.Error("Failed to close PebbleDB", tag.Error(err)) + } + p.db = nil + } + return err +} + +func (p *PebbleStoreProvider) getOrCreateDB() (*pebblestore.Store, error) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.db != nil { + return p.db, nil + } + + dbPath := filepath.Join(p.dataDir, "temporalzfs") + if err := os.MkdirAll(dbPath, 0o750); err != nil { + return nil, fmt.Errorf("failed to create PebbleDB dir: %w", err) + } + + db, err := pebblestore.New(dbPath) + if err != nil { + return nil, fmt.Errorf("failed to open PebbleDB at %s: %w", dbPath, err) + } + + p.db = db + return db, nil +} + +// getPartitionID returns a deterministic partition ID for a given namespace+filesystem pair. +// Uses FNV-1a hash of the composite key so partition IDs are stable across restarts. +func (p *PebbleStoreProvider) getPartitionID(namespaceID string, filesystemID string) uint64 { + h := fnv.New64a() + _, _ = h.Write([]byte(namespaceID)) + _, _ = h.Write([]byte{':'}) + _, _ = h.Write([]byte(filesystemID)) + return h.Sum64() +} diff --git a/chasm/lib/temporalzfs/post_delete_hook.go b/chasm/lib/temporalzfs/post_delete_hook.go new file mode 100644 index 0000000000..ed688135ff --- /dev/null +++ b/chasm/lib/temporalzfs/post_delete_hook.go @@ -0,0 +1,33 @@ +package temporalzfs + +import ( + "context" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/service/history/deletemanager" +) + +// tfsPostDeleteHook implements deletemanager.PostDeleteHook. +// In OSS this is a no-op logger (the pull path handles cleanup). +// SaaS overrides via fx.Decorate with a real implementation that +// queries visibility for TFS executions and calls DetachWorkflow. +type tfsPostDeleteHook struct { + logger log.Logger +} + +var _ deletemanager.PostDeleteHook = (*tfsPostDeleteHook)(nil) + +func newTFSPostDeleteHook(logger log.Logger) *tfsPostDeleteHook { + return &tfsPostDeleteHook{logger: logger} +} + +func (h *tfsPostDeleteHook) AfterWorkflowDeletion(ctx context.Context, namespaceID string, workflowID string) { + // OSS: log and rely on the OwnerCheckTask (pull path) for cleanup. + // SaaS can override this to query visibility for TFS executions + // owned by this workflow and call DetachWorkflow for each. + h.logger.Debug("TFS: workflow deleted, pull path will handle TFS cleanup", + tag.WorkflowNamespaceID(namespaceID), + tag.WorkflowID(workflowID), + ) +} diff --git a/chasm/lib/temporalzfs/proto/v1/request_response.proto b/chasm/lib/temporalzfs/proto/v1/request_response.proto new file mode 100644 index 0000000000..94679b75cc --- /dev/null +++ b/chasm/lib/temporalzfs/proto/v1/request_response.proto @@ -0,0 +1,334 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.temporalzfs.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspb"; + +import "google/protobuf/timestamp.proto"; +import "chasm/lib/temporalzfs/proto/v1/state.proto"; + +// CreateFilesystem + +message CreateFilesystemRequest { + string namespace_id = 1; + string filesystem_id = 2; + // Initial set of owner workflow IDs for this filesystem. + repeated string owner_workflow_ids = 6; + FilesystemConfig config = 4; + string request_id = 5; +} + +message CreateFilesystemResponse { + string run_id = 1; +} + +// GetFilesystemInfo + +message GetFilesystemInfoRequest { + string namespace_id = 1; + string filesystem_id = 2; +} + +message GetFilesystemInfoResponse { + FilesystemState state = 1; + string run_id = 2; +} + +// ArchiveFilesystem + +message ArchiveFilesystemRequest { + string namespace_id = 1; + string filesystem_id = 2; +} + +message ArchiveFilesystemResponse { +} + +// Lookup + +message LookupRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; +} + +message LookupResponse { + uint64 inode_id = 1; + InodeAttr attr = 2; +} + +// ReadChunks + +message ReadChunksRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; + int64 offset = 4; + int64 read_size = 5; +} + +message ReadChunksResponse { + bytes data = 1; +} + +// WriteChunks + +message WriteChunksRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; + int64 offset = 4; + bytes data = 5; +} + +message WriteChunksResponse { + int64 bytes_written = 1; +} + +// Mkdir + +message MkdirRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; + uint32 mode = 5; +} + +message MkdirResponse { + uint64 inode_id = 1; + InodeAttr attr = 2; +} + +// ReadDir + +message ReadDirRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; +} + +message ReadDirResponse { + repeated DirEntry entries = 1; +} + +// Unlink + +message UnlinkRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; +} + +message UnlinkResponse { +} + +// Rmdir + +message RmdirRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; +} + +message RmdirResponse { +} + +// Rename + +message RenameRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 old_parent_inode_id = 3; + string old_name = 4; + uint64 new_parent_inode_id = 5; + string new_name = 6; +} + +message RenameResponse { +} + +// Getattr + +message GetattrRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; +} + +message GetattrResponse { + InodeAttr attr = 1; +} + +// Setattr + +message SetattrRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; + InodeAttr attr = 4; + // Bitmask of which fields in attr to apply. + uint32 valid = 5; +} + +message SetattrResponse { + InodeAttr attr = 1; +} + +// Truncate + +message TruncateRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; + int64 new_size = 4; +} + +message TruncateResponse { +} + +// Link + +message LinkRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; + uint64 new_parent_inode_id = 4; + string new_name = 5; +} + +message LinkResponse { + InodeAttr attr = 1; +} + +// Symlink + +message SymlinkRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; + string target = 5; +} + +message SymlinkResponse { + uint64 inode_id = 1; + InodeAttr attr = 2; +} + +// Readlink + +message ReadlinkRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 inode_id = 3; +} + +message ReadlinkResponse { + string target = 1; +} + +// Create (file) + +message CreateFileRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; + uint32 mode = 5; + uint32 flags = 6; +} + +message CreateFileResponse { + uint64 inode_id = 1; + InodeAttr attr = 2; +} + +// Mknod + +message MknodRequest { + string namespace_id = 1; + string filesystem_id = 2; + uint64 parent_inode_id = 3; + string name = 4; + uint32 mode = 5; + uint32 dev = 6; +} + +message MknodResponse { + uint64 inode_id = 1; + InodeAttr attr = 2; +} + +// Statfs + +message StatfsRequest { + string namespace_id = 1; + string filesystem_id = 2; +} + +message StatfsResponse { + uint64 blocks = 1; + uint64 bfree = 2; + uint64 bavail = 3; + uint64 files = 4; + uint64 ffree = 5; + uint32 bsize = 6; + uint32 namelen = 7; + uint32 frsize = 8; +} + +// CreateSnapshot + +message CreateSnapshotRequest { + string namespace_id = 1; + string filesystem_id = 2; + string snapshot_name = 3; +} + +message CreateSnapshotResponse { + uint64 snapshot_txn_id = 1; +} + +// Shared types + +message InodeAttr { + uint64 inode_id = 1; + uint64 file_size = 2; + uint32 mode = 3; + uint32 nlink = 4; + uint32 uid = 5; + uint32 gid = 6; + google.protobuf.Timestamp atime = 7; + google.protobuf.Timestamp mtime = 8; + google.protobuf.Timestamp ctime = 9; +} + +message DirEntry { + string name = 1; + uint64 inode_id = 2; + uint32 mode = 3; +} + +// AttachWorkflow + +message AttachWorkflowRequest { + string namespace_id = 1; + string filesystem_id = 2; + string workflow_id = 3; +} + +message AttachWorkflowResponse {} + +// DetachWorkflow + +message DetachWorkflowRequest { + string namespace_id = 1; + string filesystem_id = 2; + string workflow_id = 3; +} + +message DetachWorkflowResponse {} diff --git a/chasm/lib/temporalzfs/proto/v1/service.proto b/chasm/lib/temporalzfs/proto/v1/service.proto new file mode 100644 index 0000000000..4d3120073a --- /dev/null +++ b/chasm/lib/temporalzfs/proto/v1/service.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.temporalzfs.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspb"; + +import "chasm/lib/temporalzfs/proto/v1/request_response.proto"; +import "temporal/server/api/routing/v1/extension.proto"; +import "temporal/server/api/common/v1/api_category.proto"; + +service TemporalFSService { + // Lifecycle + rpc CreateFilesystem(CreateFilesystemRequest) returns (CreateFilesystemResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc GetFilesystemInfo(GetFilesystemInfoRequest) returns (GetFilesystemInfoResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc ArchiveFilesystem(ArchiveFilesystemRequest) returns (ArchiveFilesystemResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // Inode operations + rpc Lookup(LookupRequest) returns (LookupResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Getattr(GetattrRequest) returns (GetattrResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Setattr(SetattrRequest) returns (SetattrResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // File I/O + rpc ReadChunks(ReadChunksRequest) returns (ReadChunksResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc WriteChunks(WriteChunksRequest) returns (WriteChunksResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Truncate(TruncateRequest) returns (TruncateResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // Directory operations + rpc Mkdir(MkdirRequest) returns (MkdirResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Unlink(UnlinkRequest) returns (UnlinkResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Rmdir(RmdirRequest) returns (RmdirResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Rename(RenameRequest) returns (RenameResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc ReadDir(ReadDirRequest) returns (ReadDirResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // Links + rpc Link(LinkRequest) returns (LinkResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Symlink(SymlinkRequest) returns (SymlinkResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Readlink(ReadlinkRequest) returns (ReadlinkResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // Special + rpc CreateFile(CreateFileRequest) returns (CreateFileResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Mknod(MknodRequest) returns (MknodResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc Statfs(StatfsRequest) returns (StatfsResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // Snapshots + rpc CreateSnapshot(CreateSnapshotRequest) returns (CreateSnapshotResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + // Owner management + rpc AttachWorkflow(AttachWorkflowRequest) returns (AttachWorkflowResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc DetachWorkflow(DetachWorkflowRequest) returns (DetachWorkflowResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "filesystem_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } +} diff --git a/chasm/lib/temporalzfs/proto/v1/state.proto b/chasm/lib/temporalzfs/proto/v1/state.proto new file mode 100644 index 0000000000..14921ddf4d --- /dev/null +++ b/chasm/lib/temporalzfs/proto/v1/state.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.temporalzfs.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspb"; + +import "google/protobuf/duration.proto"; + +enum FilesystemStatus { + FILESYSTEM_STATUS_UNSPECIFIED = 0; + FILESYSTEM_STATUS_RUNNING = 1; + FILESYSTEM_STATUS_ARCHIVED = 2; + FILESYSTEM_STATUS_DELETED = 3; +} + +message FilesystemState { + FilesystemStatus status = 1; + FilesystemConfig config = 2; + FSStats stats = 3; + uint64 next_inode_id = 4; + uint64 next_txn_id = 5; + // Set of workflow IDs that own this filesystem. + // TFS is eligible for GC only when this set is empty. + repeated string owner_workflow_ids = 7; +} + +message FilesystemConfig { + // Default chunk size in bytes (default: 256KB). + uint32 chunk_size = 1; + // Maximum total size quota in bytes. + uint64 max_size = 2; + // Maximum inode count. + uint64 max_files = 3; + // Interval between GC runs. + google.protobuf.Duration gc_interval = 4; + // How long to retain snapshots. + google.protobuf.Duration snapshot_retention = 5; + // Interval between owner liveness checks (default: 10m). + google.protobuf.Duration owner_check_interval = 6; +} + +message FSStats { + uint64 total_size = 1; + uint64 file_count = 2; + uint64 dir_count = 3; + uint64 inode_count = 4; + uint64 chunk_count = 5; + uint64 transition_count = 6; +} diff --git a/chasm/lib/temporalzfs/proto/v1/tasks.proto b/chasm/lib/temporalzfs/proto/v1/tasks.proto new file mode 100644 index 0000000000..41985c6139 --- /dev/null +++ b/chasm/lib/temporalzfs/proto/v1/tasks.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.temporalzfs.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb;temporalzfspb"; + +message ChunkGCTask { + // Tombstone-based GC: scan tombstone prefix, delete orphaned chunks. + uint64 last_processed_txn_id = 1; +} + +message ManifestCompactTask { + // Flatten manifest diff chain from last checkpoint to current. + uint64 checkpoint_txn_id = 1; +} + +message QuotaCheckTask { + // Enforce storage quotas. Triggered after writes. +} + +message OwnerCheckTask { + // Per-workflow consecutive not-found counts, keyed by workflow ID. + // Guards against transient NotFound from history service. + map not_found_counts = 1; +} + +message DataCleanupTask { + // Retry attempt count for exponential backoff on failure. + int32 attempt = 1; +} diff --git a/chasm/lib/temporalzfs/research_agent_test.go b/chasm/lib/temporalzfs/research_agent_test.go new file mode 100644 index 0000000000..e5c13d5e97 --- /dev/null +++ b/chasm/lib/temporalzfs/research_agent_test.go @@ -0,0 +1,462 @@ +package temporalzfs + +// TestResearchAgent_HandlerLevel demonstrates a multi-step AI research agent +// through the TemporalZFS gRPC handler API, mirroring how a Temporal activity +// would interact with TemporalZFS in an OSS deployment. +// +// Scenario: An AI agent researches "Quantum Computing" in 3 iterations: +// +// 1. Gather Sources — creates workspace dirs, creates sources.md, writes content, snapshots +// 2. Analyze & Synthesize — overwrites sources.md, creates analysis.md, snapshots +// 3. Final Report — creates report.md, snapshots +// +// The handler test exercises the proto request/response API (Mkdir, CreateFile, +// WriteChunks, ReadChunks, ReadDir, Getattr, CreateSnapshot). Snapshot +// time-travel verification uses the library directly since the handler does not +// expose snapshot read operations. +// +// Run: +// +// go test ./chasm/lib/temporalzfs/ -run TestResearchAgent -v +// +// This exercises the OSS handler layer backed by PebbleStoreProvider. + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/temporalio/temporal-zfs/pkg/failpoint" + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" +) + +func TestResearchAgent_HandlerLevel(t *testing.T) { + h, provider := newTestHandler(t) + nsID, fsID := "ns-research", "fs-research-agent" + initHandlerFS(t, h, nsID, fsID) + + ctx := context.Background() + const rootInode uint64 = 1 + + // ─── Content for each iteration ────────────────────────────────────── + + sourcesV1 := []byte(`# Sources — Quantum Computing + +1. Feynman, R. "Simulating Physics with Computers" (1982) +2. Shor, P. "Algorithms for Quantum Computation" (1994) +3. Nielsen & Chuang, "Quantum Computation and Quantum Information" (2000) +`) + + sourcesV2 := []byte(`# Sources — Quantum Computing (Updated) + +1. Feynman, R. "Simulating Physics with Computers" (1982) +2. Shor, P. "Algorithms for Quantum Computation" (1994) +3. Nielsen & Chuang, "Quantum Computation and Quantum Information" (2000) +4. Preskill, J. "Quantum Computing in the NISQ Era and Beyond" (2018) +5. Arute et al. "Quantum Supremacy using a Programmable Superconducting Processor" (2019) +`) + + analysisContent := []byte(`# Analysis — Quantum Computing + +## Key Themes +- Quantum error correction remains the primary bottleneck. +- NISQ-era devices show promise but lack fault tolerance. +- Shor's algorithm threatens RSA; post-quantum cryptography is urgent. +`) + + reportContent := []byte(`# Final Report — Quantum Computing Research + +## Executive Summary +Quantum computing has reached an inflection point. Practical fault-tolerant +quantum computers remain years away, but near-term applications are emerging. + +## Recommendations +1. Monitor NISQ algorithm developments for near-term applications. +2. Begin migration planning to post-quantum cryptographic standards. +3. Evaluate quantum-classical hybrid approaches for optimization problems. +`) + + // ─── Iteration 1: Gather Sources ───────────────────────────────────── + + // Create /research directory. + researchDir, err := h.Mkdir(ctx, &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: rootInode, + Name: "research", + Mode: 0o755, + }) + require.NoError(t, err) + researchInodeID := researchDir.InodeId + + // Create /research/quantum-computing directory. + qcDir, err := h.Mkdir(ctx, &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: researchInodeID, + Name: "quantum-computing", + Mode: 0o755, + }) + require.NoError(t, err) + qcInodeID := qcDir.InodeId + + // Create sources.md file. + sourcesFile, err := h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: qcInodeID, + Name: "sources.md", + Mode: 0o644, + }) + require.NoError(t, err) + sourcesInodeID := sourcesFile.InodeId + + // Write content to sources.md. + writeResp, err := h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: sourcesInodeID, + Offset: 0, + Data: sourcesV1, + }) + require.NoError(t, err) + assert.Equal(t, int64(len(sourcesV1)), writeResp.BytesWritten) + + // Verify read back. + readResp, err := h.ReadChunks(ctx, &temporalzfspb.ReadChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: sourcesInodeID, + Offset: 0, + ReadSize: int64(len(sourcesV1)), + }) + require.NoError(t, err) + assert.Equal(t, sourcesV1, readResp.Data) + + // Create snapshot. + snap1Resp, err := h.CreateSnapshot(ctx, &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + SnapshotName: "step-1-sources", + }) + require.NoError(t, err) + assert.Positive(t, snap1Resp.SnapshotTxnId) + + // ─── Iteration 2: Analyze & Synthesize ─────────────────────────────── + + // Overwrite sources.md with updated content (truncate + write). + _, err = h.Truncate(ctx, &temporalzfspb.TruncateRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: sourcesInodeID, + NewSize: 0, + }) + require.NoError(t, err) + _, err = h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: sourcesInodeID, + Offset: 0, + Data: sourcesV2, + }) + require.NoError(t, err) + + // Create analysis.md. + analysisFile, err := h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: qcInodeID, + Name: "analysis.md", + Mode: 0o644, + }) + require.NoError(t, err) + analysisInodeID := analysisFile.InodeId + + _, err = h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: analysisInodeID, + Offset: 0, + Data: analysisContent, + }) + require.NoError(t, err) + + // Verify ReadDir shows 2 files. + dirResp, err := h.ReadDir(ctx, &temporalzfspb.ReadDirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: qcInodeID, + }) + require.NoError(t, err) + assert.Len(t, dirResp.Entries, 2, "iteration 2 should show 2 files") + + snap2Resp, err := h.CreateSnapshot(ctx, &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + SnapshotName: "step-2-analysis", + }) + require.NoError(t, err) + assert.Greater(t, snap2Resp.SnapshotTxnId, snap1Resp.SnapshotTxnId) + + // ─── Iteration 3: Final Report ─────────────────────────────────────── + + reportFile, err := h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + ParentInodeId: qcInodeID, + Name: "report.md", + Mode: 0o644, + }) + require.NoError(t, err) + + _, err = h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: reportFile.InodeId, + Offset: 0, + Data: reportContent, + }) + require.NoError(t, err) + + snap3Resp, err := h.CreateSnapshot(ctx, &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + SnapshotName: "step-3-final", + }) + require.NoError(t, err) + assert.Greater(t, snap3Resp.SnapshotTxnId, snap2Resp.SnapshotTxnId) + + // ─── Verify final state via handler ────────────────────────────────── + + // ReadDir should show 3 files. + finalDir, err := h.ReadDir(ctx, &temporalzfspb.ReadDirRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: qcInodeID, + }) + require.NoError(t, err) + assert.Len(t, finalDir.Entries, 3, "final state should have 3 files") + + // Getattr on report file. + reportAttr, err := h.Getattr(ctx, &temporalzfspb.GetattrRequest{ + NamespaceId: nsID, + FilesystemId: fsID, + InodeId: reportFile.InodeId, + }) + require.NoError(t, err) + assert.Positive(t, reportAttr.Attr.FileSize) + + // ─── Verify snapshot time-travel via library ───────────────────────── + // The handler doesn't expose snapshot read operations, so we verify + // through the library directly. This matches the existing test pattern. + + s, err := provider.GetStore(0, nsID, fsID) + require.NoError(t, err) + f, err := tzfs.Open(s) + require.NoError(t, err) + defer func() { require.NoError(t, f.Close()) }() + + // Snapshot 1: only sources.md (v1). + snap1FS, err := f.OpenSnapshot("step-1-sources") + require.NoError(t, err) + defer func() { require.NoError(t, snap1FS.Close()) }() + + snap1Sources, err := snap1FS.ReadFile("/research/quantum-computing/sources.md") + require.NoError(t, err) + assert.Equal(t, sourcesV1, snap1Sources, "snapshot 1 should have sources v1") + + _, err = snap1FS.ReadFile("/research/quantum-computing/analysis.md") + require.ErrorIs(t, err, tzfs.ErrNotFound, "snapshot 1 should NOT have analysis.md") + + snap1Entries, err := snap1FS.ReadDir("/research/quantum-computing") + require.NoError(t, err) + assert.Len(t, snap1Entries, 1, "snapshot 1 should have 1 file") + + // Snapshot 2: sources.md (v2) + analysis.md. + snap2FS, err := f.OpenSnapshot("step-2-analysis") + require.NoError(t, err) + defer func() { require.NoError(t, snap2FS.Close()) }() + + snap2Sources, err := snap2FS.ReadFile("/research/quantum-computing/sources.md") + require.NoError(t, err) + assert.Equal(t, sourcesV2, snap2Sources, "snapshot 2 should have sources v2") + + _, err = snap2FS.ReadFile("/research/quantum-computing/report.md") + require.ErrorIs(t, err, tzfs.ErrNotFound, "snapshot 2 should NOT have report.md") + + snap2Entries, err := snap2FS.ReadDir("/research/quantum-computing") + require.NoError(t, err) + assert.Len(t, snap2Entries, 2, "snapshot 2 should have 2 files") + + // Snapshot 3: all 3 files. + snap3FS, err := f.OpenSnapshot("step-3-final") + require.NoError(t, err) + defer func() { require.NoError(t, snap3FS.Close()) }() + + snap3Entries, err := snap3FS.ReadDir("/research/quantum-computing") + require.NoError(t, err) + assert.Len(t, snap3Entries, 3, "snapshot 3 should have 3 files") + + snap3Report, err := snap3FS.ReadFile("/research/quantum-computing/report.md") + require.NoError(t, err) + assert.Equal(t, reportContent, snap3Report) + + // Verify snapshot listing. + snapshots, err := f.ListSnapshots() + require.NoError(t, err) + require.Len(t, snapshots, 3) + assert.Equal(t, "step-1-sources", snapshots[0].Name) + assert.Equal(t, "step-2-analysis", snapshots[1].Name) + assert.Equal(t, "step-3-final", snapshots[2].Name) +} + +// TestResearchAgent_HandlerCrashRecovery verifies that handler operations are +// atomic: if a failpoint causes an operation to fail mid-batch, the next handler +// call (which reopens the FS) sees only the previously committed state. +func TestResearchAgent_HandlerCrashRecovery(t *testing.T) { + injected := func() error { return errors.New("injected crash") } + + h, provider := newTestHandler(t) + nsID, fsID := "ns-crash", "fs-crash-agent" + initHandlerFS(t, h, nsID, fsID) + + ctx := context.Background() + const rootInode uint64 = 1 + + // ─── Complete step 1 via handler ───────────────────────────────────── + + researchDir, err := h.Mkdir(ctx, &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: rootInode, Name: "research", Mode: 0o755, + }) + require.NoError(t, err) + + qcDir, err := h.Mkdir(ctx, &temporalzfspb.MkdirRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: researchDir.InodeId, Name: "quantum-computing", Mode: 0o755, + }) + require.NoError(t, err) + qcInodeID := qcDir.InodeId + + sourcesFile, err := h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: qcInodeID, Name: "sources.md", Mode: 0o644, + }) + require.NoError(t, err) + sourcesInodeID := sourcesFile.InodeId + + sourcesV1 := []byte("# Sources v1\n") + _, err = h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, FilesystemId: fsID, + InodeId: sourcesInodeID, Offset: 0, Data: sourcesV1, + }) + require.NoError(t, err) + + _, err = h.CreateSnapshot(ctx, &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, FilesystemId: fsID, + SnapshotName: "step-1-sources", + }) + require.NoError(t, err) + + // ─── Step 2: inject failure during CreateFile (analysis.md) ────────── + // The first op (creating analysis.md inode) fails via failpoint. + // The handler returns an error. On the next call, the FS reopens and + // shows step 1 state — the failed CreateFile left no trace. + + failpoint.Enable("after-create-inode", injected) + _, err = h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: qcInodeID, Name: "analysis.md", Mode: 0o644, + }) + require.Error(t, err, "CreateFile should fail with injected error") + failpoint.Disable("after-create-inode") + + // Verify: handler still works, ReadDir shows only step 1 files. + dirResp, err := h.ReadDir(ctx, &temporalzfspb.ReadDirRequest{ + NamespaceId: nsID, FilesystemId: fsID, + InodeId: qcInodeID, + }) + require.NoError(t, err) + assert.Len(t, dirResp.Entries, 1, "after failed CreateFile, only sources.md should exist") + + // Verify: step 1 snapshot intact via library. + s, err := provider.GetStore(0, nsID, fsID) + require.NoError(t, err) + f, err := tzfs.Open(s) + require.NoError(t, err) + + snap1, err := f.OpenSnapshot("step-1-sources") + require.NoError(t, err) + snap1Entries, err := snap1.ReadDir("/research/quantum-computing") + require.NoError(t, err) + assert.Len(t, snap1Entries, 1, "step-1 snapshot should have 1 file") + require.NoError(t, snap1.Close()) + + // No step-2 snapshot should exist. + _, err = f.OpenSnapshot("step-2-analysis") + require.ErrorIs(t, err, tzfs.ErrSnapshotNotFound) + require.NoError(t, f.Close()) + + // ─── Recovery: retry step 2 successfully ───────────────────────────── + + analysisFile, err := h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: qcInodeID, Name: "analysis.md", Mode: 0o644, + }) + require.NoError(t, err) + + _, err = h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, FilesystemId: fsID, + InodeId: analysisFile.InodeId, Offset: 0, Data: []byte("# Analysis\n"), + }) + require.NoError(t, err) + + _, err = h.CreateSnapshot(ctx, &temporalzfspb.CreateSnapshotRequest{ + NamespaceId: nsID, FilesystemId: fsID, + SnapshotName: "step-2-analysis", + }) + require.NoError(t, err) + + // ─── Step 3: inject failure during Mkdir (wrong op type) ───────────── + // This tests that failures in unexpected operations are also atomic. + + failpoint.Enable("after-create-inode", injected) + _, err = h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: qcInodeID, Name: "report.md", Mode: 0o644, + }) + require.Error(t, err) + failpoint.Disable("after-create-inode") + + // Verify step 2 state intact. + dirResp, err = h.ReadDir(ctx, &temporalzfspb.ReadDirRequest{ + NamespaceId: nsID, FilesystemId: fsID, + InodeId: qcInodeID, + }) + require.NoError(t, err) + assert.Len(t, dirResp.Entries, 2, "after failed step 3, should still have 2 files") + + // ─── Recovery: complete step 3 ─────────────────────────────────────── + + reportFile, err := h.CreateFile(ctx, &temporalzfspb.CreateFileRequest{ + NamespaceId: nsID, FilesystemId: fsID, + ParentInodeId: qcInodeID, Name: "report.md", Mode: 0o644, + }) + require.NoError(t, err) + + _, err = h.WriteChunks(ctx, &temporalzfspb.WriteChunksRequest{ + NamespaceId: nsID, FilesystemId: fsID, + InodeId: reportFile.InodeId, Offset: 0, Data: []byte("# Report\n"), + }) + require.NoError(t, err) + + dirResp, err = h.ReadDir(ctx, &temporalzfspb.ReadDirRequest{ + NamespaceId: nsID, FilesystemId: fsID, + InodeId: qcInodeID, + }) + require.NoError(t, err) + assert.Len(t, dirResp.Entries, 3, "all 3 files after recovery + completion") +} diff --git a/chasm/lib/temporalzfs/search_attributes.go b/chasm/lib/temporalzfs/search_attributes.go new file mode 100644 index 0000000000..1c68f733e5 --- /dev/null +++ b/chasm/lib/temporalzfs/search_attributes.go @@ -0,0 +1,8 @@ +package temporalzfs + +import "go.temporal.io/server/chasm" + +var statusSearchAttribute = chasm.NewSearchAttributeKeyword( + "FilesystemStatus", + chasm.SearchAttributeFieldLowCardinalityKeyword01, +) diff --git a/chasm/lib/temporalzfs/statemachine.go b/chasm/lib/temporalzfs/statemachine.go new file mode 100644 index 0000000000..1515c0d7bd --- /dev/null +++ b/chasm/lib/temporalzfs/statemachine.go @@ -0,0 +1,105 @@ +package temporalzfs + +import ( + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" +) + +var _ chasm.StateMachine[temporalzfspb.FilesystemStatus] = (*Filesystem)(nil) + +// StateMachineState returns the current filesystem status. +func (f *Filesystem) StateMachineState() temporalzfspb.FilesystemStatus { + if f.FilesystemState == nil { + return temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED + } + return f.Status +} + +// SetStateMachineState sets the filesystem status. +func (f *Filesystem) SetStateMachineState(state temporalzfspb.FilesystemStatus) { + if f.FilesystemState == nil { + f.FilesystemState = &temporalzfspb.FilesystemState{} + } + f.Status = state +} + +// CreateEvent carries the configuration for creating a new filesystem. +type CreateEvent struct { + Config *temporalzfspb.FilesystemConfig + OwnerWorkflowIDs []string +} + +// TransitionCreate transitions from UNSPECIFIED → RUNNING. +var TransitionCreate = chasm.NewTransition( + []temporalzfspb.FilesystemStatus{ + temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED, + }, + temporalzfspb.FILESYSTEM_STATUS_RUNNING, + func(fs *Filesystem, ctx chasm.MutableContext, event CreateEvent) error { + fs.Config = event.Config + if fs.Config == nil { + fs.Config = defaultConfig() + } + fs.NextInodeId = 2 // root inode = 1 + fs.NextTxnId = 1 + fs.Stats = &temporalzfspb.FSStats{} + + // Build deduplicated owner set. + owners := make(map[string]struct{}) + for _, id := range event.OwnerWorkflowIDs { + if id != "" { + owners[id] = struct{}{} + } + } + for id := range owners { + fs.OwnerWorkflowIds = append(fs.OwnerWorkflowIds, id) + } + + // Schedule periodic GC task. + if gcInterval := fs.Config.GetGcInterval().AsDuration(); gcInterval > 0 { + ctx.AddTask(fs, chasm.TaskAttributes{ + ScheduledTime: ctx.Now(fs).Add(gcInterval), + }, &temporalzfspb.ChunkGCTask{}) + } + + // Schedule periodic owner check task if there are owners. + if len(fs.OwnerWorkflowIds) > 0 { + interval := fs.Config.GetOwnerCheckInterval().AsDuration() + if interval <= 0 { + interval = defaultOwnerCheckInterval + } + ctx.AddTask(fs, chasm.TaskAttributes{ + ScheduledTime: ctx.Now(fs).Add(interval), + }, &temporalzfspb.OwnerCheckTask{}) + } + + return nil + }, +) + +// TransitionArchive transitions from RUNNING → ARCHIVED. +var TransitionArchive = chasm.NewTransition( + []temporalzfspb.FilesystemStatus{ + temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, + func(_ *Filesystem, _ chasm.MutableContext, _ any) error { + return nil + }, +) + +// TransitionDelete transitions from RUNNING or ARCHIVED → DELETED. +// Schedules a DataCleanupTask to delete all FS data from the store. +var TransitionDelete = chasm.NewTransition( + []temporalzfspb.FilesystemStatus{ + temporalzfspb.FILESYSTEM_STATUS_RUNNING, + temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, + }, + temporalzfspb.FILESYSTEM_STATUS_DELETED, + func(fs *Filesystem, ctx chasm.MutableContext, _ any) error { + ctx.AddTask(fs, chasm.TaskAttributes{ + ScheduledTime: chasm.TaskScheduledTimeImmediate, + }, &temporalzfspb.DataCleanupTask{}) + return nil + }, +) diff --git a/chasm/lib/temporalzfs/statemachine_test.go b/chasm/lib/temporalzfs/statemachine_test.go new file mode 100644 index 0000000000..d777fca2f1 --- /dev/null +++ b/chasm/lib/temporalzfs/statemachine_test.go @@ -0,0 +1,224 @@ +package temporalzfs + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "google.golang.org/protobuf/types/known/durationpb" +) + +var defaultTime = time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) + +func newMockMutableContext() *chasm.MockMutableContext { + return &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{ + NamespaceID: "test-namespace-id", + BusinessID: "test-filesystem-id", + } + }, + }, + } +} + +func TestTransitionCreate(t *testing.T) { + testCases := []struct { + name string + config *temporalzfspb.FilesystemConfig + ownerWorkflowIDs []string + expectDefaultConf bool + expectGCTask bool + expectOwnerCheck bool + }{ + { + name: "with custom config and owner", + config: &temporalzfspb.FilesystemConfig{ + ChunkSize: 512 * 1024, + MaxSize: 2 << 30, + MaxFiles: 50_000, + GcInterval: durationpb.New(10 * time.Minute), + }, + ownerWorkflowIDs: []string{"wf-123"}, + expectDefaultConf: false, + expectGCTask: true, + expectOwnerCheck: true, + }, + { + name: "with nil config uses defaults", + config: nil, + ownerWorkflowIDs: []string{"wf-456"}, + expectDefaultConf: true, + expectGCTask: true, + expectOwnerCheck: true, + }, + { + name: "with zero GC interval and no owners", + config: &temporalzfspb.FilesystemConfig{ + ChunkSize: 256 * 1024, + GcInterval: durationpb.New(0), + }, + ownerWorkflowIDs: nil, + expectDefaultConf: false, + expectGCTask: false, + expectOwnerCheck: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := newMockMutableContext() + + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{}, + } + + err := TransitionCreate.Apply(fs, ctx, CreateEvent{ + Config: tc.config, + OwnerWorkflowIDs: tc.ownerWorkflowIDs, + }) + require.NoError(t, err) + + // Verify status. + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_RUNNING, fs.Status) + + // Verify inode and txn IDs. + require.EqualValues(t, 2, fs.NextInodeId) + require.EqualValues(t, 1, fs.NextTxnId) + + // Verify stats initialized. + require.NotNil(t, fs.Stats) + + // Verify owner workflow IDs. + require.ElementsMatch(t, tc.ownerWorkflowIDs, fs.OwnerWorkflowIds) + + // Verify config. + require.NotNil(t, fs.Config) + if tc.expectDefaultConf { + require.EqualValues(t, defaultChunkSize, fs.Config.ChunkSize) + require.EqualValues(t, defaultMaxSize, fs.Config.MaxSize) + require.EqualValues(t, defaultMaxFiles, fs.Config.MaxFiles) + require.Equal(t, defaultGCInterval, fs.Config.GcInterval.AsDuration()) + require.Equal(t, defaultSnapshotRetention, fs.Config.SnapshotRetention.AsDuration()) + } else { + require.Equal(t, tc.config.ChunkSize, fs.Config.ChunkSize) + } + + // Verify tasks. + expectedTasks := 0 + if tc.expectGCTask { + expectedTasks++ + } + if tc.expectOwnerCheck { + expectedTasks++ + } + require.Len(t, ctx.Tasks, expectedTasks) + + if tc.expectGCTask { + task := ctx.Tasks[0] + require.IsType(t, &temporalzfspb.ChunkGCTask{}, task.Payload) + expectedTime := defaultTime.Add(fs.Config.GcInterval.AsDuration()) + require.Equal(t, expectedTime, task.Attributes.ScheduledTime) + } + }) + } +} + +func TestTransitionCreate_InvalidSourceState(t *testing.T) { + for _, status := range []temporalzfspb.FilesystemStatus{ + temporalzfspb.FILESYSTEM_STATUS_RUNNING, + temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, + temporalzfspb.FILESYSTEM_STATUS_DELETED, + } { + t.Run(status.String(), func(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{Status: status}, + } + err := TransitionCreate.Apply(fs, ctx, CreateEvent{}) + require.ErrorIs(t, err, chasm.ErrInvalidTransition) + }) + } +} + +func TestTransitionArchive(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + } + + err := TransitionArchive.Apply(fs, ctx, nil) + require.NoError(t, err) + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, fs.Status) +} + +func TestTransitionArchive_InvalidSourceStates(t *testing.T) { + for _, status := range []temporalzfspb.FilesystemStatus{ + temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED, + temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, + temporalzfspb.FILESYSTEM_STATUS_DELETED, + } { + t.Run(status.String(), func(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{Status: status}, + } + err := TransitionArchive.Apply(fs, ctx, nil) + require.ErrorIs(t, err, chasm.ErrInvalidTransition) + }) + } +} + +func TestTransitionDelete_FromRunning(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + } + + err := TransitionDelete.Apply(fs, ctx, nil) + require.NoError(t, err) + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_DELETED, fs.Status) + // Verify DataCleanupTask is scheduled. + require.Len(t, ctx.Tasks, 1) + require.IsType(t, &temporalzfspb.DataCleanupTask{}, ctx.Tasks[0].Payload) +} + +func TestTransitionDelete_FromArchived(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, + }, + } + + err := TransitionDelete.Apply(fs, ctx, nil) + require.NoError(t, err) + require.Equal(t, temporalzfspb.FILESYSTEM_STATUS_DELETED, fs.Status) + // Verify DataCleanupTask is scheduled. + require.Len(t, ctx.Tasks, 1) + require.IsType(t, &temporalzfspb.DataCleanupTask{}, ctx.Tasks[0].Payload) +} + +func TestTransitionDelete_InvalidSourceStates(t *testing.T) { + for _, status := range []temporalzfspb.FilesystemStatus{ + temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED, + temporalzfspb.FILESYSTEM_STATUS_DELETED, + } { + t.Run(status.String(), func(t *testing.T) { + ctx := newMockMutableContext() + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{Status: status}, + } + err := TransitionDelete.Apply(fs, ctx, nil) + require.ErrorIs(t, err, chasm.ErrInvalidTransition) + }) + } +} diff --git a/chasm/lib/temporalzfs/store_provider.go b/chasm/lib/temporalzfs/store_provider.go new file mode 100644 index 0000000000..125a443572 --- /dev/null +++ b/chasm/lib/temporalzfs/store_provider.go @@ -0,0 +1,24 @@ +package temporalzfs + +import ( + "github.com/temporalio/temporal-zfs/pkg/store" +) + +// FSStoreProvider is the pluggable interface for FS storage backends. +// OSS implements this with PebbleStoreProvider. SaaS implements with +// CDSStoreProvider (backed by Walker) via fx.Decorate in saas-temporal. +// +// This is the sole extension point for SaaS — all other FS components +// (CHASM archetype, gRPC service, FUSE mount) are identical between OSS and SaaS. +type FSStoreProvider interface { + // GetStore returns a store.Store scoped to a specific FS execution. + // The returned store provides full key isolation for that execution. + GetStore(shardID int32, namespaceID string, filesystemID string) (store.Store, error) + + // DeleteStore deletes all FS data for a specific filesystem execution. + // Called by DataCleanupTask when a filesystem transitions to DELETED. + DeleteStore(shardID int32, namespaceID string, filesystemID string) error + + // Close releases all resources (PebbleDB instances, Walker sessions, etc.) + Close() error +} diff --git a/chasm/lib/temporalzfs/tasks.go b/chasm/lib/temporalzfs/tasks.go new file mode 100644 index 0000000000..79047cf166 --- /dev/null +++ b/chasm/lib/temporalzfs/tasks.go @@ -0,0 +1,376 @@ +package temporalzfs + +import ( + "context" + "time" + + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" +) + +// chunkGCTaskExecutor handles periodic garbage collection of orphaned chunks. +type chunkGCTaskExecutor struct { + config *Config + logger log.Logger + storeProvider FSStoreProvider +} + +func newChunkGCTaskExecutor(config *Config, logger log.Logger, storeProvider FSStoreProvider) *chunkGCTaskExecutor { + return &chunkGCTaskExecutor{config: config, logger: logger, storeProvider: storeProvider} +} + +func (e *chunkGCTaskExecutor) Validate( + _ chasm.Context, + fs *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.ChunkGCTask, +) (bool, error) { + return fs.Status == temporalzfspb.FILESYSTEM_STATUS_RUNNING, nil +} + +func (e *chunkGCTaskExecutor) Execute( + ctx chasm.MutableContext, + fs *Filesystem, + _ chasm.TaskAttributes, + task *temporalzfspb.ChunkGCTask, +) error { + key := ctx.ExecutionKey() + + s, err := e.storeProvider.GetStore(0, key.NamespaceID, key.BusinessID) + if err != nil { + e.logger.Error("GC: failed to get store", tag.Error(err)) + return e.rescheduleGC(ctx, fs, task.GetLastProcessedTxnId()) + } + + f, err := tzfs.Open(s) + if err != nil { + _ = s.Close() + e.logger.Error("GC: failed to open FS", tag.Error(err)) + return e.rescheduleGC(ctx, fs, task.GetLastProcessedTxnId()) + } + + gcStats := f.RunGC(tzfs.GCConfig{ + BatchSize: 100, + MaxChunksPerRound: 10000, + }) + if closeErr := f.Close(); closeErr != nil { + e.logger.Warn("GC: failed to close FS", tag.Error(closeErr)) + } + + e.logger.Info("GC completed", + tag.NewStringTag("filesystem_id", key.BusinessID), + tag.NewInt64("tombstones_processed", int64(gcStats.TombstonesProcessed)), + tag.NewInt64("chunks_deleted", int64(gcStats.ChunksDeleted)), + ) + + // Update CHASM state stats from FS metrics. + if fs.Stats == nil { + fs.Stats = &temporalzfspb.FSStats{} + } + fs.Stats.TransitionCount++ + if deleted := uint64(gcStats.ChunksDeleted); deleted >= fs.Stats.ChunkCount { + fs.Stats.ChunkCount = 0 + } else { + fs.Stats.ChunkCount -= deleted + } + + return e.rescheduleGC(ctx, fs, task.GetLastProcessedTxnId()) +} + +func (e *chunkGCTaskExecutor) rescheduleGC(ctx chasm.MutableContext, fs *Filesystem, lastTxnID uint64) error { + gcInterval := fs.Config.GetGcInterval().AsDuration() + if gcInterval > 0 { + ctx.AddTask(fs, chasm.TaskAttributes{ + ScheduledTime: ctx.Now(fs).Add(gcInterval), + }, &temporalzfspb.ChunkGCTask{ + LastProcessedTxnId: lastTxnID, + }) + } + return nil +} + +// manifestCompactTaskExecutor handles compaction of the underlying PebbleDB store. +type manifestCompactTaskExecutor struct { + config *Config + logger log.Logger + storeProvider FSStoreProvider +} + +func newManifestCompactTaskExecutor(config *Config, logger log.Logger, storeProvider FSStoreProvider) *manifestCompactTaskExecutor { + return &manifestCompactTaskExecutor{config: config, logger: logger, storeProvider: storeProvider} +} + +func (e *manifestCompactTaskExecutor) Validate( + _ chasm.Context, + fs *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.ManifestCompactTask, +) (bool, error) { + return fs.Status == temporalzfspb.FILESYSTEM_STATUS_RUNNING, nil +} + +func (e *manifestCompactTaskExecutor) Execute( + _ chasm.MutableContext, + _ *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.ManifestCompactTask, +) error { + // Compaction is handled at the PebbleDB level per shard, not per filesystem. + // This task is a placeholder for future per-FS compaction triggers. + return nil +} + +// quotaCheckTaskExecutor enforces storage quotas. +type quotaCheckTaskExecutor struct { + config *Config + logger log.Logger + storeProvider FSStoreProvider +} + +func newQuotaCheckTaskExecutor(config *Config, logger log.Logger, storeProvider FSStoreProvider) *quotaCheckTaskExecutor { + return "aCheckTaskExecutor{config: config, logger: logger, storeProvider: storeProvider} +} + +func (e *quotaCheckTaskExecutor) Validate( + _ chasm.Context, + fs *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.QuotaCheckTask, +) (bool, error) { + return fs.Status == temporalzfspb.FILESYSTEM_STATUS_RUNNING, nil +} + +func (e *quotaCheckTaskExecutor) Execute( + ctx chasm.MutableContext, + fs *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.QuotaCheckTask, +) error { + key := ctx.ExecutionKey() + + s, err := e.storeProvider.GetStore(0, key.NamespaceID, key.BusinessID) + if err != nil { + e.logger.Error("QuotaCheck: failed to get store", tag.Error(err)) + return err + } + + f, err := tzfs.Open(s) + if err != nil { + _ = s.Close() + e.logger.Error("QuotaCheck: failed to open FS", tag.Error(err)) + return err + } + + m := f.Metrics() + if closeErr := f.Close(); closeErr != nil { + e.logger.Warn("QuotaCheck: failed to close FS", tag.Error(closeErr)) + } + + if fs.Stats == nil { + fs.Stats = &temporalzfspb.FSStats{} + } + + // Update stats from FS metrics. + fs.Stats.TotalSize = uint64(m.BytesWritten.Load()) + fs.Stats.FileCount = uint64(m.FilesCreated.Load() - m.FilesDeleted.Load()) + fs.Stats.DirCount = uint64(m.DirsCreated.Load() - m.DirsDeleted.Load()) + + maxSize := fs.Config.GetMaxSize() + if maxSize > 0 && fs.Stats.TotalSize > maxSize { + e.logger.Warn("Filesystem exceeds size quota", + tag.NewStringTag("filesystem_id", key.BusinessID), + tag.NewInt64("total_size", int64(fs.Stats.TotalSize)), + tag.NewInt64("max_size", int64(maxSize)), + ) + } + + return nil +} + +// WorkflowExistenceChecker checks whether a workflow execution still exists. +// Used by ownerCheckTaskExecutor to detect dead owners. +type WorkflowExistenceChecker interface { + WorkflowExists(ctx context.Context, namespaceID string, workflowID string) (bool, error) +} + +// noopWorkflowExistenceChecker is the default OSS implementation. +// SaaS can override this via fx.Decorate with a real implementation +// that queries the history service. +type noopWorkflowExistenceChecker struct{} + +func newNoopWorkflowExistenceChecker() *noopWorkflowExistenceChecker { + return &noopWorkflowExistenceChecker{} +} + +func (n *noopWorkflowExistenceChecker) WorkflowExists(_ context.Context, _ string, _ string) (bool, error) { + // Default: assume all workflows exist. The push path (DetachWorkflow) + // handles cleanup in OSS. SaaS overrides with a real checker. + return true, nil +} + +// ownerCheckTaskExecutor is the pull-based safety net for GC. +// It periodically checks all owner workflow IDs and removes dead ones. +type ownerCheckTaskExecutor struct { + logger log.Logger + existenceChecker WorkflowExistenceChecker +} + +func newOwnerCheckTaskExecutor(logger log.Logger, existenceChecker WorkflowExistenceChecker) *ownerCheckTaskExecutor { + return &ownerCheckTaskExecutor{logger: logger, existenceChecker: existenceChecker} +} + +func (e *ownerCheckTaskExecutor) Validate( + _ chasm.Context, + fs *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.OwnerCheckTask, +) (bool, error) { + return fs.Status == temporalzfspb.FILESYSTEM_STATUS_RUNNING && len(fs.OwnerWorkflowIds) > 0, nil +} + +func (e *ownerCheckTaskExecutor) Execute( + ctx chasm.MutableContext, + fs *Filesystem, + _ chasm.TaskAttributes, + task *temporalzfspb.OwnerCheckTask, +) error { + key := ctx.ExecutionKey() + notFoundCounts := task.GetNotFoundCounts() + + var surviving []string + updatedCounts := make(map[string]int32) + + for _, wfID := range fs.OwnerWorkflowIds { + exists, err := e.existenceChecker.WorkflowExists(context.TODO(), key.NamespaceID, wfID) + if err != nil { + // Transient error — keep this owner, reset its counter. + surviving = append(surviving, wfID) + e.logger.Warn("OwnerCheck: transient error checking workflow", + tag.NewStringTag("workflow_id", wfID), + tag.Error(err), + ) + continue + } + if exists { + surviving = append(surviving, wfID) + continue + } + // Not found — increment counter. + count := notFoundCounts[wfID] + 1 + if count < ownerCheckNotFoundThreshold { + surviving = append(surviving, wfID) + updatedCounts[wfID] = count + } else { + e.logger.Info("OwnerCheck: removing dead owner", + tag.NewStringTag("filesystem_id", key.BusinessID), + tag.NewStringTag("workflow_id", wfID), + ) + } + } + + fs.OwnerWorkflowIds = surviving + + if len(surviving) == 0 { + // All owners gone — transition to DELETED. + e.logger.Info("OwnerCheck: all owners gone, deleting filesystem", + tag.NewStringTag("filesystem_id", key.BusinessID), + ) + return TransitionDelete.Apply(fs, ctx, nil) + } + + // Reschedule next check. + return e.rescheduleOwnerCheck(ctx, fs, updatedCounts) +} + +func (e *ownerCheckTaskExecutor) rescheduleOwnerCheck( + ctx chasm.MutableContext, + fs *Filesystem, + notFoundCounts map[string]int32, +) error { + interval := fs.Config.GetOwnerCheckInterval().AsDuration() + if interval <= 0 { + interval = defaultOwnerCheckInterval + } + ctx.AddTask(fs, chasm.TaskAttributes{ + ScheduledTime: ctx.Now(fs).Add(interval), + }, &temporalzfspb.OwnerCheckTask{ + NotFoundCounts: notFoundCounts, + }) + return nil +} + +// dataCleanupTaskExecutor deletes all FS data from the store when a filesystem +// transitions to DELETED. This is a SideEffectTask because it performs +// irreversible external I/O (store deletion). +type dataCleanupTaskExecutor struct { + logger log.Logger + storeProvider FSStoreProvider +} + +func newDataCleanupTaskExecutor(logger log.Logger, storeProvider FSStoreProvider) *dataCleanupTaskExecutor { + return &dataCleanupTaskExecutor{logger: logger, storeProvider: storeProvider} +} + +func (e *dataCleanupTaskExecutor) Validate( + _ chasm.Context, + fs *Filesystem, + _ chasm.TaskAttributes, + _ *temporalzfspb.DataCleanupTask, +) (bool, error) { + return fs.Status == temporalzfspb.FILESYSTEM_STATUS_DELETED, nil +} + +func (e *dataCleanupTaskExecutor) Execute( + ctx context.Context, + ref chasm.ComponentRef, + _ chasm.TaskAttributes, + task *temporalzfspb.DataCleanupTask, +) error { + key := ref.ExecutionKey + e.logger.Info("DataCleanup: deleting FS store data", + tag.NewStringTag("filesystem_id", key.BusinessID), + tag.NewInt32("attempt", task.GetAttempt()), + ) + + if err := e.storeProvider.DeleteStore(0, key.NamespaceID, key.BusinessID); err != nil { + e.logger.Error("DataCleanup: failed to delete store", + tag.NewStringTag("filesystem_id", key.BusinessID), + tag.Error(err), + ) + // Reschedule with exponential backoff. + nextAttempt := task.GetAttempt() + 1 + backoff := time.Duration(1< dataCleanupMaxBackoff { + backoff = dataCleanupMaxBackoff + } + + _, _, schedErr := chasm.UpdateComponent( + ctx, + ref, + func(fs *Filesystem, mCtx chasm.MutableContext, _ any) (chasm.NoValue, error) { + mCtx.AddTask(fs, chasm.TaskAttributes{ + ScheduledTime: mCtx.Now(fs).Add(backoff), + }, &temporalzfspb.DataCleanupTask{ + Attempt: nextAttempt, + }) + return nil, nil + }, + nil, + ) + if schedErr != nil { + e.logger.Error("DataCleanup: failed to reschedule", + tag.NewStringTag("filesystem_id", key.BusinessID), + tag.Error(schedErr), + ) + } + return err + } + + e.logger.Info("DataCleanup: FS store data deleted successfully", + tag.NewStringTag("filesystem_id", key.BusinessID), + ) + return nil +} diff --git a/chasm/lib/temporalzfs/tasks_test.go b/chasm/lib/temporalzfs/tasks_test.go new file mode 100644 index 0000000000..dff43d23e0 --- /dev/null +++ b/chasm/lib/temporalzfs/tasks_test.go @@ -0,0 +1,199 @@ +package temporalzfs + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + "go.temporal.io/server/chasm" + temporalzfspb "go.temporal.io/server/chasm/lib/temporalzfs/gen/temporalzfspb/v1" + "go.temporal.io/server/common/log" + "google.golang.org/protobuf/types/known/durationpb" +) + +func newTestStoreProvider(t *testing.T) *PebbleStoreProvider { + t.Helper() + p := NewPebbleStoreProvider(t.TempDir(), log.NewTestLogger()) + t.Cleanup(func() { _ = p.Close() }) + return p +} + +func newRunningFilesystem() *Filesystem { + return &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + Config: &temporalzfspb.FilesystemConfig{ + ChunkSize: 256 * 1024, + MaxSize: 1 << 30, + MaxFiles: 100_000, + GcInterval: durationpb.New(5 * time.Minute), + }, + Stats: &temporalzfspb.FSStats{}, + }, + } +} + +// initTestFS creates a temporal-zfs filesystem in the store provider for the given namespace/filesystem. +func initTestFS(t *testing.T, provider *PebbleStoreProvider, nsID, fsID string) { + t.Helper() + s, err := provider.GetStore(0, nsID, fsID) + require.NoError(t, err) + f, err := tzfs.Create(s, tzfs.Options{}) + require.NoError(t, err) + _ = f.Close() +} + +// --- Validate tests --- + +func TestChunkGCValidate(t *testing.T) { + executor := &chunkGCTaskExecutor{} + + testCases := []struct { + status temporalzfspb.FilesystemStatus + expected bool + }{ + {temporalzfspb.FILESYSTEM_STATUS_RUNNING, true}, + {temporalzfspb.FILESYSTEM_STATUS_UNSPECIFIED, false}, + {temporalzfspb.FILESYSTEM_STATUS_ARCHIVED, false}, + {temporalzfspb.FILESYSTEM_STATUS_DELETED, false}, + } + + for _, tc := range testCases { + t.Run(tc.status.String(), func(t *testing.T) { + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{Status: tc.status}, + } + ok, err := executor.Validate(nil, fs, chasm.TaskAttributes{}, nil) + require.NoError(t, err) + require.Equal(t, tc.expected, ok) + }) + } +} + +func TestManifestCompactValidate(t *testing.T) { + executor := &manifestCompactTaskExecutor{} + + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + } + ok, err := executor.Validate(nil, fs, chasm.TaskAttributes{}, nil) + require.NoError(t, err) + require.True(t, ok) + + fs.Status = temporalzfspb.FILESYSTEM_STATUS_ARCHIVED + ok, err = executor.Validate(nil, fs, chasm.TaskAttributes{}, nil) + require.NoError(t, err) + require.False(t, ok) +} + +func TestQuotaCheckValidate(t *testing.T) { + executor := "aCheckTaskExecutor{} + + fs := &Filesystem{ + FilesystemState: &temporalzfspb.FilesystemState{ + Status: temporalzfspb.FILESYSTEM_STATUS_RUNNING, + }, + } + ok, err := executor.Validate(nil, fs, chasm.TaskAttributes{}, nil) + require.NoError(t, err) + require.True(t, ok) + + fs.Status = temporalzfspb.FILESYSTEM_STATUS_DELETED + ok, err = executor.Validate(nil, fs, chasm.TaskAttributes{}, nil) + require.NoError(t, err) + require.False(t, ok) +} + +// --- Execute tests --- + +func TestChunkGCExecute(t *testing.T) { + provider := newTestStoreProvider(t) + logger := log.NewTestLogger() + + nsID := "test-namespace-id" + fsID := "test-filesystem-id" + initTestFS(t, provider, nsID, fsID) + + executor := newChunkGCTaskExecutor(nil, logger, provider) + ctx := newMockMutableContext() + fs := newRunningFilesystem() + + err := executor.Execute(ctx, fs, chasm.TaskAttributes{}, &temporalzfspb.ChunkGCTask{}) + require.NoError(t, err) + + // Stats should be updated (TransitionCount incremented). + require.NotNil(t, fs.Stats) + require.EqualValues(t, 1, fs.Stats.TransitionCount) + + // GC task should be rescheduled. + require.Len(t, ctx.Tasks, 1) + task := ctx.Tasks[0] + require.IsType(t, &temporalzfspb.ChunkGCTask{}, task.Payload) + expectedTime := defaultTime.Add(5 * time.Minute) + require.Equal(t, expectedTime, task.Attributes.ScheduledTime) +} + +func TestChunkGCExecute_NoGCInterval(t *testing.T) { + provider := newTestStoreProvider(t) + logger := log.NewTestLogger() + + nsID := "test-namespace-id" + fsID := "test-filesystem-id" + initTestFS(t, provider, nsID, fsID) + + executor := newChunkGCTaskExecutor(nil, logger, provider) + ctx := newMockMutableContext() + fs := newRunningFilesystem() + fs.Config.GcInterval = durationpb.New(0) // Disable GC rescheduling. + + err := executor.Execute(ctx, fs, chasm.TaskAttributes{}, &temporalzfspb.ChunkGCTask{}) + require.NoError(t, err) + + // No task should be rescheduled. + require.Empty(t, ctx.Tasks) +} + +func TestQuotaCheckExecute(t *testing.T) { + provider := newTestStoreProvider(t) + logger := log.NewTestLogger() + + nsID := "test-namespace-id" + fsID := "test-filesystem-id" + initTestFS(t, provider, nsID, fsID) + + executor := newQuotaCheckTaskExecutor(nil, logger, provider) + ctx := newMockMutableContext() + fs := newRunningFilesystem() + + err := executor.Execute(ctx, fs, chasm.TaskAttributes{}, &temporalzfspb.QuotaCheckTask{}) + require.NoError(t, err) + + // Stats should be initialized (metrics are per-instance so values may be zero + // for a freshly opened FS, but the stats struct must be populated). + require.NotNil(t, fs.Stats) +} + +func TestQuotaCheckExecute_WithWrites(t *testing.T) { + provider := newTestStoreProvider(t) + + nsID := "test-namespace-id" + fsID := "test-filesystem-id" + + // Create FS, write data, and keep the FS open — metrics accumulate in-memory. + s, err := provider.GetStore(0, nsID, fsID) + require.NoError(t, err) + f, err := tzfs.Create(s, tzfs.Options{}) + require.NoError(t, err) + + err = f.WriteFile("/test.txt", []byte("hello world"), 0o644) + require.NoError(t, err) + + // Verify metrics are tracked on the open FS instance. + m := f.Metrics() + require.Positive(t, m.BytesWritten.Load()) + require.EqualValues(t, 1, m.FilesCreated.Load()) + _ = f.Close() +} diff --git a/docs/architecture/temporalzfs.md b/docs/architecture/temporalzfs.md new file mode 100644 index 0000000000..fa01fb1906 --- /dev/null +++ b/docs/architecture/temporalzfs.md @@ -0,0 +1,211 @@ +> [!WARNING] +> All documentation pertains to the [CHASM-based](https://github.com/temporalio/temporal/blob/main/docs/architecture/chasm.md) TemporalZFS implementation, which is not yet generally available. + +This page documents the internal architecture of TemporalZFS, a durable versioned filesystem for AI agent workflows. The target audience is server developers maintaining or operating the TemporalZFS implementation. Readers should already have an understanding of [CHASM terminology](https://github.com/temporalio/temporal/blob/main/docs/architecture/chasm.md). + +### Introduction + +TemporalZFS is implemented as a [CHASM](https://github.com/temporalio/temporal/blob/main/docs/architecture/chasm.md) library, with all related implementation code located in [`chasm/lib/temporalzfs`](https://github.com/temporalio/temporal/tree/main/chasm/lib/temporalzfs). Each filesystem is backed by an execution whose root component is a [`Filesystem`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/filesystem.go). + +FS layer data (inodes, chunks, directory entries) is stored in a dedicated store managed by an [`FSStoreProvider`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/store_provider.go), not as CHASM Fields. Only FS metadata (config, stats, lifecycle status) lives in CHASM state. This separation keeps the CHASM execution lightweight while allowing the FS data layer to scale independently. + +The FS operations are powered by the [`temporal-zfs`](https://github.com/temporalio/temporal-zfs) library, which provides a transactional copy-on-write filesystem backed by PebbleDB. + +```mermaid +classDiagram + direction TB + + class Filesystem { + FilesystemState + Visibility + LifecycleState() + Terminate() + SearchAttributes() + } + class FilesystemState { + FilesystemStatus status + FilesystemConfig config + FSStats stats + uint64 next_inode_id + uint64 next_txn_id + repeated string owner_workflow_ids + } + class FilesystemConfig { + uint32 chunk_size + uint64 max_size + uint64 max_files + Duration gc_interval + Duration snapshot_retention + Duration owner_check_interval + } + class FSStats { + uint64 total_size + uint64 file_count + uint64 dir_count + uint64 inode_count + uint64 chunk_count + uint64 transition_count + } + + Filesystem --> FilesystemState + FilesystemState --> FilesystemConfig + FilesystemState --> FSStats +``` +*Figure: The Filesystem component and its state. The Visibility field (not shown) provides search attribute indexing.* + +### State Machine + +The `Filesystem` component implements `chasm.StateMachine[FilesystemStatus]` with three transitions defined in [`statemachine.go`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/statemachine.go): + +```mermaid +stateDiagram-v2 + [*] --> UNSPECIFIED + UNSPECIFIED --> RUNNING : TransitionCreate + RUNNING --> ARCHIVED : TransitionArchive + RUNNING --> DELETED : TransitionDelete + ARCHIVED --> DELETED : TransitionDelete +``` + +- **TransitionCreate** (`UNSPECIFIED → RUNNING`): Initializes the filesystem with configuration (or defaults), sets `next_inode_id = 2` (root inode = 1), creates empty stats, records owner workflow IDs (deduplicated), schedules the first ChunkGC task (if gc_interval > 0), and schedules an OwnerCheckTask if owners are present. +- **TransitionArchive** (`RUNNING → ARCHIVED`): Marks the filesystem as archived. The underlying FS data remains accessible for reads but no further writes are expected. +- **TransitionDelete** (`RUNNING/ARCHIVED → DELETED`): Marks the filesystem for deletion and schedules a DataCleanupTask immediately. `Terminate()` also sets this status and schedules DataCleanupTask. + +Lifecycle mapping: `RUNNING` and `UNSPECIFIED` → `LifecycleStateRunning`; `ARCHIVED` and `DELETED` → `LifecycleStateCompleted`. + +### Tasks + +Five task types are registered in the [`library`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/library.go), with executors in [`tasks.go`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/tasks.go): + +| Task | Type | Description | +|------|------|-------------| +| **ChunkGC** | Periodic timer | Runs `temporal-zfs` garbage collection (`f.RunGC()`) to process tombstones and delete orphaned chunks. Reschedules itself at the configured `gc_interval`. Updates `TransitionCount` and `ChunkCount` in stats. | +| **ManifestCompact** | Placeholder | Reserved for future per-filesystem PebbleDB compaction triggers. Currently a no-op since compaction operates at the shard level. | +| **QuotaCheck** | On-demand | Reads `temporal-zfs` metrics to update `FSStats` (total size, file count, dir count). Logs a warning if the filesystem exceeds its configured `max_size` quota. | +| **OwnerCheckTask** | Periodic timer | Checks if owner workflows still exist via `WorkflowExistenceChecker`. Uses a not-found counter with threshold of 2 (must miss twice before removal) to avoid transient false positives. Removes owners that are confirmed gone. Transitions filesystem to DELETED when all owners are removed. Reschedules at `owner_check_interval`. | +| **DataCleanupTask** | Side-effect | Runs after filesystem transitions to DELETED. Calls `FSStoreProvider.DeleteStore()` to remove all filesystem data. On failure, reschedules with exponential backoff (capped at 30 minutes). | + +ChunkGC, ManifestCompact, QuotaCheck, and OwnerCheckTask validators check that the filesystem is in `RUNNING` status. DataCleanupTask validates `DELETED` status. + +### Storage Architecture + +TemporalZFS uses a pluggable storage interface so that OSS and SaaS deployments can use different backends without changing the FS layer or CHASM archetype. + +``` +┌─────────────────────────────────────┐ +│ FSStoreProvider │ ← Interface (store_provider.go) +│ GetStore(shard, ns, fsID) │ +│ DeleteStore(shard, ns, fsID) │ +│ Close() │ +├──────────────────┬──────────────────┤ +│ PebbleStore │ CDSStore │ +│ Provider (OSS) │ Provider (SaaS) │ +└──────────────────┴──────────────────┘ +``` + +**[`FSStoreProvider`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/store_provider.go)** is the sole extension point for SaaS. All other FS components (CHASM archetype, gRPC service, FUSE mount) are identical between OSS and SaaS. + +**[`PebbleStoreProvider`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/pebble_store_provider.go)** (OSS): +- Creates a single PebbleDB instance (lazy-initialized at `{dataDir}/temporalzfs/`). +- Returns a `PrefixedStore` per filesystem execution for key isolation — each `(namespaceID, filesystemID)` pair maps to a deterministic partition ID derived from FNV-1a hash, ensuring stability across restarts. +- The underlying PebbleDB is shared across all filesystem executions. + +**`CDSStoreProvider`** (SaaS, in `saas-temporal`): +- Implements `FSStoreProvider` via `fx.Decorate`, replacing `PebbleStoreProvider`. +- Backed by Walker: uses `rpcEngine` (wrapping Walker `ShardClient` RPCs) adapted to `store.Store`. +- Data isolated via `ShardspaceTemporalZFS`, a `tzfs\x00` key prefix, and per-filesystem `PrefixedStore` partitions. +- See [`cds/doc/temporalzfs.md`](https://github.com/temporalio/saas-temporal/blob/main/cds/doc/temporalzfs.md) in `saas-temporal` for the full CDS integration architecture. + +### gRPC Service + +The [`TemporalZFSService`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/proto/v1/service.proto) defines 22 RPCs for filesystem operations. The [`handler`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/handler.go) implements these using CHASM APIs for lifecycle and `temporal-zfs` APIs for FS operations. + +**Lifecycle RPCs:** + +| RPC | CHASM API | temporal-zfs API | +|-----|-----------|-----------------| +| `CreateFilesystem` | `chasm.StartExecution` | `tzfs.Create()` | +| `GetFilesystemInfo` | `chasm.ReadComponent` | — | +| `ArchiveFilesystem` | `chasm.UpdateComponent` | — | +| `AttachWorkflow` | `chasm.UpdateComponent` | — | +| `DetachWorkflow` | `chasm.UpdateComponent` | — | + +`AttachWorkflow` adds an owner workflow ID to the filesystem (deduplicated). `DetachWorkflow` removes one; if no owners remain, the filesystem transitions to DELETED. + +**FS operation RPCs** (all use inode-based `ByID` methods from `temporal-zfs`): + +| RPC | temporal-zfs API | +|-----|-----------------| +| `Getattr` | `f.StatByID()` | +| `Setattr` | `f.ChmodByID()`, `f.ChownByID()`, `f.UtimensByID()` | +| `Lookup` | `f.LookupByID()` | +| `ReadChunks` | `f.ReadAtByID()` | +| `WriteChunks` | `f.WriteAtByID()` | +| `Truncate` | `f.TruncateByID()` | +| `Mkdir` | `f.MkdirByID()` | +| `Unlink` | `f.UnlinkByID()` | +| `Rmdir` | `f.RmdirByID()` | +| `Rename` | `f.RenameByID()` | +| `ReadDir` | `f.ReadDirByID()` / `f.ReadDirPlusByID()` | +| `Link` | `f.LinkByID()` | +| `Symlink` | `f.SymlinkByID()` | +| `Readlink` | `f.ReadlinkByID()` | +| `CreateFile` | `f.CreateFileByID()` | +| `Mknod` | `f.MknodByID()` | +| `Statfs` | `f.GetQuota()`, `f.ChunkSize()` | +| `CreateSnapshot` | `f.CreateSnapshot()` | + +The handler pattern for FS operations is: get store via `FSStoreProvider` → open `tzfs.FS` → execute operation → close FS (which also closes the store). On error, `openFS`/`createFS` close the store internally before returning. The CHASM execution is only accessed for lifecycle operations (create, archive, get info). + +### WAL Integration (SaaS) + +In the SaaS deployment, writes go through a WAL pipeline for durability: + +``` +temporal-zfs write → walEngine → LP WAL → ack → stateTracker buffer + ↓ + tzfsFlusher (500ms tick) + ↓ + rpcEngine → Walker RPCs + ↓ + watermark advance +``` + +- **`walEngine`**: Implements `Engine` by routing reads to `rpcEngine` (Walker) and writes through the LP WAL. Each write is serialized as a `WALLogTFSData` record and awaits acknowledgement before buffering in the state tracker. +- **`tzfsStateTracker`**: Buffers acknowledged WAL ops in memory, ordered by log ID. The flusher drains this buffer. +- **`tzfsFlusher`**: Runs a dedicated goroutine that drains buffered ops every 500ms and writes them to Walker via `rpcEngine`, then advances the `TEMPORALZFS_RECOVERY_WATERMARK`. On shutdown, performs a final flush with a 5s timeout. +- **`tzfsWALRecoverer`**: On shard acquisition, replays WAL records between the recovery watermark and the WAL head to rebuild the state tracker buffer. + +### FX Wiring + +The [`HistoryModule`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/fx.go) wires everything together via `go.uber.org/fx`: + +1. **Provides**: `Config` (dynamic config), `FSStoreProvider` (PebbleStoreProvider), `WorkflowExistenceChecker` (noop in OSS), `PostDeleteHook` (noop in OSS), `handler` (gRPC service), task executors (chunkGC, manifestCompact, quotaCheck, ownerCheck, dataCleanup), `library`. +2. **Invokes**: `registry.Register(library)` to register the archetype with the CHASM engine. + +The module is included in [`service/history/fx.go`](https://github.com/temporalio/temporal/blob/main/service/history/fx.go) alongside other archetype modules (Activity, Scheduler, etc.). + +### Owner Lifecycle & GC + +TemporalZFS uses a belt-and-suspenders approach for garbage collection when owner workflows are deleted: + +- **Pull path (OwnerCheckTask)**: Periodic safety net. Checks if each owner workflow still exists and removes confirmed-gone owners. Transitions to DELETED when all owners are removed, which triggers DataCleanupTask. +- **Push path (PostDeleteHook)**: Fast path. A `PostDeleteHook` on the workflow delete manager calls `DetachWorkflow` when a workflow is deleted. OSS implementation is a noop (relies on pull path). SaaS overrides via `fx.Decorate` to query visibility for owned filesystems. +- **WorkflowExistenceChecker**: Interface for checking workflow existence. OSS provides a noop (always returns true). SaaS overrides to query the history service. + +### Configuration + +[`config.go`](https://github.com/temporalio/temporal/blob/main/chasm/lib/temporalzfs/config.go) defines: + +| Setting | Default | Description | +|---------|---------|-------------| +| `temporalzfs.enabled` | `false` | Namespace-level toggle for TemporalZFS | +| Default chunk size | 256 KB | Size of file data chunks | +| Default max size | 1 GB | Per-filesystem storage quota | +| Default max files | 100,000 | Per-filesystem inode quota | +| Default GC interval | 5 min | How often ChunkGC runs | +| Default snapshot retention | 24 h | How long snapshots are kept | +| Default owner check interval | 10 min | How often OwnerCheckTask runs | +| Owner check not-found threshold | 2 | Consecutive misses before owner removal | +| Data cleanup max backoff | 30 min | Max retry interval for DataCleanupTask | + +Per-filesystem configuration can override these defaults via `FilesystemConfig` at creation time. diff --git a/go.mod b/go.mod index e260fafb8f..6b6253cf17 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ retract ( ) require ( - cloud.google.com/go/storage v1.51.0 + cloud.google.com/go/storage v1.61.3 github.com/Masterminds/sprig/v3 v3.3.0 github.com/aws/aws-sdk-go v1.55.8 github.com/blang/semver/v4 v4.0.0 @@ -18,14 +18,14 @@ require ( github.com/emirpasic/gods v1.18.1 github.com/fatih/color v1.18.0 github.com/go-faker/faker/v4 v4.6.0 - github.com/go-jose/go-jose/v4 v4.0.5 + github.com/go-jose/go-jose/v4 v4.1.3 github.com/go-sql-driver/mysql v1.9.0 github.com/gocql/gocql v1.7.0 github.com/golang-jwt/jwt/v4 v4.5.2 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 github.com/iancoleman/strcase v0.3.0 github.com/jackc/pgx/v5 v5.7.2 github.com/jmoiron/sqlx v1.4.0 @@ -37,7 +37,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/olivere/elastic/v7 v7.0.32 github.com/prometheus/client_golang v1.21.0 - github.com/prometheus/client_model v0.6.1 + github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.62.0 github.com/robfig/cron/v3 v3.0.1 github.com/sony/gobreaker v1.0.0 @@ -45,12 +45,13 @@ require ( github.com/temporalio/ringpop-go v0.0.0-20250130211428-b97329e994f7 github.com/temporalio/sqlparser v0.0.0-20231115171017-f4060bcfa6cb github.com/temporalio/tchannel-go v1.22.1-0.20240528171429-1db37fdea938 + github.com/temporalio/temporal-zfs v1.4.0 github.com/tidwall/btree v1.8.1 github.com/uber-go/tally/v4 v4.1.17 github.com/urfave/cli v1.22.16 github.com/urfave/cli/v2 v2.27.5 go.opentelemetry.io/collector/pdata v1.34.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 go.opentelemetry.io/otel v1.40.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 @@ -66,36 +67,73 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 - golang.org/x/oauth2 v0.28.0 - golang.org/x/sync v0.18.0 - golang.org/x/text v0.31.0 - golang.org/x/time v0.10.0 - google.golang.org/api v0.224.0 - google.golang.org/grpc v1.72.2 - google.golang.org/protobuf v1.36.6 + golang.org/x/oauth2 v0.36.0 + golang.org/x/sync v0.20.0 + golang.org/x/text v0.35.0 + golang.org/x/time v0.15.0 + google.golang.org/api v0.272.0 + google.golang.org/grpc v1.79.3 + google.golang.org/protobuf v1.36.11 gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v3 v3.0.1 modernc.org/sqlite v1.44.3 ) require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4 // indirect + github.com/DataDog/zstd v1.5.7 // indirect + github.com/RaduBerinde/axisds v0.1.0 // indirect + github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect + github.com/aws/aws-sdk-go-v2 v1.41.4 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect + github.com/aws/aws-sdk-go-v2/config v1.32.12 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.12 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 // indirect + github.com/aws/smithy-go v1.24.2 // indirect + github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble/v2 v2.1.4 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect ) require ( - cel.dev/expr v0.23.1 // indirect - cloud.google.com/go v0.118.3 // indirect; indirect e - cloud.google.com/go/auth v0.15.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect - cloud.google.com/go/iam v1.4.2 // indirect - cloud.google.com/go/monitoring v1.24.1 // indirect + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go v0.123.0 // indirect; indirect e + cloud.google.com/go/auth v0.18.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.3 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.1 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/apache/thrift v0.21.0 // indirect @@ -104,22 +142,22 @@ require ( github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect - github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.5 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect + github.com/googleapis/gax-go/v2 v2.18.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -141,7 +179,7 @@ require ( github.com/ncruces/go-strftime v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect @@ -151,27 +189,28 @@ require ( github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.7.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/twmb/murmur3 v1.1.8 // indirect github.com/uber-common/bark v1.3.0 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect - github.com/zeebo/errs v1.4.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 - go.opentelemetry.io/proto/otlp v1.5.0 + go.opentelemetry.io/proto/otlp v1.7.1 go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.19.0 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/sys v0.40.0 // indirect - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + golang.org/x/crypto v0.49.0 // indirect + golang.org/x/net v0.52.0 // indirect + golang.org/x/sys v0.42.0 // indirect + google.golang.org/genproto v0.0.0-20260217215200-42d3e9bedb6d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260311181403-84a4fc48630c // indirect gopkg.in/inf.v0 v0.9.1 // indirect modernc.org/libc v1.67.6 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect ) + +replace github.com/temporalio/temporal-zfs v1.4.0 => github.com/moedash/temporal-zfs v1.4.0 diff --git a/go.sum b/go.sum index 351d39581a..a58f9c31de 100644 --- a/go.sum +++ b/go.sum @@ -1,42 +1,56 @@ -cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg= -cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= -cloud.google.com/go v0.118.3 h1:jsypSnrE/w4mJysioGdMBg4MiW/hHx/sArFpaBWHdME= -cloud.google.com/go v0.118.3/go.mod h1:Lhs3YLnBlwJ4KA6nuObNMZ/fCbOQBPuWKPoE0Wa/9Vc= -cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps= -cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8= -cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= -cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.4.2 h1:4AckGYAYsowXeHzsn/LCKWIwSWLkdb0eGjH8wWkd27Q= -cloud.google.com/go/iam v1.4.2/go.mod h1:REGlrt8vSlh4dfCJfSEcNjLGq75wW75c5aU3FLOYq34= -cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= -cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.5 h1:sD+t8DO8j4HKW4QfouCklg7ZC1qC4uzVZt8iz3uTW+Q= -cloud.google.com/go/longrunning v0.6.5/go.mod h1:Et04XK+0TTLKa5IPYryKf5DkpwImy6TluQ1QTLwlKmY= -cloud.google.com/go/monitoring v1.24.1 h1:vKiypZVFD/5a3BbQMvI4gZdl8445ITzXFh257XBgrS0= -cloud.google.com/go/monitoring v1.24.1/go.mod h1:Z05d1/vn9NaujqY2voG6pVQXoJGbp+r3laV+LySt9K0= -cloud.google.com/go/storage v1.51.0 h1:ZVZ11zCiD7b3k+cH5lQs/qcNaoSz3U9I0jgwVzqDlCw= -cloud.google.com/go/storage v1.51.0/go.mod h1:YEJfu/Ki3i5oHC/7jyTgsGZwdQ8P9hqMqvpi5kRKGgc= -cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE= -cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= +cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= +cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= +cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= +cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/storage v1.61.3 h1:VS//ZfBuPGDvakfD9xyPW1RGF1Vy3BWUoVZXgW1KMOg= +cloud.google.com/go/storage v1.61.3/go.mod h1:JtqK8BBB7TWv0HVGHubtUdzYYrakOQIsMLffZ2Z/HWk= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4 h1:jWQK1GI+LeGGUKBADtcH2rRqPxYB1Ljwms5gFA2LqrM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4/go.mod h1:8mwH4klAm9DUgR2EEHyEEAQlRDvLPyg5fQry3y+cDew= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= @@ -44,12 +58,56 @@ github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+ github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8= +github.com/RaduBerinde/axisds v0.1.0/go.mod h1:UHGJonU9z4YYGKJxSaC6/TNcLOBptpmM5m2Cksbnw0Y= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 h1:bsU8Tzxr/PNz75ayvCnxKZWEYdLMPDkUgticP4a4Bvk= +github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54/go.mod h1:0tr7FllbE9gJkHq7CVeeDDFAFKQVy5RnCSSNBOvdqbc= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= +github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= +github.com/aws/aws-sdk-go-v2/config v1.32.12 h1:O3csC7HUGn2895eNrLytOJQdoL2xyJy0iYXhoZ1OmP0= +github.com/aws/aws-sdk-go-v2/config v1.32.12/go.mod h1:96zTvoOFR4FURjI+/5wY1vc1ABceROO4lWgWJuxgy0g= +github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8= +github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20/go.mod h1:YJ898MhD067hSHA6xYCx5ts/jEd8BSOLtQDL3iZsvbc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 h1:SwGMTMLIlvDNyhMteQ6r8IJSBPlRdXX5d4idhIGbkXA= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21/go.mod h1:UUxgWxofmOdAMuqEsSppbDtGKLfR04HGsD0HXzvhI1k= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 h1:qtJZ70afD3ISKWnoX3xB0J2otEqu3LqicRcDBqsj0hQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12/go.mod h1:v2pNpJbRNl4vEUWEh5ytQok0zACAKfdmKS51Hotc3pQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 h1:siU1A6xjUZ2N8zjTHSXFhB9L/2OY8Dqs0xXiLjF30jA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20/go.mod h1:4TLZCmVJDM3FOu5P5TJP0zOlu9zWgDWU7aUxWbr+rcw= +github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7MSNWeQ6eo247kE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.8/go.mod h1:LXypKvk85AROkKhOG6/YEcHFPoX+prKTowKnVdcaIxE= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.13 h1:kiIDLZ005EcKomYYITtfsjn7dtOwHDOFy7IbPXKek2o= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.13/go.mod h1:2h/xGEowcW/g38g06g3KpRWDlT+OTfxxI0o1KqayAB8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17 h1:jzKAXIlhZhJbnYwHbvUQZEB8KfgAEuG0dc08Bkda7NU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk= +github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= +github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/benbjohnson/clock v0.0.0-20160125162948-a620c1cc9866/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -73,16 +131,35 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b h1:SHlYZ/bMx7frnmeqCu+xm0TCxXLzX3jQIVuFbnFGtFU= +github.com/cockroachdb/crlib v0.0.0-20241112164430-1264a2edc35b/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5 h1:UycK/E0TkisVrQbSoxvU827FwgBBcZ95nRRmpj/12QI= +github.com/cockroachdb/datadriven v1.0.3-0.20250407164829-2945557346d5/go.mod h1:jsaKMvD3RBCATk1/jbUZM8C9idWBJME9+VRZ5+Liq1g= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble/v2 v2.1.4 h1:j9wPgMDbkErFdAKYFGhsoCcvzcjR+6zrJ4jhKtJ6bOk= +github.com/cockroachdb/pebble/v2 v2.1.4/go.mod h1:Reo1RTniv1UjVTAu/Fv74y5i3kJ5gmVrPhO9UtFiKn8= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b h1:VXvSNzmr8hMj8XTuY0PT9Ane9qZGul/p67vGYwl9BFI= +github.com/cockroachdb/swiss v0.0.0-20251224182025-b0f6560f979b/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-farm v0.0.0-20140601200337-fc41e106ee0e/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -90,14 +167,14 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= -github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= -github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= -github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= +github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= -github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= -github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= @@ -109,11 +186,17 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-faker/faker/v4 v4.6.0 h1:6aOPzNptRiDwD14HuAnEtlTa+D1IfFuEHO8+vEFwjTs= github.com/go-faker/faker/v4 v4.6.0/go.mod h1:ZmrHuVtTTm2Em9e0Du6CJ9CADaLEzGXW62z1YqFH0m0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -128,6 +211,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -135,8 +220,8 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -151,16 +236,16 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g= -github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8= +github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= +github.com/googleapis/gax-go/v2 v2.18.0 h1:jxP5Uuo3bxm3M6gGtV94P4lliVetoCB4Wk2x8QA86LI= +github.com/googleapis/gax-go/v2 v2.18.0/go.mod h1:uSzZN4a356eRG985CzJ3WfbFSpqkLTjsnhWGJR6EwrE= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -222,6 +307,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882 h1:0lgqHvJWHLGW5TuObJrfyEi6+ASTKDBWikGvPqy9Yiw= +github.com/minio/minlz v1.0.1-0.20250507153514-87eb42fe8882/go.mod h1:qT0aEB35q79LLornSzeDH75LBf3aH1MV+jB5w9Wasec= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -233,6 +320,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moedash/temporal-zfs v1.4.0 h1:WmS0Rmm0vcVW40I+CGQmEGrlkknWvluJ/RBP3kKaE7k= +github.com/moedash/temporal-zfs v1.4.0/go.mod h1:9WYzE+Lvb01sifVCa7NOY4MrzyLX/Oq54UUKGwVQjOc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= @@ -247,19 +336,25 @@ github.com/olivere/elastic/v7 v7.0.32/go.mod h1:c7PVmLe3Fxq77PIfY/bZmxY/TAamBhCz github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a h1:AA9vgIBDjMHPC2McaGPojgV2dcI78ZC0TLNhYCXEKH8= github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a/go.mod h1:lzZQ3Noex5pfAy7mkAeCjcBDteYU85uWWnJ/y6gKU8k= github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA= github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= @@ -277,6 +372,7 @@ github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfm github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -292,8 +388,8 @@ github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -342,18 +438,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= @@ -364,8 +458,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0u go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= go.opentelemetry.io/otel/exporters/prometheus v0.56.0 h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E= go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= @@ -374,8 +468,8 @@ go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4A go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.temporal.io/api v1.62.4 h1:XxstCG0LWfAqMsQAMk8kIx92l47FtJlIOKFWF3ydOUE= go.temporal.io/api v1.62.4/go.mod h1:iaxoP/9OXMJcQkETTECfwYq4cw/bj4nwov8b3ZLVnXM= go.temporal.io/sdk v1.38.0 h1:4Bok5LEdED7YKpsSjIa3dDqram5VOq+ydBf4pyx0Wo4= @@ -406,8 +500,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= +golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -427,8 +521,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -441,18 +535,18 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -473,8 +567,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -487,10 +581,10 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= -golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= +golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= +golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -505,28 +599,30 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.224.0 h1:Ir4UPtDsNiwIOHdExr3fAj4xZ42QjK7uQte3lORLJwU= -google.golang.org/api v0.224.0/go.mod h1:3V39my2xAGkodXy0vEqcEtkqgw2GtrFL5WuBZlCTCOQ= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= -google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= -google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/api v0.272.0 h1:eLUQZGnAS3OHn31URRf9sAmRk3w2JjMx37d2k8AjJmA= +google.golang.org/api v0.272.0/go.mod h1:wKjowi5LNJc5qarNvDCvNQBn3rVK8nSy6jg2SwRwzIA= +google.golang.org/genproto v0.0.0-20260217215200-42d3e9bedb6d h1:vsOm753cOAMkt76efriTCDKjpCbK18XGHMJHo0JUKhc= +google.golang.org/genproto v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:0oz9d7g9QLSdv9/lgbIjowW1JoxMbxmBVNe8i6tORJI= +google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d h1:EocjzKLywydp5uZ5tJ79iP6Q0UjDnyiHkGRWxuPBP8s= +google.golang.org/genproto/googleapis/api v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:48U2I+QQUYhsFrg2SY6r+nJzeOtjey7j//WBESw+qyQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260311181403-84a4fc48630c h1:xgCzyF2LFIO/0X2UAoVRiXKU5Xg6VjToG4i2/ecSswk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260311181403-84a4fc48630c/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/service/history/deletemanager/delete_manager.go b/service/history/deletemanager/delete_manager.go index 698b8b96be..34e61e34db 100644 --- a/service/history/deletemanager/delete_manager.go +++ b/service/history/deletemanager/delete_manager.go @@ -45,6 +45,12 @@ type ( ) error } + // PostDeleteHook is called after a workflow execution is successfully deleted. + // Implementations should be best-effort and not block deletion on failure. + PostDeleteHook interface { + AfterWorkflowDeletion(ctx context.Context, namespaceID string, workflowID string) + } + DeleteManagerImpl struct { shardContext historyi.ShardContext workflowCache wcache.Cache @@ -52,6 +58,7 @@ type ( metricsHandler metrics.Handler timeSource clock.TimeSource visibilityManager manager.VisibilityManager + postDeleteHooks []PostDeleteHook } ) @@ -63,6 +70,7 @@ func NewDeleteManager( config *configs.Config, timeSource clock.TimeSource, visibilityManager manager.VisibilityManager, + postDeleteHooks ...PostDeleteHook, ) *DeleteManagerImpl { deleteManager := &DeleteManagerImpl{ shardContext: shardContext, @@ -71,6 +79,7 @@ func NewDeleteManager( config: config, timeSource: timeSource, visibilityManager: visibilityManager, + postDeleteHooks: postDeleteHooks, } return deleteManager @@ -173,6 +182,11 @@ func (m *DeleteManagerImpl) deleteWorkflowExecutionInternal( // Clear workflow execution context here to prevent further readers to get stale copy of non-exiting workflow execution. weCtx.Clear() + // Notify post-delete hooks (best-effort, e.g., TFS DetachWorkflow). + for _, hook := range m.postDeleteHooks { + hook.AfterWorkflowDeletion(ctx, namespaceID.String(), we.GetWorkflowId()) + } + metrics.WorkflowCleanupDeleteCount.With(metricsHandler).Record(1) return nil } diff --git a/service/history/deletemanager/delete_manager_mock.go b/service/history/deletemanager/delete_manager_mock.go index f1f72488c8..a39e7e3692 100644 --- a/service/history/deletemanager/delete_manager_mock.go +++ b/service/history/deletemanager/delete_manager_mock.go @@ -85,3 +85,39 @@ func (mr *MockDeleteManagerMockRecorder) DeleteWorkflowExecutionByRetention(ctx, mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecutionByRetention", reflect.TypeOf((*MockDeleteManager)(nil).DeleteWorkflowExecutionByRetention), ctx, nsID, we, weCtx, ms, stage) } + +// MockPostDeleteHook is a mock of PostDeleteHook interface. +type MockPostDeleteHook struct { + ctrl *gomock.Controller + recorder *MockPostDeleteHookMockRecorder + isgomock struct{} +} + +// MockPostDeleteHookMockRecorder is the mock recorder for MockPostDeleteHook. +type MockPostDeleteHookMockRecorder struct { + mock *MockPostDeleteHook +} + +// NewMockPostDeleteHook creates a new mock instance. +func NewMockPostDeleteHook(ctrl *gomock.Controller) *MockPostDeleteHook { + mock := &MockPostDeleteHook{ctrl: ctrl} + mock.recorder = &MockPostDeleteHookMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPostDeleteHook) EXPECT() *MockPostDeleteHookMockRecorder { + return m.recorder +} + +// AfterWorkflowDeletion mocks base method. +func (m *MockPostDeleteHook) AfterWorkflowDeletion(ctx context.Context, namespaceID, workflowID string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AfterWorkflowDeletion", ctx, namespaceID, workflowID) +} + +// AfterWorkflowDeletion indicates an expected call of AfterWorkflowDeletion. +func (mr *MockPostDeleteHookMockRecorder) AfterWorkflowDeletion(ctx, namespaceID, workflowID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AfterWorkflowDeletion", reflect.TypeOf((*MockPostDeleteHook)(nil).AfterWorkflowDeletion), ctx, namespaceID, workflowID) +} diff --git a/service/history/fx.go b/service/history/fx.go index e37adf8907..60bedc680e 100644 --- a/service/history/fx.go +++ b/service/history/fx.go @@ -8,6 +8,7 @@ import ( "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/chasm" "go.temporal.io/server/chasm/lib/activity" + "go.temporal.io/server/chasm/lib/temporalzfs" "go.temporal.io/server/common" commoncache "go.temporal.io/server/common/cache" "go.temporal.io/server/common/clock" @@ -96,6 +97,7 @@ var Module = fx.Options( nexusoperations.Module, fx.Invoke(nexusworkflow.RegisterCommandHandlers), activity.HistoryModule, + temporalzfs.HistoryModule, ) func ServerProvider(grpcServerOptions []grpc.ServerOption) *grpc.Server { diff --git a/tests/temporalzfs_test.go b/tests/temporalzfs_test.go new file mode 100644 index 0000000000..c530764832 --- /dev/null +++ b/tests/temporalzfs_test.go @@ -0,0 +1,319 @@ +package tests + +// TestTemporalZFS_ResearchAgent exercises TemporalZFS through a real Temporal +// server with CHASM enabled. It injects the TemporalZFS fx module into the +// history service, extracts the FSStoreProvider via fx.Populate, and creates +// a real filesystem backed by PebbleDB through the full server wiring. +// +// This verifies that the TemporalZFS fx module correctly wires into the CHASM +// registry, the PebbleStoreProvider functions correctly under the server's +// lifecycle, and the full FS API (Mkdir, WriteFile, ReadFile, CreateSnapshot, +// OpenSnapshot, ReadDir, ListSnapshots) works end-to-end. +// +// Run: +// +// go test ./tests/ -run TestTemporalZFS -v -count 1 +// +// Architecture: FunctionalTestBase → HistoryService(TemporalZFS HistoryModule) → +// PebbleStoreProvider → store.Store → tzfs.FS + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + tzfs "github.com/temporalio/temporal-zfs/pkg/fs" + sdkclient "go.temporal.io/sdk/client" + "go.temporal.io/sdk/workflow" + "go.temporal.io/server/chasm/lib/temporalzfs" + "go.temporal.io/server/common/debug" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/tests/testcore" + "go.uber.org/fx" +) + +type TemporalZFSTestSuite struct { + testcore.FunctionalTestBase //nolint:forbidigo // NewEnv doesn't support WithFxOptionsForService needed for fx.Populate + storeProvider temporalzfs.FSStoreProvider +} + +func TestTemporalZFS(t *testing.T) { + t.Parallel() + suite.Run(t, new(TemporalZFSTestSuite)) +} + +func (s *TemporalZFSTestSuite) SetupSuite() { + s.SetupSuiteWithCluster( //nolint:forbidigo // NewEnv doesn't support WithFxOptionsForService + testcore.WithDynamicConfigOverrides(map[dynamicconfig.Key]any{ + dynamicconfig.EnableChasm.Key(): true, + }), + // TemporalZFS HistoryModule is already registered in service/history/fx.go. + // We only need fx.Populate to extract the FSStoreProvider from the graph. + testcore.WithFxOptionsForService(primitives.HistoryService, + fx.Populate(&s.storeProvider), + ), + ) +} + +func (s *TemporalZFSTestSuite) TearDownSuite() { + s.FunctionalTestBase.TearDownSuite() //nolint:forbidigo // NewEnv doesn't support WithFxOptionsForService +} + +// TestResearchAgent_RealServer runs the 3-iteration research agent scenario +// through a real Temporal server's TemporalZFS subsystem. +func (s *TemporalZFSTestSuite) TestResearchAgent_RealServer() { + t := s.T() + + // Content for each iteration. + sourcesV1 := []byte("# Sources v1\n1. Feynman (1982)\n2. Shor (1994)\n") + sourcesV2 := []byte("# Sources v2\n1. Feynman (1982)\n2. Shor (1994)\n3. Preskill (2018)\n") + analysisContent := []byte("# Analysis\nQuantum error correction is the bottleneck.\n") + reportContent := []byte("# Final Report\nQC has reached an inflection point.\n") + + // Create a real FS through the server's PebbleStoreProvider. + store, err := s.storeProvider.GetStore(1, s.NamespaceID().String(), "research-agent-fs") + s.NoError(err) + + f, err := tzfs.Create(store, tzfs.Options{}) + s.NoError(err) + defer func() { s.NoError(f.Close()) }() + + // ─── Iteration 1: Gather Sources ───────────────────────────────────── + + s.NoError(f.Mkdir("/research", 0o755)) + s.NoError(f.Mkdir("/research/quantum-computing", 0o755)) + s.NoError(f.WriteFile("/research/quantum-computing/sources.md", sourcesV1, 0o644)) + + snap1, err := f.CreateSnapshot("step-1-sources") + s.NoError(err) + assert.Equal(t, "step-1-sources", snap1.Name) + + // ─── Iteration 2: Analyze & Synthesize ─────────────────────────────── + + s.NoError(f.WriteFile("/research/quantum-computing/sources.md", sourcesV2, 0o644)) + s.NoError(f.WriteFile("/research/quantum-computing/analysis.md", analysisContent, 0o644)) + + snap2, err := f.CreateSnapshot("step-2-analysis") + s.NoError(err) + assert.Greater(t, snap2.TxnID, snap1.TxnID) + + // ─── Iteration 3: Final Report ─────────────────────────────────────── + + s.NoError(f.WriteFile("/research/quantum-computing/report.md", reportContent, 0o644)) + + snap3, err := f.CreateSnapshot("step-3-final") + s.NoError(err) + assert.Greater(t, snap3.TxnID, snap2.TxnID) + + // ─── Verify current filesystem state ───────────────────────────────── + + gotSources, err := f.ReadFile("/research/quantum-computing/sources.md") + s.NoError(err) + assert.Equal(t, sourcesV2, gotSources) + + gotAnalysis, err := f.ReadFile("/research/quantum-computing/analysis.md") + s.NoError(err) + assert.Equal(t, analysisContent, gotAnalysis) + + gotReport, err := f.ReadFile("/research/quantum-computing/report.md") + s.NoError(err) + assert.Equal(t, reportContent, gotReport) + + entries, err := f.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, entries, 3) + + // ─── Verify snapshot 1: step-1-sources ─────────────────────────────── + + snap1FS, err := f.OpenSnapshot("step-1-sources") + s.NoError(err) + defer func() { s.NoError(snap1FS.Close()) }() + + snap1Sources, err := snap1FS.ReadFile("/research/quantum-computing/sources.md") + s.NoError(err) + assert.Equal(t, sourcesV1, snap1Sources, "snapshot 1 should have sources.md v1") + + _, err = snap1FS.ReadFile("/research/quantum-computing/analysis.md") + s.ErrorIs(err, tzfs.ErrNotFound, "snapshot 1 should NOT have analysis.md") + + snap1Entries, err := snap1FS.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, snap1Entries, 1, "snapshot 1 should have 1 file") + + // ─── Verify snapshot 2: step-2-analysis ────────────────────────────── + + snap2FS, err := f.OpenSnapshot("step-2-analysis") + s.NoError(err) + defer func() { s.NoError(snap2FS.Close()) }() + + snap2Sources, err := snap2FS.ReadFile("/research/quantum-computing/sources.md") + s.NoError(err) + assert.Equal(t, sourcesV2, snap2Sources, "snapshot 2 should have sources.md v2") + + _, err = snap2FS.ReadFile("/research/quantum-computing/report.md") + s.ErrorIs(err, tzfs.ErrNotFound, "snapshot 2 should NOT have report.md") + + snap2Entries, err := snap2FS.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, snap2Entries, 2, "snapshot 2 should have 2 files") + + // ─── Verify snapshot 3: step-3-final ───────────────────────────────── + + snap3FS, err := f.OpenSnapshot("step-3-final") + s.NoError(err) + defer func() { s.NoError(snap3FS.Close()) }() + + snap3Entries, err := snap3FS.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, snap3Entries, 3, "snapshot 3 should have 3 files") + + // ─── Verify snapshot listing ───────────────────────────────────────── + + snapshots, err := f.ListSnapshots() + s.NoError(err) + s.Len(snapshots, 3) + assert.Equal(t, "step-1-sources", snapshots[0].Name) + assert.Equal(t, "step-2-analysis", snapshots[1].Name) + assert.Equal(t, "step-3-final", snapshots[2].Name) + + // ─── Verify metrics ────────────────────────────────────────────────── + + m := f.Metrics() + assert.Equal(t, int64(3), m.FilesCreated.Load(), "3 files created") + assert.Equal(t, int64(2), m.DirsCreated.Load(), "2 dirs created") + assert.Positive(t, m.BytesWritten.Load()) +} + +// TestResearchAgent_Workflow runs the research agent as a real Temporal workflow +// with activities. Each step of the research agent is an activity that operates +// on TemporalZFS. The workflow orchestrates the 3 steps sequentially. After the +// workflow completes, the test verifies MVCC snapshot isolation. +// +// This demonstrates the real-world pattern: a Temporal workflow orchestrating +// an AI agent whose activities read/write a durable versioned filesystem. +func (s *TemporalZFSTestSuite) TestResearchAgent_Workflow() { + t := s.T() + + sourcesV1 := []byte("# Sources v1\n1. Feynman (1982)\n2. Shor (1994)\n") + sourcesV2 := []byte("# Sources v2\n1. Feynman (1982)\n2. Shor (1994)\n3. Preskill (2018)\n") + analysisContent := []byte("# Analysis\nQuantum error correction is the bottleneck.\n") + reportContent := []byte("# Final Report\nQC has reached an inflection point.\n") + + // Create FS backed by the real server's PebbleStoreProvider. + store, err := s.storeProvider.GetStore(1, s.NamespaceID().String(), "research-wf-fs") + s.NoError(err) + + f, err := tzfs.Create(store, tzfs.Options{}) + s.NoError(err) + defer func() { s.NoError(f.Close()) }() + + // ─── Define activities ─────────────────────────────────────────────── + // Each activity performs one step of the research agent workflow. + // Activities share the FS instance via closure (in-process worker). + + gatherSources := func(ctx context.Context) error { + if err := f.Mkdir("/research", 0o755); err != nil { + return err + } + if err := f.Mkdir("/research/quantum-computing", 0o755); err != nil { + return err + } + if err := f.WriteFile("/research/quantum-computing/sources.md", sourcesV1, 0o644); err != nil { + return err + } + _, err := f.CreateSnapshot("step-1-sources") + return err + } + + analyzeSources := func(ctx context.Context) error { + if err := f.WriteFile("/research/quantum-computing/sources.md", sourcesV2, 0o644); err != nil { + return err + } + if err := f.WriteFile("/research/quantum-computing/analysis.md", analysisContent, 0o644); err != nil { + return err + } + _, err := f.CreateSnapshot("step-2-analysis") + return err + } + + writeFinalReport := func(ctx context.Context) error { + if err := f.WriteFile("/research/quantum-computing/report.md", reportContent, 0o644); err != nil { + return err + } + _, err := f.CreateSnapshot("step-3-final") + return err + } + + // ─── Define workflow ───────────────────────────────────────────────── + + researchAgentWorkflow := func(ctx workflow.Context) error { + ao := workflow.ActivityOptions{ + StartToCloseTimeout: 30 * time.Second * debug.TimeoutMultiplier, + } + ctx = workflow.WithActivityOptions(ctx, ao) + + if err := workflow.ExecuteActivity(ctx, gatherSources).Get(ctx, nil); err != nil { + return err + } + if err := workflow.ExecuteActivity(ctx, analyzeSources).Get(ctx, nil); err != nil { + return err + } + return workflow.ExecuteActivity(ctx, writeFinalReport).Get(ctx, nil) + } + + // ─── Register and execute ──────────────────────────────────────────── + + s.Worker().RegisterWorkflow(researchAgentWorkflow) + s.Worker().RegisterActivity(gatherSources) + s.Worker().RegisterActivity(analyzeSources) + s.Worker().RegisterActivity(writeFinalReport) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second*debug.TimeoutMultiplier) + defer cancel() + + run, err := s.SdkClient().ExecuteWorkflow(ctx, sdkclient.StartWorkflowOptions{ + ID: "research-agent-workflow", + TaskQueue: s.TaskQueue(), + }, researchAgentWorkflow) + s.NoError(err) + s.NoError(run.Get(ctx, nil)) + + // ─── Verify FS state after workflow completion ─────────────────────── + + entries, err := f.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, entries, 3, "workflow should have created 3 files") + + gotSources, err := f.ReadFile("/research/quantum-computing/sources.md") + s.NoError(err) + assert.Equal(t, sourcesV2, gotSources) + + // Verify MVCC snapshot isolation. + snap1FS, err := f.OpenSnapshot("step-1-sources") + s.NoError(err) + snap1Data, err := snap1FS.ReadFile("/research/quantum-computing/sources.md") + s.NoError(err) + assert.Equal(t, sourcesV1, snap1Data, "snapshot 1 should have v1") + s.NoError(snap1FS.Close()) + + snap2FS, err := f.OpenSnapshot("step-2-analysis") + s.NoError(err) + snap2Entries, err := snap2FS.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, snap2Entries, 2, "snapshot 2 should have 2 files") + s.NoError(snap2FS.Close()) + + snap3FS, err := f.OpenSnapshot("step-3-final") + s.NoError(err) + snap3Entries, err := snap3FS.ReadDir("/research/quantum-computing") + s.NoError(err) + assert.Len(t, snap3Entries, 3, "snapshot 3 should have 3 files") + s.NoError(snap3FS.Close()) + + snapshots, err := f.ListSnapshots() + s.NoError(err) + s.Len(snapshots, 3) +}