diff --git a/core/config/capabilities_config.go b/core/config/capabilities_config.go index 16582a74e21..430d8cf785f 100644 --- a/core/config/capabilities_config.go +++ b/core/config/capabilities_config.go @@ -33,6 +33,7 @@ type CapabilitiesWorkflowRegistry interface { RelayID() types.RelayID SyncStrategy() string WorkflowStorage() WorkflowStorage + AlternativeSources() []AlternativeWorkflowSource } type WorkflowStorage interface { @@ -41,6 +42,14 @@ type WorkflowStorage interface { TLSEnabled() bool } +// AlternativeWorkflowSource represents a single alternative workflow metadata source +// that can be configured to load workflows from sources other than the on-chain registry. +type AlternativeWorkflowSource interface { + GetURL() string + GetTLSEnabled() bool + GetName() string +} + type GatewayConnector interface { ChainIDForNodeKey() string NodeAddress() string diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml index 72e4cf1f2af..e1d2ee452cf 100644 --- a/core/config/docs/core.toml +++ b/core/config/docs/core.toml @@ -528,6 +528,15 @@ TLSEnabled = true # Default # ArtifactStorageHost is the host name that, when present within the workflow metadata binary or config URL, designates that a signed URL should be retrieved from the workflow storage service. ArtifactStorageHost = 'artifact.cre.chain.link' # Example +[[Capabilities.WorkflowRegistry.AlternativeSources]] +# URL is the GRPC endpoint for the alternative workflow metadata source. +# This allows workflows to be loaded from sources other than the on-chain registry contract. +URL = 'localhost:50051' # Example +# TLSEnabled enables TLS for the GRPC connection. Defaults to true. +TLSEnabled = true # Default +# Name is a human-readable identifier for logging purposes. +Name = 'my-workflow-source' # Example + [Workflows] [Workflows.Limits] # Global is the maximum number of workflows that can be registered globally. diff --git a/core/config/toml/types.go b/core/config/toml/types.go index 74800fcb5a5..9850b27cf3e 100644 --- a/core/config/toml/types.go +++ b/core/config/toml/types.go @@ -2197,16 +2197,60 @@ func (s *WorkflowStorage) ValidateConfig() error { return nil } +// AlternativeWorkflowSource represents a single alternative workflow metadata source +// configured via TOML. This allows workflows to be loaded from sources other than +// the on-chain registry contract (e.g., a GRPC service). +type AlternativeWorkflowSource struct { + URL *string `toml:"URL"` + TLSEnabled *bool `toml:"TLSEnabled"` + Name *string `toml:"Name"` // Human-readable name for logging +} + +func (a *AlternativeWorkflowSource) setFrom(f *AlternativeWorkflowSource) { + if f.URL != nil { + a.URL = f.URL + } + if f.TLSEnabled != nil { + a.TLSEnabled = f.TLSEnabled + } + if f.Name != nil { + a.Name = f.Name + } +} + +// GetURL implements config.AlternativeWorkflowSource. 
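+// It returns "" when no URL is configured; ValidateConfig rejects sources
+// without a URL, so a validated source always has a non-empty URL.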
+func (a AlternativeWorkflowSource) GetURL() string { + if a.URL == nil { + return "" + } + return *a.URL +} + +func (a AlternativeWorkflowSource) GetTLSEnabled() bool { + if a.TLSEnabled == nil { + return true // Default to enabled + } + return *a.TLSEnabled +} + +func (a AlternativeWorkflowSource) GetName() string { + if a.Name == nil { + return "GRPCWorkflowSource" + } + return *a.Name +} + type WorkflowRegistry struct { - Address *string - NetworkID *string - ChainID *string - ContractVersion *string - MaxBinarySize *utils.FileSize - MaxEncryptedSecretsSize *utils.FileSize - MaxConfigSize *utils.FileSize - SyncStrategy *string - WorkflowStorage WorkflowStorage + Address *string + NetworkID *string + ChainID *string + ContractVersion *string + MaxBinarySize *utils.FileSize + MaxEncryptedSecretsSize *utils.FileSize + MaxConfigSize *utils.FileSize + SyncStrategy *string + WorkflowStorage WorkflowStorage + AlternativeSourcesConfig []AlternativeWorkflowSource `toml:"AlternativeSources"` } func (r *WorkflowRegistry) setFrom(f *WorkflowRegistry) { @@ -2243,6 +2287,49 @@ func (r *WorkflowRegistry) setFrom(f *WorkflowRegistry) { } r.WorkflowStorage.setFrom(&f.WorkflowStorage) + + if len(f.AlternativeSourcesConfig) > 0 { + r.AlternativeSourcesConfig = make([]AlternativeWorkflowSource, len(f.AlternativeSourcesConfig)) + for i := range f.AlternativeSourcesConfig { + r.AlternativeSourcesConfig[i].setFrom(&f.AlternativeSourcesConfig[i]) + } + } +} + +// MaxAlternativeSources is the maximum number of alternative workflow sources +const MaxAlternativeSources = 5 + +func (r *WorkflowRegistry) ValidateConfig() error { + if err := r.WorkflowStorage.ValidateConfig(); err != nil { + return err + } + + if len(r.AlternativeSourcesConfig) > MaxAlternativeSources { + return configutils.ErrInvalid{ + Name: "AlternativeSources", + Value: len(r.AlternativeSourcesConfig), + Msg: fmt.Sprintf("maximum %d alternative sources supported", MaxAlternativeSources), + } + } + + // Validate each source has a URL + for i, src := range r.AlternativeSourcesConfig { + if src.URL == nil || *src.URL == "" { + return configutils.ErrMissing{Name: fmt.Sprintf("AlternativeSources[%d].URL", i)} + } + } + + return nil +} + +// AlternativeSources returns the list of alternative workflow sources. +// Implements config.CapabilitiesWorkflowRegistry. 
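+// The getters use value receivers, so each element can be stored as a plain
+// value copy that satisfies the config.AlternativeWorkflowSource interface.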
+func (r WorkflowRegistry) AlternativeSources() []config.AlternativeWorkflowSource { + result := make([]config.AlternativeWorkflowSource, len(r.AlternativeSourcesConfig)) + for i := range r.AlternativeSourcesConfig { + result[i] = r.AlternativeSourcesConfig[i] + } + return result } type Dispatcher struct { diff --git a/core/platform/monitoring.go b/core/platform/monitoring.go index 6745a2a3635..be5dcb6c727 100644 --- a/core/platform/monitoring.go +++ b/core/platform/monitoring.go @@ -35,6 +35,10 @@ const ( EngineVersion = "engineVersion" CapabilitiesRegistryVersion = "capabilitiesRegistryVersion" DonVersion = "donVersion" + + // WorkflowSource identifies where the workflow was deployed from + // e.g., "contract", "grpc:my-source", "file" + KeyWorkflowSource = "workflowSource" ) func LabelKeysSorted() iter.Seq[string] { diff --git a/core/scripts/cre/environment/README.md b/core/scripts/cre/environment/README.md index a9de9a4085a..9516021bc64 100644 --- a/core/scripts/cre/environment/README.md +++ b/core/scripts/cre/environment/README.md @@ -23,6 +23,16 @@ Slack: #topic-local-dev-environments - [Debugging core nodes](#debugging-core-nodes) - [Debugging capabilities (mac)](#debugging-capabilities-mac) - [Workflow Commands](#workflow-commands) + - [Alternative Workflow Sources](#alternative-workflow-sources) + - [Overview](#alternative-sources-overview) + - [Configuration](#alternative-sources-configuration) + - [File Source JSON Format](#file-source-json-format) + - [Helper Tool: generate_file_source](#helper-tool-generate_file_source) + - [Deploying a File-Source Workflow](#deploying-a-file-source-workflow) + - [Mixed Sources (Contract + File)](#mixed-sources-contract--file) + - [Pausing and Deleting File-Source Workflows](#pausing-and-deleting-file-source-workflows) + - [Key Behaviors](#alternative-sources-key-behaviors) + - [Debugging Alternative Sources](#debugging-alternative-sources) - [Further use](#further-use) - [Advanced Usage](#advanced-usage) - [Testing Billing](#testing-billing) @@ -382,6 +392,306 @@ This command uses default values and is useful for testing the workflow deployme --- +## Alternative Workflow Sources + +The workflow registry syncer supports multiple sources of workflow metadata beyond the on-chain contract. This enables flexible deployment scenarios including pure file-based or GRPC-based workflow deployments. + +### Alternative Sources Overview + +Three source types are supported: + +1. **ContractWorkflowSource** (optional): Reads from the on-chain workflow registry contract +2. **GRPCWorkflowSource** (alternative): Fetches from external GRPC services +3. **FileWorkflowSource** (alternative): Reads from a local JSON file + +**Key Features:** +- Contract source is optional - enables pure GRPC-only or file-only deployments +- All alternative sources (GRPC and file) are configured via unified `AlternativeSources` config +- Source type is auto-detected by URL scheme (`file://` for file, otherwise GRPC) + +### Alternative Sources Configuration + +All alternative sources are configured via the `AlternativeSources` config in TOML. The source type is auto-detected based on the URL scheme: + +**File source (detected by `file://` prefix):** +```toml +[WorkflowRegistry] +Address = "0x1234..." # Optional - leave empty for pure file-only deployments + +[[WorkflowRegistry.AlternativeSources]] +Name = "local-file" +URL = "file:///tmp/workflows_metadata.json" +``` + +**GRPC source (URL without `file://` prefix):** +```toml +[WorkflowRegistry] +Address = "0x1234..." 
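+# URL has no file:// prefix, so this source is auto-detected as gRPC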
+
+[[WorkflowRegistry.AlternativeSources]]
+Name = "private-registry"
+URL = "grpc.private-registry.example.com:443"
+TLSEnabled = true
+```
+
+**Pure GRPC-only deployment (no contract):**
+```toml
+[WorkflowRegistry]
+# No Address = no contract source
+
+[[WorkflowRegistry.AlternativeSources]]
+Name = "private-registry"
+URL = "grpc.private-registry.example.com:443"
+TLSEnabled = true
+```
+
+### File Source JSON Format
+
+The file source reads from the path specified in the URL (e.g., `/tmp/workflows_metadata.json`).
+
+**JSON Schema:**
+```json
+{
+  "workflows": [
+    {
+      "workflow_id": "<32-byte hex string without 0x prefix>",
+      "owner": "<20-byte hex address without 0x prefix>",
+      "created_at": "<unix timestamp, integer>",
+      "status": "<0=active, 1=paused>",
+      "workflow_name": "<workflow name>",
+      "binary_url": "<URL of the workflow binary>",
+      "config_url": "<URL of the workflow config>",
+      "tag": "<version tag>",
+      "attributes": "<optional attributes>",
+      "don_family": "<DON family name>"
+    }
+  ]
+}
+```
+
+**Example:**
+```json
+{
+  "workflows": [
+    {
+      "workflow_id": "0102030405060708091011121314151617181920212223242526272829303132",
+      "owner": "f39fd6e51aad88f6f4ce6ab8827279cfffb92266",
+      "created_at": 1733250000,
+      "status": 0,
+      "workflow_name": "my-file-workflow",
+      "binary_url": "file:///home/chainlink/workflows/my_workflow.wasm",
+      "config_url": "file:///home/chainlink/workflows/my_config.json",
+      "tag": "v1.0.0",
+      "don_family": "workflow"
+    }
+  ]
+}
+```
+
+See [examples/workflows_metadata_example.json](./examples/workflows_metadata_example.json) for a reference file.
+
+### Helper Tool: generate_file_source
+
+A helper tool is provided to generate the workflow metadata JSON with the correct workflowID (which is a hash of the workflow artifacts):
+
+```bash
+cd core/scripts/cre/environment
+go run ./cmd/generate_file_source \
+  --binary /path/to/workflow.wasm \
+  --config /path/to/config.json \
+  --name my-workflow \
+  --owner f39fd6e51aad88f6f4ce6ab8827279cfffb92266 \
+  --output /tmp/workflows_metadata.json \
+  --don-family workflow
+```
+
+**Additional flags:**
+- `--binary-url-prefix`: Prefix for the binary URL in the output (e.g., `file:///home/chainlink/workflows/`)
+- `--config-url-prefix`: Prefix for the config URL in the output
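+
+If you script metadata generation yourself, you can compute the same ID
+programmatically. Below is a minimal sketch using the `GenerateWorkflowID`
+helper that the tool itself calls; the paths, name, and owner are illustrative
+placeholders, not fixed values:
+
+```go
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+	"os"
+
+	pkgworkflows "github.com/smartcontractkit/chainlink-common/pkg/workflows"
+)
+
+func main() {
+	// Illustrative inputs - substitute your own artifacts and owner.
+	binary, err := os.ReadFile("/tmp/cron_contract.wasm") // decompressed WASM bytes
+	if err != nil {
+		panic(err)
+	}
+	config := []byte("{}") // illustrative config bytes; may be nil when there is no config
+	owner, err := hex.DecodeString("f39fd6e51aad88f6f4ce6ab8827279cfffb92266")
+	if err != nil {
+		panic(err)
+	}
+
+	// Same derivation the tool performs: the ID is a hash over owner, name,
+	// binary, config, and secrets URL (empty here).
+	id, err := pkgworkflows.GenerateWorkflowID(owner, "file_source_cron", binary, config, "")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(hex.EncodeToString(id[:]))
+}
+```
+
+Because the ID is a hash of these inputs, changing any of them produces a
+different workflow ID, so the metadata file must be regenerated whenever an
+artifact changes.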
+### Deploying a File-Source Workflow
+
+This walkthrough demonstrates deploying a workflow via file source in a local CRE environment.
+
+**Prerequisites:**
+- Local CRE environment set up
+- Docker running
+- Go toolchain installed
+
+**Step-by-step:**
+
+```bash
+# 1. Start the environment
+cd core/scripts/cre/environment
+go run . env start --auto-setup
+
+# 2. Deploy a workflow via contract first (this creates the compiled binary in containers)
+go run . workflow deploy -w ./examples/workflows/v2/cron/main.go -n cron_contract
+
+# 3. Get the existing workflow binary from a container
+docker cp workflow-node1:/home/chainlink/workflows/cron_contract.wasm /tmp/cron_contract.wasm
+
+# 4. Generate the file source metadata with a DIFFERENT workflow name
+go run ./cmd/generate_file_source \
+  --binary /tmp/cron_contract.wasm \
+  --name file_source_cron \
+  --owner f39fd6e51aad88f6f4ce6ab8827279cfffb92266 \
+  --output /tmp/workflows_metadata.json \
+  --don-family workflow \
+  --binary-url-prefix "file:///home/chainlink/workflows/" \
+  --config-url-prefix "file:///home/chainlink/workflows/"
+
+# The metadata's binary_url/config_url point at file_source_workflow.* under
+# /home/chainlink/workflows/, so steps 5-6 must copy the artifacts to exactly
+# those paths on every node.
+
+# 5. Copy the binary to all containers with a new name
+docker cp /tmp/cron_contract.wasm workflow-node1:/home/chainlink/workflows/file_source_workflow.wasm
+docker cp /tmp/cron_contract.wasm workflow-node2:/home/chainlink/workflows/file_source_workflow.wasm
+docker cp /tmp/cron_contract.wasm workflow-node3:/home/chainlink/workflows/file_source_workflow.wasm
+docker cp /tmp/cron_contract.wasm workflow-node4:/home/chainlink/workflows/file_source_workflow.wasm
+docker cp /tmp/cron_contract.wasm workflow-node5:/home/chainlink/workflows/file_source_workflow.wasm
+
+# 6. Create an empty config file and copy to all containers
+echo '{}' > /tmp/file_source_config.json
+docker cp /tmp/file_source_config.json workflow-node1:/home/chainlink/workflows/file_source_config.json
+docker cp /tmp/file_source_config.json workflow-node2:/home/chainlink/workflows/file_source_config.json
+docker cp /tmp/file_source_config.json workflow-node3:/home/chainlink/workflows/file_source_config.json
+docker cp /tmp/file_source_config.json workflow-node4:/home/chainlink/workflows/file_source_config.json
+docker cp /tmp/file_source_config.json workflow-node5:/home/chainlink/workflows/file_source_config.json
+
+# 7. Copy the metadata file to all nodes
+docker cp /tmp/workflows_metadata.json workflow-node1:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata.json workflow-node2:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata.json workflow-node3:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata.json workflow-node4:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata.json workflow-node5:/tmp/workflows_metadata.json
+
+# 8. Wait for the syncer to pick up the workflow (default 12-second interval)
+# Check logs for "Loaded workflows from file" messages
+docker logs workflow-node1 2>&1 | grep -i "file"
+
+# 9. Verify the workflow is running
+docker logs workflow-node1 2>&1 | grep -i "workflow engine"
+```
+
+### Mixed Sources (Contract + File)
+
+You can run both contract-deployed and file-source workflows simultaneously:
+
+```bash
+# 1. Deploy a workflow via contract
+go run . workflow deploy -w ./examples/workflows/v2/cron/main.go -n contract_workflow
+
+# 2. Add a different workflow via file source (follow steps 3-7 from above)
+
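+# The syncer merges contract and file workflows into a single aggregated list
+# on each sync cycle, so both show up in the check below
+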
+# 3. Verify both workflows are running
+docker logs workflow-node1 2>&1 | grep -i "Aggregated workflows from all sources"
+# Should show totalWorkflows: 2
+```
+
+### Pausing and Deleting File-Source Workflows
+
+**Pausing a workflow** - Change the `status` field to `1`:
+
+```bash
+# Create updated metadata with status=1 (paused)
+cat > /tmp/workflows_metadata_paused.json << 'EOF'
+{
+  "workflows": [
+    {
+      "workflow_id": "<workflow_id printed by generate_file_source>",
+      "owner": "f39fd6e51aad88f6f4ce6ab8827279cfffb92266",
+      "status": 1,
+      "workflow_name": "file_source_cron",
+      "binary_url": "file:///home/chainlink/workflows/file_source_workflow.wasm",
+      "config_url": "file:///home/chainlink/workflows/file_source_config.json",
+      "don_family": "workflow"
+    }
+  ]
+}
+EOF
+
+# Copy to all nodes
+docker cp /tmp/workflows_metadata_paused.json workflow-node1:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata_paused.json workflow-node2:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata_paused.json workflow-node3:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata_paused.json workflow-node4:/tmp/workflows_metadata.json
+docker cp /tmp/workflows_metadata_paused.json workflow-node5:/tmp/workflows_metadata.json
+
+# Wait for the syncer to detect the change
+docker logs workflow-node1 2>&1 | grep -i "paused"
+```
+
+**Deleting a workflow** - Remove it from the JSON file:
+
+```bash
+# Create an empty metadata file
+echo '{"workflows":[]}' > /tmp/empty_metadata.json
+
+# Copy to all nodes
+docker cp /tmp/empty_metadata.json workflow-node1:/tmp/workflows_metadata.json
+docker cp /tmp/empty_metadata.json workflow-node2:/tmp/workflows_metadata.json
+docker cp /tmp/empty_metadata.json workflow-node3:/tmp/workflows_metadata.json
+docker cp /tmp/empty_metadata.json workflow-node4:/tmp/workflows_metadata.json
+docker cp /tmp/empty_metadata.json workflow-node5:/tmp/workflows_metadata.json
+
+# Contract workflows continue running; the file-source workflow is removed
+```
+
+### Alternative Sources Key Behaviors
+
+**Source Aggregation:**
+- Workflows from all sources are merged into a single list
+- Only ContractWorkflowSource provides a real blockchain head (block height/hash)
+- For pure alternative-source deployments, a synthetic head is created (Unix timestamp)
+- If one source fails, the others continue to work (graceful degradation)
+
+**Contract Source Optional:**
+- If no contract address is configured, the contract source is skipped
+- Enables pure GRPC-only or file-only workflow deployments
+- Synthetic heads are used when no contract source is present
+
+**File Source Characteristics:**
+- File is read on every sync interval (default 12 seconds)
+- Missing file = empty workflow list (not an error)
+- Invalid JSON entries are skipped with a warning
+- File source is always "ready" (unlike the contract source, which needs initialization)
+
+**GRPC Source:**
+- Supports JWT-based authentication
+- Includes automatic retry logic with exponential backoff (max 2 retries, 100ms-5s delay)
+- Only transient errors (Unavailable, ResourceExhausted) are retried
+
+**Source Tracking:**
+- Each workflow includes a `Source` field identifying where it was deployed from
+- Source identifiers: `ContractWorkflowSource`, `FileWorkflowSource`, `GRPCWorkflowSource`
+
+### Debugging Alternative Sources
+
+**Check if the file source is being read:**
+```bash
+docker logs workflow-node1 2>&1 | grep "Loaded workflows from file"
+docker logs workflow-node1 2>&1 | grep "Workflow metadata file does not exist"
+```
+
+**Check aggregated workflows:**
+```bash
+docker 
logs workflow-node1 2>&1 | grep "Aggregated workflows from all sources" +docker logs workflow-node1 2>&1 | grep "fetching workflow metadata from all sources" +``` + +**Verify workflow engine started:** +```bash +docker logs workflow-node1 2>&1 | grep "Creating Workflow Engine for workflow spec" +``` + +**Key log messages:** +- `"Loaded workflows from file"` - File was successfully read +- `"Workflow metadata file does not exist"` - File doesn't exist (normal if not using file source) +- `"Source not ready, skipping"` - Contract source not yet initialized +- `"Aggregated workflows from all sources"` with `totalWorkflows` count - Sync completed +- `"All workflow sources failed - will retry next cycle"` (WARN) - All sources failed +- `"Failed to fetch workflows from source"` (ERROR) - Individual source failure + +--- + ## Further use To manage workflows you will need the CRE CLI. You can either: - download it from [smartcontract/dev-platform](https://github.com/smartcontractkit/dev-platform/releases/tag/v0.2.0) or diff --git a/core/scripts/cre/environment/cmd/generate_file_source/main.go b/core/scripts/cre/environment/cmd/generate_file_source/main.go new file mode 100644 index 00000000000..aec33fc2d5f --- /dev/null +++ b/core/scripts/cre/environment/cmd/generate_file_source/main.go @@ -0,0 +1,191 @@ +// Command generate_file_source creates a workflow metadata JSON file for the file-based workflow source. +// This tool generates the correct workflowID based on the binary, config, owner, and name. +// +// The binary file should be in .br.b64 format (base64-encoded brotli-compressed WASM). +// This is the format used by the workflow deploy command. +// +// Usage: +// +// go run ./cmd/generate_file_source \ +// --binary /path/to/workflow.br.b64 \ +// --config /path/to/config.yaml \ +// --name my-workflow \ +// --owner f39fd6e51aad88f6f4ce6ab8827279cfffb92266 \ +// --output /tmp/workflows_metadata.json \ +// --don-family workflow +package main + +import ( + "encoding/base64" + "encoding/hex" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/andybalholm/brotli" + + pkgworkflows "github.com/smartcontractkit/chainlink-common/pkg/workflows" +) + +type FileWorkflowMetadata struct { + WorkflowID string `json:"workflow_id"` + Owner string `json:"owner"` + CreatedAt uint64 `json:"created_at"` + Status uint8 `json:"status"` + WorkflowName string `json:"workflow_name"` + BinaryURL string `json:"binary_url"` + ConfigURL string `json:"config_url"` + Tag string `json:"tag"` + DonFamily string `json:"don_family"` +} + +type FileWorkflowSourceData struct { + Workflows []FileWorkflowMetadata `json:"workflows"` +} + +func main() { + var ( + binaryPath string + configPath string + workflowName string + owner string + outputPath string + donFamily string + tag string + binaryURLPrefix string + configURLPrefix string + status int + ) + + flag.StringVar(&binaryPath, "binary", "", "Path to the compiled workflow binary (required)") + flag.StringVar(&configPath, "config", "", "Path to the workflow config file (optional)") + flag.StringVar(&workflowName, "name", "file-source-workflow", "Workflow name") + flag.StringVar(&owner, "owner", "f39fd6e51aad88f6f4ce6ab8827279cfffb92266", "Workflow owner address (hex without 0x)") + flag.StringVar(&outputPath, "output", "/tmp/workflows_metadata.json", "Output path for the JSON file") + flag.StringVar(&donFamily, "don-family", "workflow", "DON family name") + flag.StringVar(&tag, "tag", "v1.0.0", "Workflow tag") + flag.StringVar(&binaryURLPrefix, 
"binary-url-prefix", "file:///home/chainlink/workflows/", "URL prefix for binary (will append filename)") + flag.StringVar(&configURLPrefix, "config-url-prefix", "file:///home/chainlink/workflows/", "URL prefix for config (will append filename)") + flag.IntVar(&status, "status", 0, "Workflow status (0=active, 1=paused)") + flag.Parse() + + if binaryPath == "" { + fmt.Println("Error: --binary is required") + flag.Usage() + os.Exit(1) + } + + // Read binary file + binaryRaw, err := os.ReadFile(binaryPath) + if err != nil { + fmt.Printf("Error reading binary file: %v\n", err) + os.Exit(1) + } + + // Decompress binary if it's in .br.b64 format + var binary []byte + if strings.HasSuffix(binaryPath, ".br.b64") { + // Base64 decode + decoded, decodeErr := base64.StdEncoding.DecodeString(string(binaryRaw)) + if decodeErr != nil { + fmt.Printf("Error base64 decoding binary: %v\n", decodeErr) + os.Exit(1) + } + // Brotli decompress + reader := brotli.NewReader(strings.NewReader(string(decoded))) + var decompressErr error + binary, decompressErr = io.ReadAll(reader) + if decompressErr != nil { + fmt.Printf("Error brotli decompressing binary: %v\n", decompressErr) + os.Exit(1) + } + fmt.Printf("Decompressed binary from %d bytes (compressed) to %d bytes (WASM)\n", len(binaryRaw), len(binary)) + } else { + binary = binaryRaw + } + + // Read config file (optional) + var config []byte + if configPath != "" { + config, err = os.ReadFile(configPath) + if err != nil { + fmt.Printf("Error reading config file: %v\n", err) + os.Exit(1) + } + } + + // Decode owner + ownerBytes, err := hex.DecodeString(owner) + if err != nil { + fmt.Printf("Error decoding owner hex: %v\n", err) + os.Exit(1) + } + + // Generate workflow ID + workflowID, err := pkgworkflows.GenerateWorkflowID(ownerBytes, workflowName, binary, config, "") + if err != nil { + fmt.Printf("Error generating workflow ID: %v\n", err) + os.Exit(1) + } + + // Get binary and config filenames - use .br.b64 for compressed binary + binaryFilename := "file_source_workflow.br.b64" + configFilename := "file_source_config.json" + + // Build the metadata + now := time.Now().Unix() + var createdAt uint64 + if now >= 0 { + createdAt = uint64(now) // #nosec G115 -- time is always positive + } + var statusUint8 uint8 + if status >= 0 && status <= 255 { + statusUint8 = uint8(status) // #nosec G115 -- status is validated in range + } + metadata := FileWorkflowSourceData{ + Workflows: []FileWorkflowMetadata{ + { + WorkflowID: hex.EncodeToString(workflowID[:]), + Owner: owner, + CreatedAt: createdAt, + Status: statusUint8, + WorkflowName: workflowName, + BinaryURL: binaryURLPrefix + binaryFilename, + ConfigURL: configURLPrefix + configFilename, + Tag: tag, + DonFamily: donFamily, + }, + }, + } + + // Marshal to JSON + jsonData, err := json.MarshalIndent(metadata, "", " ") + if err != nil { + fmt.Printf("Error marshaling JSON: %v\n", err) + os.Exit(1) + } + + // Write to output file + if err := os.WriteFile(outputPath, jsonData, 0600); err != nil { + fmt.Printf("Error writing output file: %v\n", err) + os.Exit(1) + } + + fmt.Printf("Generated workflow metadata file: %s\n", outputPath) + fmt.Printf("Workflow ID: %s\n", hex.EncodeToString(workflowID[:])) + fmt.Printf("Workflow Name: %s\n", workflowName) + fmt.Printf("Owner: %s\n", owner) + fmt.Printf("DON Family: %s\n", donFamily) + fmt.Printf("\nTo use this workflow:\n") + fmt.Printf("1. 
Copy the binary to Docker containers: docker cp %s workflow-node1:/home/chainlink/workflows/%s\n", binaryPath, binaryFilename) + if configPath != "" { + fmt.Printf("2. Copy the config to Docker containers: docker cp %s workflow-node1:/home/chainlink/workflows/%s\n", configPath, configFilename) + } + fmt.Printf("3. Copy the metadata JSON to Docker containers: docker cp %s workflow-node1:/tmp/workflows_metadata.json\n", outputPath) + fmt.Printf("4. Repeat steps 1-3 for all workflow nodes\n") + fmt.Printf("5. Wait for syncer to pick up the workflow (default 12 second interval)\n") +} diff --git a/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml b/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml new file mode 100644 index 00000000000..d7b0435a37b --- /dev/null +++ b/core/scripts/cre/environment/configs/workflow-gateway-don-grpc-source.toml @@ -0,0 +1,105 @@ +# Workflow Gateway DON configuration with gRPC alternative workflow source enabled. +# This topology is the same as workflow-gateway-don.toml but with AlternativeSources +# configured to read workflows from a gRPC source at host.docker.internal:8544. +# +# Used by: system-tests/tests/smoke/cre/v2_grpc_source_test.go + +[[blockchains]] + type = "anvil" + chain_id = "1337" + docker_cmd_params = ["-b", "0.5", "--mixed-mining"] + +[[blockchains]] + type = "anvil" + chain_id = "2337" + port = "8546" + docker_cmd_params = ["-b", "0.5", "--mixed-mining"] + +[jd] + csa_encryption_key = "d1093c0060d50a3c89c189b2e485da5a3ce57f3dcb38ab7e2c0d5f0bb2314a44" # any random 32 byte hex string + # change to your version + image = "job-distributor:0.22.1" + +[fake] + port = 8171 + +[fake_http] + port = 8666 + +#[s3provider] +# # use all defaults +# port = 9000 +# console_port = 9001 + +[infra] + # either "docker" or "crib" + type = "docker" + +#[infra.crib] +# namespace = "crib-local" +# folder_location = "/absolute/path/to/crib/deployments/cre" +# provider = "kind" # or "aws" + +[[nodesets]] + nodes = 4 + name = "workflow" + don_types = ["workflow"] + override_mode = "all" + http_port_range_start = 10100 + + env_vars = { CL_EVM_CMD = "" } + capabilities = ["ocr3", "custom-compute", "web-api-target", "web-api-trigger", "vault", "cron", "http-action", "http-trigger", "consensus", "don-time"] + + [nodesets.chain_capabilities] + write-evm = ["1337", "2337"] + read-contract = ["1337", "2337"] + evm = ["1337", "2337"] + + # See ./examples/workflow-don-overrides.toml to learn how to override capability configs + + [nodesets.db] + image = "postgres:12.0" + port = 13000 + +[[nodesets.node_specs]] + roles = ["plugin"] + [nodesets.node_specs.node] + docker_ctx = "../../../.." + docker_file = "core/chainlink.Dockerfile" + docker_build_args = { "CL_IS_PROD_BUILD" = "false" } + # image = "chainlink-tmp:latest" + user_config_overrides = """ +# Configure gRPC alternative workflow source +# The mock server runs on port 8544 (started by the test before environment setup) +[[Capabilities.WorkflowRegistry.AlternativeSources]] +URL = 'host.docker.internal:8544' +TLSEnabled = false +Name = 'mock-private-registry' +""" + +[[nodesets]] + nodes = 1 + name = "bootstrap-gateway" + don_types = ["bootstrap", "gateway"] + override_mode = "each" + http_port_range_start = 10300 + + env_vars = { CL_EVM_CMD = "" } + supported_evm_chains = [1337, 2337] + + [nodesets.db] + image = "postgres:12.0" + port = 13200 + + [[nodesets.node_specs]] + roles = ["bootstrap", "gateway"] + [nodesets.node_specs.node] + docker_ctx = "../../../.." 
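+    # build context is the repository root; core/chainlink.Dockerfile is resolved against it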
+ docker_file = "core/chainlink.Dockerfile" + docker_build_args = { "CL_IS_PROD_BUILD" = "false" } + # 5002 is the web API capabilities port for incoming requests + # 15002 is the vault port for incoming requests + custom_ports = ["5002:5002","15002:15002"] + # image = "chainlink-tmp:latest" + user_config_overrides = "" + diff --git a/core/scripts/cre/environment/examples/workflows_metadata_example.json b/core/scripts/cre/environment/examples/workflows_metadata_example.json new file mode 100644 index 00000000000..f29b4f635c1 --- /dev/null +++ b/core/scripts/cre/environment/examples/workflows_metadata_example.json @@ -0,0 +1,16 @@ +{ + "_comment": "This is an EXAMPLE file showing the format. The workflow_id must be calculated using GenerateWorkflowID(owner, name, binary, config, secrets). See MULTI_SOURCE_MVP.md for details.", + "workflows": [ + { + "workflow_id": "REPLACE_WITH_CALCULATED_WORKFLOW_ID", + "owner": "f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "created_at": 1733250000, + "status": 0, + "workflow_name": "file-source-workflow", + "binary_url": "file:///home/chainlink/workflows/file_source_workflow.wasm", + "config_url": "file:///home/chainlink/workflows/file_source_config.json", + "tag": "v1.0.0", + "don_family": "workflow" + } + ] +} \ No newline at end of file diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 72b0ff41d35..01ab1ca2e4f 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -25,6 +25,7 @@ require ( require ( github.com/Masterminds/semver/v3 v3.4.0 + github.com/andybalholm/brotli v1.2.0 github.com/c-bata/go-prompt v0.2.6 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/docker v28.5.1+incompatible @@ -118,7 +119,6 @@ require ( github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/alitto/pond/v2 v2.5.0 // indirect - github.com/andybalholm/brotli v1.2.0 // indirect github.com/apache/arrow-go/v18 v18.3.1 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -628,9 +628,9 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/grpc v1.77.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/grpc v1.78.0 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/guregu/null.v4 v4.0.0 // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 7fe9b9956bb..ea4b207f76c 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -2532,10 +2532,10 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= 
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2560,8 +2560,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 0737446f4fc..03eec7f3139 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -1294,6 +1294,21 @@ func newCREServices( return nil, fmt.Errorf("unable to create workflow registry event handler: %w", err) } + // Build alternative sources configuration from config + // JWT auth is always enabled for gRPC sources + altSources := capCfg.WorkflowRegistry().AlternativeSources() + altSourceConfigs := make([]syncerV2.AlternativeSourceConfig, 0, len(altSources)) + for _, src := range altSources { + altSourceConfigs = append(altSourceConfigs, syncerV2.AlternativeSourceConfig{ + URL: src.GetURL(), + Name: src.GetName(), + TLSEnabled: src.GetTLSEnabled(), + JWTGenerator: opts.JWTGenerator, + }) + } + + // Create syncer - contract address may be empty for pure alternative-source deployments + // File sources are detected by file:// URL prefix in WithAlternativeSources workflowRegistrySyncerV2, err = syncerV2.NewWorkflowRegistry( lggr, crFactory, @@ -1305,6 +1320,7 @@ func newCREServices( eventHandler, workflowDonNotifier, engineRegistry, + 
syncerV2.WithAlternativeSources(altSourceConfigs), ) if err != nil { return nil, fmt.Errorf("unable to create workflow registry syncer: %w", err) diff --git a/core/services/chainlink/config_capabilities.go b/core/services/chainlink/config_capabilities.go index e3126abccdf..bcb204babbb 100644 --- a/core/services/chainlink/config_capabilities.go +++ b/core/services/chainlink/config_capabilities.go @@ -232,6 +232,14 @@ func (c *capabilitiesWorkflowRegistry) WorkflowStorage() config.WorkflowStorage } } +func (c *capabilitiesWorkflowRegistry) AlternativeSources() []config.AlternativeWorkflowSource { + sources := make([]config.AlternativeWorkflowSource, len(c.c.AlternativeSourcesConfig)) + for i, src := range c.c.AlternativeSourcesConfig { + sources[i] = &alternativeWorkflowSource{c: src} + } + return sources +} + type workflowStorage struct { c toml.WorkflowStorage } @@ -248,6 +256,31 @@ func (c *workflowStorage) ArtifactStorageHost() string { return *c.c.ArtifactStorageHost } +type alternativeWorkflowSource struct { + c toml.AlternativeWorkflowSource +} + +func (a *alternativeWorkflowSource) GetURL() string { + if a.c.URL == nil { + return "" + } + return *a.c.URL +} + +func (a *alternativeWorkflowSource) GetTLSEnabled() bool { + if a.c.TLSEnabled == nil { + return true // Default to true + } + return *a.c.TLSEnabled +} + +func (a *alternativeWorkflowSource) GetName() string { + if a.c.Name == nil { + return "" + } + return *a.c.Name +} + type gatewayConnector struct { c toml.GatewayConnector } diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index c6a5fa5f770..fe21bbed922 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -535,6 +535,13 @@ func TestConfig_Marshal(t *testing.T) { URL: ptr(""), TLSEnabled: ptr(true), }, + AlternativeSourcesConfig: []toml.AlternativeWorkflowSource{ + { + URL: ptr("localhost:50051"), + TLSEnabled: ptr(true), + Name: ptr("test-grpc-source"), + }, + }, }, Dispatcher: toml.Dispatcher{ SupportedVersion: ptr(1), diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml index d090b8d15e9..e113068c3b6 100644 --- a/core/services/chainlink/testdata/config-empty-effective.toml +++ b/core/services/chainlink/testdata/config-empty-effective.toml @@ -326,6 +326,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index b2f2447b804..c97cd83ba2a 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -342,6 +342,11 @@ ArtifactStorageHost = '' URL = '' TLSEnabled = true +[[Capabilities.WorkflowRegistry.AlternativeSources]] +URL = 'localhost:50051' +TLSEnabled = true +Name = 'test-grpc-source' + [Capabilities.GatewayConnector] ChainIDForNodeKey = '11155111' NodeAddress = '0x68902d681c28119f9b2531473a417088bf008e59' diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index c93130eb868..457c6f03358 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -326,6 +326,7 @@ MaxBinarySize = 
'20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/core/services/workflows/events/emit.go b/core/services/workflows/events/emit.go index 94292fa67e5..8ad03183c00 100644 --- a/core/services/workflows/events/emit.go +++ b/core/services/workflows/events/emit.go @@ -493,6 +493,7 @@ func buildCREMetadataV2(kvs map[string]string) *eventsv2.CreInfo { m.EngineVersion = kvs[platform.EngineVersion] m.CapabilitiesRegistryVersion = kvs[platform.CapabilitiesRegistryVersion] m.DonVersion = kvs[platform.DonVersion] + m.WorkflowSource = kvs[platform.KeyWorkflowSource] return m } diff --git a/core/services/workflows/syncer/v2/contract_workflow_source.go b/core/services/workflows/syncer/v2/contract_workflow_source.go new file mode 100644 index 00000000000..6e4c7a8d2cd --- /dev/null +++ b/core/services/workflows/syncer/v2/contract_workflow_source.go @@ -0,0 +1,237 @@ +package v2 + +import ( + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "math/big" + "sync" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" + "github.com/smartcontractkit/chainlink-evm/gethwrappers/workflow/generated/workflow_registry_wrapper_v2" + "github.com/smartcontractkit/chainlink-evm/pkg/config" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/syncer/versioning" +) + +const ( + // ContractWorkflowSourceName is the name used for logging and identification. + ContractWorkflowSourceName = "ContractWorkflowSource" +) + +// ContractWorkflowSource implements WorkflowMetadataSource by reading from the on-chain +// workflow registry contract. +type ContractWorkflowSource struct { + lggr logger.Logger + workflowRegistryAddress string + contractReaderFn versioning.ContractReaderFactory + contractReader commontypes.ContractReader + mu sync.RWMutex +} + +// NewContractWorkflowSource creates a new contract-based workflow source. +func NewContractWorkflowSource( + lggr logger.Logger, + contractReaderFn versioning.ContractReaderFactory, + workflowRegistryAddress string, +) *ContractWorkflowSource { + return &ContractWorkflowSource{ + lggr: lggr.Named(ContractWorkflowSourceName), + contractReaderFn: contractReaderFn, + workflowRegistryAddress: workflowRegistryAddress, + } +} + +// ListWorkflowMetadata fetches workflow metadata from the on-chain contract. +// It lazily initializes the contract reader on first call. 
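+// Results are fetched per DON family in pages of up to MaxResultsPerQuery;
+// a page shorter than the limit marks the end of that family's list.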
+func (c *ContractWorkflowSource) ListWorkflowMetadata(ctx context.Context, don capabilities.DON) ([]WorkflowMetadataView, *commontypes.Head, error) { + c.tryInitialize(ctx) + + c.mu.RLock() + reader := c.contractReader + c.mu.RUnlock() + + if reader == nil { + return nil, nil, errors.New("contract reader not initialized") + } + + contractBinding := commontypes.BoundContract{ + Address: c.workflowRegistryAddress, + Name: WorkflowRegistryContractName, + } + + readIdentifier := contractBinding.ReadIdentifier(GetWorkflowsByDONMethodName) + var headAtLastRead *commontypes.Head + var allWorkflows []WorkflowMetadataView + + for _, family := range don.Families { + params := GetWorkflowListByDONParams{ + DonFamily: family, + Start: big.NewInt(0), + Limit: big.NewInt(MaxResultsPerQuery), + } + + for { + var err error + var workflows struct { + List []workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView + } + + headAtLastRead, err = reader.GetLatestValueWithHeadData(ctx, readIdentifier, primitives.Finalized, params, &workflows) + if err != nil { + return []WorkflowMetadataView{}, &commontypes.Head{Height: "0"}, fmt.Errorf("failed to get latest value with head data: %w", err) + } + + for _, wfMeta := range workflows.List { + // Log warnings for incomplete metadata but don't skip processing + c.validateWorkflowMetadata(wfMeta) + + allWorkflows = append(allWorkflows, WorkflowMetadataView{ + WorkflowID: wfMeta.WorkflowId, + Owner: wfMeta.Owner.Bytes(), + CreatedAt: wfMeta.CreatedAt, + Status: wfMeta.Status, + WorkflowName: wfMeta.WorkflowName, + BinaryURL: wfMeta.BinaryUrl, + ConfigURL: wfMeta.ConfigUrl, + Tag: wfMeta.Tag, + Attributes: wfMeta.Attributes, + DonFamily: wfMeta.DonFamily, + Source: ContractWorkflowSourceName, + }) + } + + // if less workflows than limit, then we have reached the end of the list + if int64(len(workflows.List)) < MaxResultsPerQuery { + break + } + + // otherwise, increment the start parameter and continue to fetch more workflows + params.Start.Add(params.Start, big.NewInt(int64(len(workflows.List)))) + } + } + + c.lggr.Debugw("Loaded workflows from contract", + "address", c.workflowRegistryAddress, + "count", len(allWorkflows), + "donFamilies", don.Families) + + if headAtLastRead == nil { + return allWorkflows, &commontypes.Head{Height: "0"}, nil + } + + return allWorkflows, headAtLastRead, nil +} + +func (c *ContractWorkflowSource) Name() string { + return ContractWorkflowSourceName +} + +// Ready returns nil if the contract reader is initialized. +func (c *ContractWorkflowSource) Ready() error { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.contractReader == nil { + return errors.New("contract reader not initialized") + } + return nil +} + +// tryInitialize attempts to initialize the contract reader. Returns true if ready. +func (c *ContractWorkflowSource) tryInitialize(ctx context.Context) bool { + c.mu.Lock() + defer c.mu.Unlock() + + if c.contractReader != nil { + return true + } + + reader, err := c.newWorkflowRegistryContractReader(ctx) + if err != nil { + c.lggr.Debugw("Contract reader not yet available", "error", err) + return false + } + + c.contractReader = reader + c.lggr.Debugw("Contract reader initialized successfully") + return true +} + +// newWorkflowRegistryContractReader creates a new contract reader configured for the workflow registry. 
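+// The reader is configured with a single method binding for
+// GetWorkflowsByDONMethodName, bound to the registry address, and started
+// before it is returned.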
+func (c *ContractWorkflowSource) newWorkflowRegistryContractReader(ctx context.Context) (commontypes.ContractReader, error) { + contractReaderCfg := config.ChainReaderConfig{ + Contracts: map[string]config.ChainContractReader{ + WorkflowRegistryContractName: { + ContractABI: workflow_registry_wrapper_v2.WorkflowRegistryABI, + Configs: map[string]*config.ChainReaderDefinition{ + GetWorkflowsByDONMethodName: { + ChainSpecificName: GetWorkflowsByDONMethodName, + ReadType: config.Method, + }, + }, + }, + }, + } + + marshalledCfg, err := json.Marshal(contractReaderCfg) + if err != nil { + return nil, err + } + + reader, err := c.contractReaderFn(ctx, marshalledCfg) + if err != nil { + return nil, err + } + + bc := commontypes.BoundContract{ + Name: WorkflowRegistryContractName, + Address: c.workflowRegistryAddress, + } + + // bind contract to contract reader + if err := reader.Bind(ctx, []commontypes.BoundContract{bc}); err != nil { + return nil, err + } + + if err := reader.Start(ctx); err != nil { + return nil, err + } + + return reader, nil +} + +// validateWorkflowMetadata logs warnings for incomplete workflow metadata from contract. +func (c *ContractWorkflowSource) validateWorkflowMetadata(wfMeta workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView) { + if isEmptyWorkflowID(wfMeta.WorkflowId) { + c.lggr.Warnw("Workflow has empty WorkflowID from contract", + "source", ContractWorkflowSourceName, + "workflowName", wfMeta.WorkflowName, + "owner", hex.EncodeToString(wfMeta.Owner.Bytes()), + "binaryURL", wfMeta.BinaryUrl, + "configURL", wfMeta.ConfigUrl) + } + + if len(wfMeta.Owner.Bytes()) == 0 { + c.lggr.Warnw("Workflow has empty Owner from contract", + "source", ContractWorkflowSourceName, + "workflowID", hex.EncodeToString(wfMeta.WorkflowId[:]), + "workflowName", wfMeta.WorkflowName, + "binaryURL", wfMeta.BinaryUrl, + "configURL", wfMeta.ConfigUrl) + } + + if wfMeta.BinaryUrl == "" || wfMeta.ConfigUrl == "" { + c.lggr.Warnw("Workflow has empty BinaryURL or ConfigURL from contract", + "source", ContractWorkflowSourceName, + "workflowID", hex.EncodeToString(wfMeta.WorkflowId[:]), + "workflowName", wfMeta.WorkflowName, + "owner", hex.EncodeToString(wfMeta.Owner.Bytes()), + "binaryURL", wfMeta.BinaryUrl, + "configURL", wfMeta.ConfigUrl) + } +} diff --git a/core/services/workflows/syncer/v2/contract_workflow_source_test.go b/core/services/workflows/syncer/v2/contract_workflow_source_test.go new file mode 100644 index 00000000000..058b0a8fb4c --- /dev/null +++ b/core/services/workflows/syncer/v2/contract_workflow_source_test.go @@ -0,0 +1,368 @@ +package v2 + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" + "github.com/smartcontractkit/chainlink-common/pkg/workflows" + "github.com/smartcontractkit/chainlink-evm/gethwrappers/workflow/generated/workflow_registry_wrapper_v2" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +// Test constants for workflow metadata +const ( + testOwnerAddress = "0x1234567890123456789012345678901234567890" + testBinaryURL = "https://example.com/binary.wasm" + testConfigURL = "https://example.com/config.json" +) + +// testBinaryContent and testConfigContent are mock content used for canonical 
workflowID calculation +var ( + testBinaryContent = []byte("mock-wasm-binary-content") + testConfigContent = []byte("{}") +) + +// mockWorkflowContractReader is a mock implementation of ContractReader for testing ContractWorkflowSource. +// Note: Reflection is required here because the ContractReader interface in chainlink-common +// uses `any` for the result parameter, and the production code passes an anonymous struct. +type mockWorkflowContractReader struct { + commontypes.ContractReader + workflowList []workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView + head *commontypes.Head + getLatestErr error + bindErr error + startErr error +} + +func (m *mockWorkflowContractReader) GetLatestValueWithHeadData( + _ context.Context, + _ string, + _ primitives.ConfidenceLevel, + _ any, + result any, +) (*commontypes.Head, error) { + if m.getLatestErr != nil { + return nil, m.getLatestErr + } + + resultVal := reflect.ValueOf(result).Elem() + listField := resultVal.FieldByName("List") + if listField.IsValid() && listField.CanSet() { + listField.Set(reflect.ValueOf(m.workflowList)) + } + + return m.head, nil +} + +func (m *mockWorkflowContractReader) Bind(_ context.Context, _ []commontypes.BoundContract) error { + return m.bindErr +} + +func (m *mockWorkflowContractReader) Start(_ context.Context) error { + return m.startErr +} + +// createTestWorkflowMetadata creates a test workflow metadata view for testing. +// It uses the canonical workflow ID calculation to ensure test data is realistic. +func createTestWorkflowMetadata(name string, family string) workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView { + owner := common.HexToAddress(testOwnerAddress) + + // Use canonical workflow ID calculation + workflowID, err := workflows.GenerateWorkflowID(owner.Bytes(), name, testBinaryContent, testConfigContent, "") + if err != nil { + panic("failed to generate workflow ID: " + err.Error()) + } + + return workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView{ + WorkflowId: workflowID, + Owner: owner, + CreatedAt: 1234567890, + Status: WorkflowStatusActive, + WorkflowName: name, + BinaryUrl: testBinaryURL, + ConfigUrl: testConfigURL, + Tag: "v1.0.0", + Attributes: []byte("{}"), + DonFamily: family, + } +} + +func TestContractWorkflowSource_ListWorkflowMetadata_Success(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockReader := &mockWorkflowContractReader{ + workflowList: []workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView{ + createTestWorkflowMetadata("workflow-1", "family-a"), + createTestWorkflowMetadata("workflow-2", "family-a"), + }, + head: &commontypes.Head{Height: "100"}, + } + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return mockReader, nil + }, + testOwnerAddress, + ) + + // Manually set the contract reader (simulating successful initialization) + source.contractReader = mockReader + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, headResult, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, wfs, 2) + assert.Equal(t, "100", headResult.Height) + assert.Equal(t, "workflow-1", wfs[0].WorkflowName) + assert.Equal(t, "workflow-2", wfs[1].WorkflowName) +} + +func TestContractWorkflowSource_ListWorkflowMetadata_MultipleDONFamilies(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + // This mock will be called twice (once for each DON 
family) + mockReader := &mockWorkflowContractReader{ + workflowList: []workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView{ + createTestWorkflowMetadata("workflow-1", "family-a"), + }, + head: &commontypes.Head{Height: "100"}, + } + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return mockReader, nil + }, + testOwnerAddress, + ) + + source.contractReader = mockReader + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a", "family-b"}, + } + + wfs, headResult, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + // Should return 2 workflows (1 per family call, but mock returns same list each time = 2 total) + assert.Len(t, wfs, 2) + assert.Equal(t, "100", headResult.Height) +} + +func TestContractWorkflowSource_ListWorkflowMetadata_NotInitialized(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + // Factory that always fails + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return nil, errors.New("factory error") + }, + testOwnerAddress, + ) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + // Contract reader is nil, should return error + wfs, headResult, err := source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) + assert.Contains(t, err.Error(), "contract reader not initialized") + assert.Nil(t, wfs) + assert.Nil(t, headResult) +} + +func TestContractWorkflowSource_ListWorkflowMetadata_ContractReaderError(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockReader := &mockWorkflowContractReader{ + getLatestErr: errors.New("contract read failed"), + } + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return mockReader, nil + }, + testOwnerAddress, + ) + source.contractReader = mockReader + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, headResult, err := source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get latest value with head data") + assert.Empty(t, wfs) + assert.Equal(t, "0", headResult.Height) +} + +func TestContractWorkflowSource_ListWorkflowMetadata_EmptyResult(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockReader := &mockWorkflowContractReader{ + workflowList: []workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView{}, + head: &commontypes.Head{Height: "100"}, + } + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return mockReader, nil + }, + testOwnerAddress, + ) + source.contractReader = mockReader + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, headResult, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Empty(t, wfs) + assert.Equal(t, "100", headResult.Height) +} + +func TestContractWorkflowSource_Ready_NotInitialized(t *testing.T) { + lggr := logger.TestLogger(t) + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return nil, errors.New("factory error") + }, + testOwnerAddress, + ) + + err := source.Ready() + require.Error(t, err) + assert.Contains(t, err.Error(), "contract reader not initialized") +} + +func TestContractWorkflowSource_Ready_Initialized(t 
*testing.T) { + lggr := logger.TestLogger(t) + + mockReader := &mockWorkflowContractReader{} + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return mockReader, nil + }, + testOwnerAddress, + ) + source.contractReader = mockReader + + err := source.Ready() + require.NoError(t, err) +} + +func TestContractWorkflowSource_tryInitialize_Success(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockReader := &mockWorkflowContractReader{} + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return mockReader, nil + }, + testOwnerAddress, + ) + + // Initially not ready + require.Error(t, source.Ready()) + + // Try to initialize + result := source.tryInitialize(ctx) + assert.True(t, result) + + // Now should be ready + assert.NoError(t, source.Ready()) +} + +func TestContractWorkflowSource_tryInitialize_AlreadyInitialized(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + callCount := 0 + mockReader := &mockWorkflowContractReader{} + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + callCount++ + return mockReader, nil + }, + testOwnerAddress, + ) + + // First initialization + result := source.tryInitialize(ctx) + assert.True(t, result) + assert.Equal(t, 1, callCount) + + // Second call should return true without calling factory again + result = source.tryInitialize(ctx) + assert.True(t, result) + assert.Equal(t, 1, callCount) // Still 1, factory not called again +} + +func TestContractWorkflowSource_tryInitialize_FactoryError(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return nil, errors.New("factory error") + }, + testOwnerAddress, + ) + + result := source.tryInitialize(ctx) + assert.False(t, result) + assert.Error(t, source.Ready()) +} + +func TestContractWorkflowSource_Name(t *testing.T) { + lggr := logger.TestLogger(t) + + source := NewContractWorkflowSource( + lggr, + func(_ context.Context, _ []byte) (commontypes.ContractReader, error) { + return nil, nil + }, + testOwnerAddress, + ) + + assert.Equal(t, ContractWorkflowSourceName, source.Name()) +} diff --git a/core/services/workflows/syncer/v2/engine_registry.go b/core/services/workflows/syncer/v2/engine_registry.go index 0be6fb7301c..ead8ffa1606 100644 --- a/core/services/workflows/syncer/v2/engine_registry.go +++ b/core/services/workflows/syncer/v2/engine_registry.go @@ -14,28 +14,38 @@ var ErrAlreadyExists = errors.New("attempting to register duplicate engine") type ServiceWithMetadata struct { WorkflowID types.WorkflowID + Source string // Which source this workflow came from (e.g., "ContractWorkflowSource", "GRPCWorkflowSource") services.Service } +// engineEntry holds the engine and its associated source for internal storage +type engineEntry struct { + engine services.Service + source string +} + type EngineRegistry struct { - engines map[[32]byte]services.Service + engines map[[32]byte]engineEntry mu sync.RWMutex } func NewEngineRegistry() *EngineRegistry { return &EngineRegistry{ - engines: make(map[[32]byte]services.Service), + engines: make(map[[32]byte]engineEntry), } } -// Add adds an engine to the registry. 
-func (r *EngineRegistry) Add(workflowID types.WorkflowID, engine services.Service) error { +// Add adds an engine to the registry with its source. +func (r *EngineRegistry) Add(workflowID types.WorkflowID, source string, engine services.Service) error { r.mu.Lock() defer r.mu.Unlock() if _, found := r.engines[workflowID]; found { return ErrAlreadyExists } - r.engines[workflowID] = engine + r.engines[workflowID] = engineEntry{ + engine: engine, + source: source, + } return nil } @@ -43,13 +53,14 @@ func (r *EngineRegistry) Add(workflowID types.WorkflowID, engine services.Servic func (r *EngineRegistry) Get(workflowID types.WorkflowID) (ServiceWithMetadata, bool) { r.mu.RLock() defer r.mu.RUnlock() - engine, found := r.engines[workflowID] + entry, found := r.engines[workflowID] if !found { return ServiceWithMetadata{}, false } return ServiceWithMetadata{ WorkflowID: workflowID, - Service: engine, + Source: entry.source, + Service: entry.engine, }, true } @@ -58,15 +69,33 @@ func (r *EngineRegistry) GetAll() []ServiceWithMetadata { r.mu.RLock() defer r.mu.RUnlock() engines := []ServiceWithMetadata{} - for workflowID, engine := range r.engines { + for workflowID, entry := range r.engines { engines = append(engines, ServiceWithMetadata{ WorkflowID: workflowID, - Service: engine, + Source: entry.source, + Service: entry.engine, }) } return engines } +// GetBySource retrieves all engines from a specific source. +func (r *EngineRegistry) GetBySource(source string) []ServiceWithMetadata { + r.mu.RLock() + defer r.mu.RUnlock() + var result []ServiceWithMetadata + for workflowID, entry := range r.engines { + if entry.source == source { + result = append(result, ServiceWithMetadata{ + WorkflowID: workflowID, + Source: entry.source, + Service: entry.engine, + }) + } + } + return result +} + // Contains is true if the engine exists. 
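A short usage sketch of the source-indexed registry above; the helper below is illustrative and not part of this diff:

    // stopEnginesFromSource closes and removes every engine registered under one source,
    // e.g. to tear down all gRPC-sourced workflows when that source is reconfigured.
    func stopEnginesFromSource(er *EngineRegistry, source string) error {
        for _, e := range er.GetBySource(source) {
            entry, err := er.Pop(e.WorkflowID)
            if err != nil {
                return err
            }
            // ServiceWithMetadata embeds services.Service, so Close is available directly.
            if err := entry.Close(); err != nil {
                return err
            }
        }
        return nil
    }
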
func (r *EngineRegistry) Contains(workflowID types.WorkflowID) bool { r.mu.RLock() @@ -79,14 +108,15 @@ func (r *EngineRegistry) Contains(workflowID types.WorkflowID) bool { func (r *EngineRegistry) Pop(workflowID types.WorkflowID) (ServiceWithMetadata, error) { r.mu.Lock() defer r.mu.Unlock() - engine, ok := r.engines[workflowID] + entry, ok := r.engines[workflowID] if !ok { return ServiceWithMetadata{}, fmt.Errorf("pop failed: %w", ErrNotFound) } delete(r.engines, workflowID) return ServiceWithMetadata{ WorkflowID: workflowID, - Service: engine, + Source: entry.source, + Service: entry.engine, }, nil } @@ -95,12 +125,13 @@ func (r *EngineRegistry) PopAll() []ServiceWithMetadata { r.mu.Lock() defer r.mu.Unlock() engines := []ServiceWithMetadata{} - for workflowID, engine := range r.engines { + for workflowID, entry := range r.engines { engines = append(engines, ServiceWithMetadata{ WorkflowID: workflowID, - Service: engine, + Source: entry.source, + Service: entry.engine, }) } - r.engines = make(map[[32]byte]services.Service) + r.engines = make(map[[32]byte]engineEntry) return engines } diff --git a/core/services/workflows/syncer/v2/engine_registry_test.go b/core/services/workflows/syncer/v2/engine_registry_test.go index d0c37e91d1e..0243a583ca0 100644 --- a/core/services/workflows/syncer/v2/engine_registry_test.go +++ b/core/services/workflows/syncer/v2/engine_registry_test.go @@ -29,13 +29,13 @@ func TestEngineRegistry(t *testing.T) { require.Nil(t, e.Service) // add - require.NoError(t, er.Add(workflowID1, srv)) + require.NoError(t, er.Add(workflowID1, "TestSource", srv)) ok = er.Contains(workflowID1) require.True(t, ok) // add another item // this verifies that keys are unique - require.NoError(t, er.Add(workflowID2, srv)) + require.NoError(t, er.Add(workflowID2, "TestSource", srv)) ok = er.Contains(workflowID2) require.True(t, ok) @@ -56,13 +56,101 @@ func TestEngineRegistry(t *testing.T) { require.False(t, ok) // re-add - require.NoError(t, er.Add(workflowID1, srv)) + require.NoError(t, er.Add(workflowID1, "TestSource", srv)) // pop all es = er.PopAll() require.Len(t, es, 2) } +func TestEngineRegistry_SourceTracking(t *testing.T) { + er := NewEngineRegistry() + + wfID1 := types.WorkflowID([32]byte{1}) + wfID2 := types.WorkflowID([32]byte{2}) + wfID3 := types.WorkflowID([32]byte{3}) + + // Add engines from different sources + require.NoError(t, er.Add(wfID1, ContractWorkflowSourceName, &fakeService{})) + require.NoError(t, er.Add(wfID2, ContractWorkflowSourceName, &fakeService{})) + require.NoError(t, er.Add(wfID3, GRPCWorkflowSourceName, &fakeService{})) + + // GetBySource filters correctly + contractEngines := er.GetBySource(ContractWorkflowSourceName) + require.Len(t, contractEngines, 2) + + grpcEngines := er.GetBySource(GRPCWorkflowSourceName) + require.Len(t, grpcEngines, 1) + + // Unknown source returns empty + unknownEngines := er.GetBySource("UnknownSource") + require.Empty(t, unknownEngines) +} + +func TestEngineRegistry_SourceInMetadata(t *testing.T) { + er := NewEngineRegistry() + wfID := types.WorkflowID([32]byte{1}) + + require.NoError(t, er.Add(wfID, "TestSource", &fakeService{})) + + engine, ok := er.Get(wfID) + require.True(t, ok) + require.Equal(t, "TestSource", engine.Source) +} + +func TestEngineRegistry_GetAllIncludesSource(t *testing.T) { + er := NewEngineRegistry() + + wfID1 := types.WorkflowID([32]byte{1}) + wfID2 := types.WorkflowID([32]byte{2}) + + require.NoError(t, er.Add(wfID1, ContractWorkflowSourceName, &fakeService{})) + require.NoError(t, er.Add(wfID2, 
GRPCWorkflowSourceName, &fakeService{})) + + engines := er.GetAll() + require.Len(t, engines, 2) + + // Verify each engine has its source + sources := make(map[string]bool) + for _, e := range engines { + sources[e.Source] = true + } + require.True(t, sources[ContractWorkflowSourceName]) + require.True(t, sources[GRPCWorkflowSourceName]) +} + +func TestEngineRegistry_PopReturnsSource(t *testing.T) { + er := NewEngineRegistry() + wfID := types.WorkflowID([32]byte{1}) + + require.NoError(t, er.Add(wfID, ContractWorkflowSourceName, &fakeService{})) + + engine, err := er.Pop(wfID) + require.NoError(t, err) + require.Equal(t, ContractWorkflowSourceName, engine.Source) +} + +func TestEngineRegistry_PopAllReturnsSource(t *testing.T) { + er := NewEngineRegistry() + + wfID1 := types.WorkflowID([32]byte{1}) + wfID2 := types.WorkflowID([32]byte{2}) + + require.NoError(t, er.Add(wfID1, ContractWorkflowSourceName, &fakeService{})) + require.NoError(t, er.Add(wfID2, GRPCWorkflowSourceName, &fakeService{})) + + engines := er.PopAll() + require.Len(t, engines, 2) + + // Verify sources are preserved + sources := make(map[string]bool) + for _, e := range engines { + sources[e.Source] = true + } + require.True(t, sources[ContractWorkflowSourceName]) + require.True(t, sources[GRPCWorkflowSourceName]) +} + type fakeService struct{} func (f fakeService) Start(ctx context.Context) error { return nil } diff --git a/core/services/workflows/syncer/v2/file_workflow_source.go b/core/services/workflows/syncer/v2/file_workflow_source.go new file mode 100644 index 00000000000..6d4a76c5866 --- /dev/null +++ b/core/services/workflows/syncer/v2/file_workflow_source.go @@ -0,0 +1,207 @@ +package v2 + +import ( + "context" + "encoding/hex" + "encoding/json" + "errors" + "os" + "strconv" + "sync" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/types" +) + +const ( + // FileWorkflowSourceName is the name used for logging and identification. + FileWorkflowSourceName = "FileWorkflowSource" +) + +// FileWorkflowMetadata represents a single workflow entry in the JSON file. +// This mirrors the WorkflowMetadataView structure but uses JSON-friendly types. +type FileWorkflowMetadata struct { + // WorkflowID is the hex-encoded workflow ID (without 0x prefix) + WorkflowID string `json:"workflow_id"` + // Owner is the hex-encoded owner address (without 0x prefix) + Owner string `json:"owner"` + // CreatedAt is the Unix timestamp when the workflow was created + CreatedAt uint64 `json:"created_at"` + // Status is the workflow status (0=active, 1=paused) + Status uint8 `json:"status"` + // WorkflowName is the human-readable name of the workflow + WorkflowName string `json:"workflow_name"` + // BinaryURL is the URL to fetch the workflow binary (same format as contract) + BinaryURL string `json:"binary_url"` + // ConfigURL is the URL to fetch the workflow config (same format as contract) + ConfigURL string `json:"config_url"` + // Tag is the workflow tag/version + Tag string `json:"tag"` + // Attributes is optional JSON-encoded attributes + Attributes string `json:"attributes,omitempty"` + // DonFamily is the DON family this workflow belongs to + DonFamily string `json:"don_family"` +} + +// FileWorkflowSourceData is the root structure of the JSON file. 
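For illustration, a hypothetical workflows.json matching the schema below, written as a Go literal; field names mirror the json tags on FileWorkflowMetadata and every value is made up:

    // workflowsJSON sketches the on-disk format read by FileWorkflowSource.
    // workflow_id is 32 bytes (64 hex chars); owner is a 20-byte address (40 hex chars).
    const workflowsJSON = `{
      "workflows": [
        {
          "workflow_id": "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f",
          "owner": "0102030405060708091011121314151617181920",
          "created_at": 1234567890,
          "status": 0,
          "workflow_name": "example-workflow",
          "binary_url": "https://example.com/binary.wasm",
          "config_url": "https://example.com/config.json",
          "tag": "v1.0.0",
          "don_family": "workflow"
        }
      ]
    }`
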
+type FileWorkflowSourceData struct { + // Workflows is the list of workflow metadata entries + Workflows []FileWorkflowMetadata `json:"workflows"` +} + +// FileWorkflowSource implements WorkflowMetadataSource by reading from a JSON file. +type FileWorkflowSource struct { + lggr logger.Logger + filePath string + mu sync.RWMutex +} + +// NewFileWorkflowSourceWithPath creates a new file-based workflow source with a custom path. +// Returns an error if the file does not exist - a configured file source must have a valid file. +func NewFileWorkflowSourceWithPath(lggr logger.Logger, path string) (*FileWorkflowSource, error) { + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, errors.New("workflow metadata file does not exist: " + path) + } + return &FileWorkflowSource{ + lggr: lggr.Named(FileWorkflowSourceName), + filePath: path, + }, nil +} + +// ListWorkflowMetadata reads the JSON file and returns workflow metadata filtered by DON families. +func (f *FileWorkflowSource) ListWorkflowMetadata(ctx context.Context, don capabilities.DON) ([]WorkflowMetadataView, *commontypes.Head, error) { + f.tryInitialize(ctx) + + f.mu.RLock() + defer f.mu.RUnlock() + + filePath := f.filePath + + // Read file contents + data, err := os.ReadFile(filePath) + if err != nil { + return nil, nil, err + } + + // Handle empty file + if len(data) == 0 { + f.lggr.Debugw("Workflow metadata file is empty, returning empty list", "path", filePath) + return []WorkflowMetadataView{}, f.syntheticHead(), nil + } + + // Parse JSON + var sourceData FileWorkflowSourceData + if err := json.Unmarshal(data, &sourceData); err != nil { + return nil, nil, err + } + + // Build a set of DON families for efficient lookup + donFamilySet := make(map[string]bool) + for _, family := range don.Families { + donFamilySet[family] = true + } + + // Filter and convert workflows + workflows := make([]WorkflowMetadataView, 0, len(sourceData.Workflows)) + for _, wf := range sourceData.Workflows { + // Filter by DON family + if !donFamilySet[wf.DonFamily] { + continue + } + + // Convert to WorkflowMetadataView + view, err := f.toWorkflowMetadataView(wf) + if err != nil { + f.lggr.Warnw("Failed to parse workflow metadata, skipping", + "source", FileWorkflowSourceName, + "workflowName", wf.WorkflowName, + "error", err) + continue + } + + workflows = append(workflows, view) + } + + f.lggr.Debugw("Loaded workflows from file", + "path", filePath, + "totalInFile", len(sourceData.Workflows), + "matchingDON", len(workflows), + "donFamilies", don.Families) + + return workflows, f.syntheticHead(), nil +} + +func (f *FileWorkflowSource) Name() string { + return FileWorkflowSourceName +} + +// Ready returns nil if the file exists, or an error if it doesn't. +func (f *FileWorkflowSource) Ready() error { + if _, err := os.Stat(f.filePath); os.IsNotExist(err) { + return errors.New("workflow metadata file does not exist: " + f.filePath) + } + return nil +} + +// tryInitialize always returns true (file validated in constructor). +func (f *FileWorkflowSource) tryInitialize(_ context.Context) bool { + return true +} + +// toWorkflowMetadataView converts a FileWorkflowMetadata to a WorkflowMetadataView. 
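A minimal read-path sketch under assumed inputs (the file path is hypothetical; the DON family matches the sample JSON above):

    // listFromFile constructs the source and lists metadata for one DON.
    func listFromFile(ctx context.Context, lggr logger.Logger) error {
        src, err := NewFileWorkflowSourceWithPath(lggr, "/etc/cre/workflows.json") // hypothetical path
        if err != nil {
            return err
        }
        don := capabilities.DON{ID: 1, Families: []string{"workflow"}}
        views, head, err := src.ListWorkflowMetadata(ctx, don)
        if err != nil {
            return err
        }
        lggr.Infow("file source", "workflows", len(views), "syntheticHeight", head.Height)
        return nil
    }
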
+func (f *FileWorkflowSource) toWorkflowMetadataView(wf FileWorkflowMetadata) (WorkflowMetadataView, error) { + // Parse workflow ID from hex string + workflowIDBytes, err := hex.DecodeString(wf.WorkflowID) + if err != nil { + return WorkflowMetadataView{}, errors.New("invalid workflow_id hex: " + err.Error()) + } + if len(workflowIDBytes) != 32 { + return WorkflowMetadataView{}, errors.New("workflow_id must be 32 bytes") + } + var workflowID types.WorkflowID + copy(workflowID[:], workflowIDBytes) + + // Parse owner from hex string + ownerBytes, err := hex.DecodeString(wf.Owner) + if err != nil { + return WorkflowMetadataView{}, errors.New("invalid owner hex: " + err.Error()) + } + + // Parse attributes if present + var attributes []byte + if wf.Attributes != "" { + attributes = []byte(wf.Attributes) + } + + return WorkflowMetadataView{ + WorkflowID: workflowID, + Owner: ownerBytes, + CreatedAt: wf.CreatedAt, + Status: wf.Status, + WorkflowName: wf.WorkflowName, + BinaryURL: wf.BinaryURL, + ConfigURL: wf.ConfigURL, + Tag: wf.Tag, + Attributes: attributes, + DonFamily: wf.DonFamily, + Source: FileWorkflowSourceName, + }, nil +} + +// syntheticHead creates a synthetic head for the file source. +// Since file sources don't have blockchain blocks, we use the current timestamp. +func (f *FileWorkflowSource) syntheticHead() *commontypes.Head { + now := time.Now().Unix() + var timestamp uint64 + if now >= 0 { // satisfies overflow check on linter + timestamp = uint64(now) + } + return &commontypes.Head{ + Height: strconv.FormatInt(now, 10), + Hash: []byte("file-source"), + Timestamp: timestamp, + } +} diff --git a/core/services/workflows/syncer/v2/file_workflow_source_test.go b/core/services/workflows/syncer/v2/file_workflow_source_test.go new file mode 100644 index 00000000000..ea66f9ac046 --- /dev/null +++ b/core/services/workflows/syncer/v2/file_workflow_source_test.go @@ -0,0 +1,324 @@ +package v2 + +import ( + "context" + "encoding/hex" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestFileWorkflowSource_FileNotExists(t *testing.T) { + lggr := logger.TestLogger(t) + _, err := NewFileWorkflowSourceWithPath(lggr, "/nonexistent/path/workflows.json") + require.Error(t, err) + assert.Contains(t, err.Error(), "does not exist") +} + +func TestFileWorkflowSource_ListWorkflowMetadata_EmptyFile(t *testing.T) { + lggr := logger.TestLogger(t) + + // Create a temp file + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err := os.WriteFile(tmpFile, []byte(""), 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + + ctx := context.Background() + don := capabilities.DON{ + ID: 1, + Families: []string{"workflow"}, + } + + workflows, head, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Empty(t, workflows) + assert.NotNil(t, head) +} + +func TestFileWorkflowSource_ListWorkflowMetadata_ValidFile(t *testing.T) { + lggr := logger.TestLogger(t) + + // Create workflow ID (32 bytes) + workflowID := make([]byte, 32) + for i := range workflowID { + workflowID[i] = byte(i) + } + + // Create owner (20 bytes for Ethereum address) + owner := make([]byte, 20) + for i := range owner { + owner[i] = byte(i + 100) + } + + sourceData := FileWorkflowSourceData{ + Workflows: 
[]FileWorkflowMetadata{ + { + WorkflowID: hex.EncodeToString(workflowID), + Owner: hex.EncodeToString(owner), + CreatedAt: 1234567890, + Status: WorkflowStatusActive, + WorkflowName: "test-workflow", + BinaryURL: "file:///path/to/binary.wasm", + ConfigURL: "file:///path/to/config.json", + Tag: "v1.0.0", + DonFamily: "workflow", + }, + { + WorkflowID: hex.EncodeToString(workflowID), + Owner: hex.EncodeToString(owner), + CreatedAt: 1234567891, + Status: WorkflowStatusActive, + WorkflowName: "other-workflow", + BinaryURL: "file:///path/to/other.wasm", + ConfigURL: "file:///path/to/other.json", + Tag: "v2.0.0", + DonFamily: "other-don", // Different DON family + }, + }, + } + + data, err := json.Marshal(sourceData) + require.NoError(t, err) + + // Create a temp file + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err = os.WriteFile(tmpFile, data, 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + + ctx := context.Background() + don := capabilities.DON{ + ID: 1, + Families: []string{"workflow"}, + } + + workflows, head, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, workflows, 1) // Only one matches the DON family + assert.NotNil(t, head) + + // Verify the workflow metadata + wf := workflows[0] + assert.Equal(t, "test-workflow", wf.WorkflowName) + assert.Equal(t, "file:///path/to/binary.wasm", wf.BinaryURL) + assert.Equal(t, "file:///path/to/config.json", wf.ConfigURL) + assert.Equal(t, "v1.0.0", wf.Tag) + assert.Equal(t, "workflow", wf.DonFamily) + assert.Equal(t, WorkflowStatusActive, wf.Status) + assert.Equal(t, uint64(1234567890), wf.CreatedAt) +} + +func TestFileWorkflowSource_ListWorkflowMetadata_MultipleDONFamilies(t *testing.T) { + lggr := logger.TestLogger(t) + + // Create workflow ID (32 bytes) + workflowID1 := make([]byte, 32) + workflowID2 := make([]byte, 32) + for i := range workflowID1 { + workflowID1[i] = byte(i) + workflowID2[i] = byte(i + 50) + } + + owner := make([]byte, 20) + for i := range owner { + owner[i] = byte(i + 100) + } + + sourceData := FileWorkflowSourceData{ + Workflows: []FileWorkflowMetadata{ + { + WorkflowID: hex.EncodeToString(workflowID1), + Owner: hex.EncodeToString(owner), + Status: WorkflowStatusActive, + WorkflowName: "workflow-a", + BinaryURL: "file:///a.wasm", + ConfigURL: "file:///a.json", + DonFamily: "family-a", + }, + { + WorkflowID: hex.EncodeToString(workflowID2), + Owner: hex.EncodeToString(owner), + Status: WorkflowStatusActive, + WorkflowName: "workflow-b", + BinaryURL: "file:///b.wasm", + ConfigURL: "file:///b.json", + DonFamily: "family-b", + }, + }, + } + + data, err := json.Marshal(sourceData) + require.NoError(t, err) + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err = os.WriteFile(tmpFile, data, 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + + ctx := context.Background() + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a", "family-b"}, + } + + workflows, _, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, workflows, 2) // Both workflows match +} + +func TestFileWorkflowSource_ListWorkflowMetadata_PausedWorkflow(t *testing.T) { + lggr := logger.TestLogger(t) + + workflowID := make([]byte, 32) + for i := range workflowID { + workflowID[i] = byte(i) + } + owner := make([]byte, 20) + + sourceData := FileWorkflowSourceData{ + Workflows: 
[]FileWorkflowMetadata{ + { + WorkflowID: hex.EncodeToString(workflowID), + Owner: hex.EncodeToString(owner), + Status: WorkflowStatusPaused, // Paused status + WorkflowName: "paused-workflow", + BinaryURL: "file:///paused.wasm", + ConfigURL: "file:///paused.json", + DonFamily: "workflow", + }, + }, + } + + data, err := json.Marshal(sourceData) + require.NoError(t, err) + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err = os.WriteFile(tmpFile, data, 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + + ctx := context.Background() + don := capabilities.DON{ + ID: 1, + Families: []string{"workflow"}, + } + + workflows, _, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, workflows, 1) + assert.Equal(t, WorkflowStatusPaused, workflows[0].Status) +} + +func TestFileWorkflowSource_Name(t *testing.T) { + lggr := logger.TestLogger(t) + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err := os.WriteFile(tmpFile, []byte("{}"), 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + assert.Equal(t, FileWorkflowSourceName, source.Name()) +} + +func TestFileWorkflowSource_Ready(t *testing.T) { + lggr := logger.TestLogger(t) + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err := os.WriteFile(tmpFile, []byte("{}"), 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + assert.NoError(t, source.Ready()) + + // Delete the file and check Ready returns error + err = os.Remove(tmpFile) + require.NoError(t, err) + assert.Error(t, source.Ready()) +} + +func TestFileWorkflowSource_InvalidJSON(t *testing.T) { + lggr := logger.TestLogger(t) + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err := os.WriteFile(tmpFile, []byte("invalid json"), 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + + ctx := context.Background() + don := capabilities.DON{ + ID: 1, + Families: []string{"workflow"}, + } + + _, _, err = source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) +} + +func TestFileWorkflowSource_InvalidWorkflowID(t *testing.T) { + lggr := logger.TestLogger(t) + + owner := make([]byte, 20) + + sourceData := FileWorkflowSourceData{ + Workflows: []FileWorkflowMetadata{ + { + WorkflowID: "invalid-hex", + Owner: hex.EncodeToString(owner), + Status: WorkflowStatusActive, + WorkflowName: "invalid-workflow", + BinaryURL: "file:///invalid.wasm", + ConfigURL: "file:///invalid.json", + DonFamily: "workflow", + }, + }, + } + + data, err := json.Marshal(sourceData) + require.NoError(t, err) + + tmpDir := t.TempDir() + tmpFile := filepath.Join(tmpDir, "workflows.json") + err = os.WriteFile(tmpFile, data, 0600) + require.NoError(t, err) + + source, err := NewFileWorkflowSourceWithPath(lggr, tmpFile) + require.NoError(t, err) + + ctx := context.Background() + don := capabilities.DON{ + ID: 1, + Families: []string{"workflow"}, + } + + // Invalid workflows are skipped, not errored + workflows, _, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Empty(t, workflows) +} diff --git a/core/services/workflows/syncer/v2/grpc_workflow_source.go b/core/services/workflows/syncer/v2/grpc_workflow_source.go new file mode 100644 index 00000000000..fffdc77a0f5 --- 
/dev/null +++ b/core/services/workflows/syncer/v2/grpc_workflow_source.go @@ -0,0 +1,375 @@ +package v2 + +import ( + "context" + "errors" + "fmt" + "math/rand/v2" + "strconv" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + nodeauthjwt "github.com/smartcontractkit/chainlink-common/pkg/nodeauth/jwt" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink-common/pkg/workflows/grpcsource" + pb "github.com/smartcontractkit/chainlink-protos/workflows/go/sources" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/types" +) + +const ( + // GRPCWorkflowSourceName is the name used for logging and identification. + GRPCWorkflowSourceName = "GRPCWorkflowSource" + + // Default configuration values + defaultPageSize int64 = 1000 + defaultMaxRetries int = 2 + defaultRetryBaseDelay time.Duration = 100 * time.Millisecond + defaultRetryMaxDelay time.Duration = 5 * time.Second +) + +// grpcClient is an interface for the GRPC client to enable testing. +type grpcClient interface { + ListWorkflowMetadata(ctx context.Context, families []string, start, limit int64) ([]*pb.WorkflowMetadata, bool, error) + Close() error +} + +// GRPCWorkflowSource implements WorkflowMetadataSource by fetching from a GRPC server. +// This enables external systems to provide workflows for deployment. +type GRPCWorkflowSource struct { + lggr logger.Logger + client grpcClient + name string + pageSize int64 + maxRetries int + retryBaseDelay time.Duration + retryMaxDelay time.Duration + mu sync.RWMutex + ready bool +} + +// GRPCWorkflowSourceConfig holds configuration for creating a GRPCWorkflowSource. +type GRPCWorkflowSourceConfig struct { + // URL is the GRPC server address (e.g., "localhost:50051") + URL string + // Name is a human-readable identifier for this source + Name string + // TLSEnabled determines whether to use TLS for the connection + TLSEnabled bool + // JWTGenerator is the JWT generator for authentication; when set, JWT auth is enabled (matching the billing/storage pattern) + JWTGenerator nodeauthjwt.JWTGenerator + // PageSize is the number of workflows to fetch per page (default: 1000) + PageSize int64 + // MaxRetries is the maximum number of retry attempts for transient errors (default: 2) + MaxRetries int + // RetryBaseDelay is the base delay for exponential backoff (default: 100ms) + RetryBaseDelay time.Duration + // RetryMaxDelay is the maximum delay between retries (default: 5s) + RetryMaxDelay time.Duration +} + +// NewGRPCWorkflowSource creates a new GRPC-based workflow source. +func NewGRPCWorkflowSource(lggr logger.Logger, cfg GRPCWorkflowSourceConfig) (*GRPCWorkflowSource, error) { + if cfg.URL == "" { + return nil, errors.New("GRPC URL is required") + } + + sourceName := cfg.Name + if sourceName == "" { + sourceName = GRPCWorkflowSourceName + } + + // Build client options - JWT auth is enabled whenever a generator is provided + clientOpts := []grpcsource.ClientOption{ + grpcsource.WithTLS(cfg.TLSEnabled), + } + if cfg.JWTGenerator != nil { + clientOpts = append(clientOpts, grpcsource.WithJWTGenerator(cfg.JWTGenerator)) + } + + client, err := grpcsource.NewClient(cfg.URL, sourceName, clientOpts...) + if err != nil { + return nil, err + } + + return newGRPCWorkflowSourceWithClient(lggr, client, cfg) +} + +// NewGRPCWorkflowSourceWithClient creates a new GRPC-based workflow source with an injected client.
+// This is useful for testing with mock clients. +func NewGRPCWorkflowSourceWithClient(lggr logger.Logger, client grpcClient, cfg GRPCWorkflowSourceConfig) (*GRPCWorkflowSource, error) { + return newGRPCWorkflowSourceWithClient(lggr, client, cfg) +} + +func newGRPCWorkflowSourceWithClient(lggr logger.Logger, client grpcClient, cfg GRPCWorkflowSourceConfig) (*GRPCWorkflowSource, error) { + sourceName := cfg.Name + if sourceName == "" { + sourceName = GRPCWorkflowSourceName + } + + pageSize := cfg.PageSize + if pageSize <= 0 { + pageSize = defaultPageSize + } + + maxRetries := cfg.MaxRetries + if maxRetries <= 0 { + maxRetries = defaultMaxRetries + } + + retryBaseDelay := cfg.RetryBaseDelay + if retryBaseDelay <= 0 { + retryBaseDelay = defaultRetryBaseDelay + } + + retryMaxDelay := cfg.RetryMaxDelay + if retryMaxDelay <= 0 { + retryMaxDelay = defaultRetryMaxDelay + } + + return &GRPCWorkflowSource{ + lggr: lggr.Named(sourceName), + client: client, + name: sourceName, + pageSize: pageSize, + maxRetries: maxRetries, + retryBaseDelay: retryBaseDelay, + retryMaxDelay: retryMaxDelay, + ready: true, + }, nil +} + +// ListWorkflowMetadata fetches workflow metadata from the GRPC source. +// Pagination is handled internally - this method fetches all pages and returns all workflows. +// Transient errors (Unavailable, ResourceExhausted) are retried with exponential backoff. +// Returns a synthetic head since GRPC sources don't have blockchain state. +func (g *GRPCWorkflowSource) ListWorkflowMetadata(ctx context.Context, don capabilities.DON) ([]WorkflowMetadataView, *commontypes.Head, error) { + g.tryInitialize(ctx) + + g.mu.RLock() + defer g.mu.RUnlock() + + if !g.ready { + return nil, nil, errors.New("GRPC source not ready") + } + + var allViews []WorkflowMetadataView + var start int64 + + // Fetch all pages + for { + workflows, hasMore, err := g.fetchPageWithRetry(ctx, don.Families, start) + if err != nil { + return nil, nil, err + } + + // Convert workflows to views, skipping invalid ones + for _, wf := range workflows { + view, err := g.toWorkflowMetadataView(wf) + if err != nil { + g.lggr.Warnw("Failed to parse workflow metadata, skipping", + "workflowName", wf.GetWorkflowName(), + "error", err) + continue + } + allViews = append(allViews, view) + } + + // Check if we've fetched all pages + if !hasMore { + break + } + + // Move to next page + start += g.pageSize + } + + g.lggr.Debugw("Loaded workflows from GRPC source", + "count", len(allViews), + "donID", don.ID, + "donFamilies", don.Families) + + return allViews, g.syntheticHead(), nil +} + +// fetchPageWithRetry fetches a single page with retry logic for transient errors. 
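For context, a construction sketch using only the config fields defined above; the endpoint name and the jwtGen generator are assumptions, not values from this diff:

    // newExampleSource wires a gRPC source with explicit TLS and JWT auth.
    // Zero-valued PageSize/MaxRetries/Retry*Delay fall back to the defaults above.
    func newExampleSource(lggr logger.Logger, jwtGen nodeauthjwt.JWTGenerator) (*GRPCWorkflowSource, error) {
        return NewGRPCWorkflowSource(lggr, GRPCWorkflowSourceConfig{
            URL:          "workflow-source.example.com:50051", // hypothetical endpoint
            Name:         "example-source",
            TLSEnabled:   true,
            JWTGenerator: jwtGen,
        })
    }

And to make the paging loop concrete, this is the slice-and-flag contract the loop assumes a conforming server implements (a standalone sketch; the real wire protocol lives in grpcsource):

    // pageOf returns the window a server would send for (start, limit) plus whether
    // more pages remain; ListWorkflowMetadata advances start by pageSize until
    // hasMore is false.
    func pageOf[T any](all []T, start, limit int64) (page []T, hasMore bool) {
        if start >= int64(len(all)) {
            return nil, false
        }
        end := start + limit
        if end > int64(len(all)) {
            end = int64(len(all))
        }
        return all[start:end], end < int64(len(all))
    }
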
+func (g *GRPCWorkflowSource) fetchPageWithRetry(ctx context.Context, families []string, start int64) ([]*pb.WorkflowMetadata, bool, error) { + var lastErr error + + for attempt := 0; attempt <= g.maxRetries; attempt++ { + // Check context before making request + if ctx.Err() != nil { + return nil, false, ctx.Err() + } + + workflows, hasMore, err := g.client.ListWorkflowMetadata(ctx, families, start, g.pageSize) + if err == nil { + return workflows, hasMore, nil + } + + lastErr = err + + // Check if this is a retryable error + if !g.isRetryableError(err) { + g.lggr.Errorw("Non-retryable error from GRPC source", + "error", err, + "start", start, + "pageSize", g.pageSize) + return nil, false, err + } + + // Log retry attempt + g.lggr.Warnw("Retryable error from GRPC source", + "error", err, + "attempt", attempt+1, + "maxRetries", g.maxRetries) + + // If we've exhausted retries, return the error + if attempt >= g.maxRetries { + g.lggr.Errorw("Max retries exceeded for GRPC request", + "error", err, + "maxRetries", g.maxRetries) + return nil, false, fmt.Errorf("max retries exceeded: %w", err) + } + + // Calculate backoff with jitter + backoff := g.calculateBackoff(attempt + 1) + + // Wait for backoff or context cancellation + select { + case <-ctx.Done(): + return nil, false, ctx.Err() + case <-time.After(backoff): + g.lggr.Debugw("Retrying GRPC request", + "attempt", attempt+1, + "delay", backoff, + "lastError", lastErr) + } + } + + return nil, false, lastErr +} + +// isRetryableError determines if an error should be retried. +func (g *GRPCWorkflowSource) isRetryableError(err error) bool { + st, ok := status.FromError(err) + if !ok { + return false + } + + switch st.Code() { + case codes.Unavailable, codes.ResourceExhausted: + return true + default: + return false + } +} + +// calculateBackoff calculates the backoff duration for a given attempt with jitter. +func (g *GRPCWorkflowSource) calculateBackoff(attempt int) time.Duration { + // Exponential backoff: baseDelay * 2^(attempt-1) + backoff := g.retryBaseDelay * time.Duration(1<<(attempt-1)) + + // Apply jitter (0.5 to 1.5 multiplier) - math/rand/v2 is auto-seeded and concurrent-safe + jitter := 0.5 + rand.Float64() //nolint:gosec // G404: weak random is fine for retry jitter + backoff = time.Duration(float64(backoff) * jitter) + + // Cap at max delay + if backoff > g.retryMaxDelay { + backoff = g.retryMaxDelay + } + + return backoff +} + +func (g *GRPCWorkflowSource) Name() string { + return g.name +} + +// Ready returns nil if the GRPC client is connected. +func (g *GRPCWorkflowSource) Ready() error { + g.mu.RLock() + defer g.mu.RUnlock() + + if !g.ready { + return errors.New("GRPC source not ready") + } + return nil +} + +// tryInitialize returns the current ready state (GRPC client initialized in constructor). +func (g *GRPCWorkflowSource) tryInitialize(_ context.Context) bool { + g.mu.RLock() + defer g.mu.RUnlock() + return g.ready +} + +// Close closes the underlying GRPC connection. +func (g *GRPCWorkflowSource) Close() error { + g.mu.Lock() + defer g.mu.Unlock() + + g.ready = false + if g.client != nil { + return g.client.Close() + } + return nil +} + +// toWorkflowMetadataView converts a protobuf WorkflowMetadata to a WorkflowMetadataView. 
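With the defaults above, the schedule works out to attempt 1 in [50ms, 150ms), attempt 2 in [100ms, 300ms), attempt 3 in [200ms, 600ms), always capped at retryMaxDelay. A helper expressing that window, mirroring the formula in calculateBackoff (a sketch for tests or documentation):

    // backoffWindow returns the [low, high) range calculateBackoff can produce for an
    // attempt before capping: base * 2^(attempt-1) scaled by the [0.5, 1.5) jitter.
    func backoffWindow(base time.Duration, attempt int) (low, high time.Duration) {
        raw := base * time.Duration(1<<(attempt-1))
        return raw / 2, raw + raw/2
    }
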
+func (g *GRPCWorkflowSource) toWorkflowMetadataView(wf *pb.WorkflowMetadata) (WorkflowMetadataView, error) { + // Validate workflow ID length + workflowIDBytes := wf.GetWorkflowId() + if len(workflowIDBytes) != 32 { + return WorkflowMetadataView{}, fmt.Errorf("workflow_id must be 32 bytes, got %d", len(workflowIDBytes)) + } + var workflowID types.WorkflowID + copy(workflowID[:], workflowIDBytes) + + // Get owner bytes directly + ownerBytes := wf.GetOwner() + + // Get attributes directly (already bytes in proto) + attributes := wf.GetAttributes() + + // Safe conversion of status (uint32 to uint8) + statusVal := wf.GetStatus() + if statusVal > 255 { + return WorkflowMetadataView{}, fmt.Errorf("status value %d exceeds uint8 range", statusVal) + } + + return WorkflowMetadataView{ + WorkflowID: workflowID, + Owner: ownerBytes, + CreatedAt: wf.GetCreatedAt(), + Status: uint8(statusVal), + WorkflowName: wf.GetWorkflowName(), + BinaryURL: wf.GetBinaryUrl(), + ConfigURL: wf.GetConfigUrl(), + Tag: wf.GetTag(), + Attributes: attributes, + DonFamily: wf.GetDonFamily(), + Source: g.name, + }, nil +} + +// syntheticHead returns a synthetic head for GRPC sources. +// GRPC sources don't have blockchain state, so we generate a synthetic head +// with the current timestamp for consistency with the WorkflowMetadataSource interface. +func (g *GRPCWorkflowSource) syntheticHead() *commontypes.Head { + now := time.Now().Unix() + var timestamp uint64 + if now >= 0 { + timestamp = uint64(now) + } + return &commontypes.Head{ + Height: strconv.FormatInt(now, 10), + Hash: []byte("grpc-source"), + Timestamp: timestamp, + } +} diff --git a/core/services/workflows/syncer/v2/grpc_workflow_source_test.go b/core/services/workflows/syncer/v2/grpc_workflow_source_test.go new file mode 100644 index 00000000000..8baf740f5dd --- /dev/null +++ b/core/services/workflows/syncer/v2/grpc_workflow_source_test.go @@ -0,0 +1,521 @@ +package v2 + +import ( + "context" + "encoding/hex" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + "github.com/smartcontractkit/chainlink-common/pkg/workflows" + pb "github.com/smartcontractkit/chainlink-protos/workflows/go/sources" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +// Test constants for workflow metadata +const ( + grpcTestOwnerHex = "0102030405060708091011121314151617181920" + grpcTestBinaryURL = "https://example.com/binary.wasm" + grpcTestConfigURL = "https://example.com/config.json" +) + +// grpcTestBinaryContent and grpcTestConfigContent are mock content used for canonical workflowID calculation +var ( + grpcTestBinaryContent = []byte("mock-wasm-binary-content") + grpcTestConfigContent = []byte("{}") +) + +// mockGRPCClient is a mock implementation of grpcClient for testing. +// It supports stateless pagination - callers provide all workflow data and the mock +// returns appropriate slices based on offset/limit parameters. 
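A compact way to wire this mock (defined just below) into the source under test; all names are from this diff:

    // newPagedTestSource builds a GRPCWorkflowSource over canned metadata with a
    // small page size so pagination paths are exercised.
    func newPagedTestSource(t *testing.T, wfs []*pb.WorkflowMetadata) *GRPCWorkflowSource {
        src, err := NewGRPCWorkflowSourceWithClient(
            logger.TestLogger(t),
            &mockGRPCClient{allWorkflows: wfs},
            GRPCWorkflowSourceConfig{Name: "test-source", PageSize: 2},
        )
        require.NoError(t, err)
        return src
    }
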
+type mockGRPCClient struct { + // allWorkflows contains all workflows to be returned (used for stateless pagination) + allWorkflows []*pb.WorkflowMetadata + // err is the error to return (if set, takes precedence) + err error + // errSequence allows returning different errors on successive calls (for retry testing) + errSequence []error + // callCount tracks how many times ListWorkflowMetadata was called + callCount atomic.Int32 + // closed tracks if Close was called + closed bool + // closeErr is the error to return from Close + closeErr error +} + +func (m *mockGRPCClient) ListWorkflowMetadata(_ context.Context, _ []string, offset, limit int64) ([]*pb.WorkflowMetadata, bool, error) { + callNum := int(m.callCount.Add(1)) - 1 // 0-indexed call number + + // Check if there's a specific error for this call number + if callNum < len(m.errSequence) && m.errSequence[callNum] != nil { + return nil, false, m.errSequence[callNum] + } + + // Check for general error + if m.err != nil { + return nil, false, m.err + } + + // Stateless pagination based on offset/limit + start := int(offset) + if start >= len(m.allWorkflows) { + return []*pb.WorkflowMetadata{}, false, nil + } + + end := start + int(limit) + if end > len(m.allWorkflows) { + end = len(m.allWorkflows) + } + + hasMore := end < len(m.allWorkflows) + return m.allWorkflows[start:end], hasMore, nil +} + +func (m *mockGRPCClient) Close() error { + m.closed = true + return m.closeErr +} + +// CallCount returns the number of times ListWorkflowMetadata was called +func (m *mockGRPCClient) CallCount() int { + return int(m.callCount.Load()) +} + +// createTestProtoWorkflow creates a test protobuf WorkflowMetadata for testing. +// It uses the canonical workflow ID calculation to ensure test data is realistic. 
+func createTestProtoWorkflow(name string, family string) *pb.WorkflowMetadata { + owner, err := hex.DecodeString(grpcTestOwnerHex) + if err != nil { + panic("failed to decode owner hex: " + err.Error()) + } + + // Use canonical workflow ID calculation + workflowID, err := workflows.GenerateWorkflowID(owner, name, grpcTestBinaryContent, grpcTestConfigContent, "") + if err != nil { + panic("failed to generate workflow ID: " + err.Error()) + } + + return &pb.WorkflowMetadata{ + WorkflowId: workflowID[:], + Owner: owner, + CreatedAt: 1234567890, + Status: 0, // Active + WorkflowName: name, + BinaryUrl: grpcTestBinaryURL, + ConfigUrl: grpcTestConfigURL, + Tag: "v1.0.0", + Attributes: []byte("{}"), + DonFamily: family, + } +} + +func TestGRPCWorkflowSource_NewGRPCWorkflowSource_EmptyURL(t *testing.T) { + lggr := logger.TestLogger(t) + + _, err := NewGRPCWorkflowSource(lggr, GRPCWorkflowSourceConfig{ + URL: "", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "GRPC URL is required") +} + +func TestGRPCWorkflowSource_ListWorkflowMetadata_Success(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockClient := &mockGRPCClient{ + allWorkflows: []*pb.WorkflowMetadata{ + createTestProtoWorkflow("workflow-1", "family-a"), + createTestProtoWorkflow("workflow-2", "family-a"), + }, + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, head, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, wfs, 2) + require.NotNil(t, head) + assert.NotEmpty(t, head.Height) + assert.Equal(t, []byte("grpc-source"), head.Hash) + assert.Equal(t, "workflow-1", wfs[0].WorkflowName) + assert.Equal(t, "workflow-2", wfs[1].WorkflowName) + assert.Equal(t, 1, mockClient.CallCount()) +} + +func TestGRPCWorkflowSource_ListWorkflowMetadata_Pagination(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + // Configure mock with all workflows - pagination is handled stateless via offset/limit + mockClient := &mockGRPCClient{ + allWorkflows: []*pb.WorkflowMetadata{ + createTestProtoWorkflow("workflow-1", "family-a"), + createTestProtoWorkflow("workflow-2", "family-a"), + createTestProtoWorkflow("workflow-3", "family-a"), + }, + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + PageSize: 2, // Small page size to test pagination + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, head, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, wfs, 3) // 2 from first page + 1 from second page + require.NotNil(t, head) + assert.NotEmpty(t, head.Height) + assert.Equal(t, 2, mockClient.CallCount()) // Two pages fetched +} + +func TestGRPCWorkflowSource_ListWorkflowMetadata_InvalidWorkflow(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + // Create a workflow with invalid ID (not 32 bytes) + invalidWorkflow := &pb.WorkflowMetadata{ + WorkflowId: []byte{1, 2, 3}, // Invalid: only 3 bytes + WorkflowName: "invalid-workflow", + } + + mockClient := &mockGRPCClient{ + allWorkflows: []*pb.WorkflowMetadata{ + createTestProtoWorkflow("valid-workflow", "family-a"), + invalidWorkflow, + }, + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, 
GRPCWorkflowSourceConfig{ + Name: "test-source", + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, head, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, wfs, 1) // Only valid workflow is returned + assert.Equal(t, "valid-workflow", wfs[0].WorkflowName) + require.NotNil(t, head) + assert.NotEmpty(t, head.Height) +} + +func TestGRPCWorkflowSource_Retry_Unavailable(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + // Use errSequence to return errors on first two calls, then succeed + mockClient := &mockGRPCClient{ + allWorkflows: []*pb.WorkflowMetadata{ + createTestProtoWorkflow("workflow-1", "family-a"), + }, + errSequence: []error{ + status.Error(codes.Unavailable, "server unavailable"), + status.Error(codes.Unavailable, "server unavailable"), + nil, // Third call succeeds + }, + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + MaxRetries: 2, + RetryBaseDelay: 1 * time.Millisecond, // Fast retries for testing + RetryMaxDelay: 10 * time.Millisecond, + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, head, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, wfs, 1) + require.NotNil(t, head) + assert.NotEmpty(t, head.Height) + assert.Equal(t, 3, mockClient.CallCount()) // 2 failures + 1 success +} + +func TestGRPCWorkflowSource_Retry_ResourceExhausted(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockClient := &mockGRPCClient{ + allWorkflows: []*pb.WorkflowMetadata{ + createTestProtoWorkflow("workflow-1", "family-a"), + }, + errSequence: []error{ + status.Error(codes.ResourceExhausted, "rate limited"), + nil, // Second call succeeds + }, + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + MaxRetries: 2, + RetryBaseDelay: 1 * time.Millisecond, + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + wfs, _, err := source.ListWorkflowMetadata(ctx, don) + require.NoError(t, err) + assert.Len(t, wfs, 1) + assert.Equal(t, 2, mockClient.CallCount()) // 1 failure + 1 success +} + +func TestGRPCWorkflowSource_Retry_MaxExceeded(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + // Always return unavailable error + mockClient := &mockGRPCClient{ + err: status.Error(codes.Unavailable, "server unavailable"), + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + MaxRetries: 2, + RetryBaseDelay: 1 * time.Millisecond, + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + _, _, err = source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) + assert.Contains(t, err.Error(), "max retries") + assert.Equal(t, 3, mockClient.CallCount()) // 1 initial + 2 retries +} + +func TestGRPCWorkflowSource_Retry_NonRetryable(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + mockClient := &mockGRPCClient{ + err: status.Error(codes.InvalidArgument, "bad request"), + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + MaxRetries: 2, + RetryBaseDelay: 1 * time.Millisecond, + }) + require.NoError(t, err) + + don 
:= capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + _, _, err = source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) + assert.Equal(t, 1, mockClient.CallCount()) // No retries for non-retryable errors +} + +func TestGRPCWorkflowSource_Backoff_Jitter(t *testing.T) { + lggr := logger.TestLogger(t) + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{ + Name: "test-source", + RetryBaseDelay: 100 * time.Millisecond, + RetryMaxDelay: 2 * time.Second, + }) + require.NoError(t, err) + + // Test backoff calculation + backoff1 := source.calculateBackoff(1) + backoff2 := source.calculateBackoff(2) + backoff3 := source.calculateBackoff(3) + + // Backoff should increase exponentially (with jitter) + // Attempt 1: baseDelay * 2^0 * jitter = 100ms * 1 * [0.5, 1.5] = [50ms, 150ms] + assert.GreaterOrEqual(t, backoff1, 50*time.Millisecond) + assert.LessOrEqual(t, backoff1, 150*time.Millisecond) + + // Attempt 2: baseDelay * 2^1 * jitter = 100ms * 2 * [0.5, 1.5] = [100ms, 300ms] + assert.GreaterOrEqual(t, backoff2, 100*time.Millisecond) + assert.LessOrEqual(t, backoff2, 300*time.Millisecond) + + // Attempt 3: baseDelay * 2^2 * jitter = 100ms * 4 * [0.5, 1.5] = [200ms, 600ms] + assert.GreaterOrEqual(t, backoff3, 200*time.Millisecond) + assert.LessOrEqual(t, backoff3, 600*time.Millisecond) +} + +func TestGRPCWorkflowSource_ContextCancellation(t *testing.T) { + lggr := logger.TestLogger(t) + ctx, cancel := context.WithCancel(context.Background()) + + // Always return unavailable to trigger retries + mockClient := &mockGRPCClient{ + err: status.Error(codes.Unavailable, "server unavailable"), + } + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + MaxRetries: 5, // High retry count + RetryBaseDelay: 100 * time.Millisecond, + }) + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + // Cancel context immediately after first call + go func() { + time.Sleep(10 * time.Millisecond) + cancel() + }() + + _, _, err = source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) + assert.ErrorIs(t, err, context.Canceled) +} + +func TestGRPCWorkflowSource_ConfigDefaults(t *testing.T) { + lggr := logger.TestLogger(t) + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{}) + require.NoError(t, err) + + // Verify defaults are applied + assert.Equal(t, defaultPageSize, source.pageSize) + assert.Equal(t, defaultMaxRetries, source.maxRetries) + assert.Equal(t, defaultRetryBaseDelay, source.retryBaseDelay) + assert.Equal(t, defaultRetryMaxDelay, source.retryMaxDelay) + assert.Equal(t, GRPCWorkflowSourceName, source.name) // Default name +} + +func TestGRPCWorkflowSource_Ready(t *testing.T) { + lggr := logger.TestLogger(t) + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{ + Name: "test-source", + }) + require.NoError(t, err) + + // Initially ready + assert.NoError(t, source.Ready()) + + // After close, not ready + err = source.Close() + require.NoError(t, err) + assert.Error(t, source.Ready()) +} + +func TestGRPCWorkflowSource_ListWorkflowMetadata_NotReady(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := context.Background() + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{ + Name: "test-source", + }) + require.NoError(t, err) + + // Close the source to make it not ready + err = 
source.Close() + require.NoError(t, err) + + don := capabilities.DON{ + ID: 1, + Families: []string{"family-a"}, + } + + _, _, err = source.ListWorkflowMetadata(ctx, don) + require.Error(t, err) + assert.Contains(t, err.Error(), "not ready") +} + +func TestGRPCWorkflowSource_Close(t *testing.T) { + lggr := logger.TestLogger(t) + + mockClient := &mockGRPCClient{} + + source, err := NewGRPCWorkflowSourceWithClient(lggr, mockClient, GRPCWorkflowSourceConfig{ + Name: "test-source", + }) + require.NoError(t, err) + + // Initially ready + assert.NoError(t, source.Ready()) + assert.False(t, mockClient.closed) + + // Close + err = source.Close() + require.NoError(t, err) + + // Now not ready and client is closed + require.Error(t, source.Ready()) + assert.True(t, mockClient.closed) +} + +func TestGRPCWorkflowSource_Name(t *testing.T) { + lggr := logger.TestLogger(t) + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{ + Name: "my-custom-source", + }) + require.NoError(t, err) + + assert.Equal(t, "my-custom-source", source.Name()) +} + +func TestGRPCWorkflowSource_Name_Default(t *testing.T) { + lggr := logger.TestLogger(t) + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{}) + require.NoError(t, err) + + assert.Equal(t, GRPCWorkflowSourceName, source.Name()) +} + +func TestGRPCWorkflowSource_syntheticHead(t *testing.T) { + lggr := logger.TestLogger(t) + + source, err := NewGRPCWorkflowSourceWithClient(lggr, &mockGRPCClient{}, GRPCWorkflowSourceConfig{ + Name: "test-source", + }) + require.NoError(t, err) + + head := source.syntheticHead() + require.NotNil(t, head) + // Should return synthetic head with current timestamp + assert.NotEmpty(t, head.Height) + assert.Equal(t, []byte("grpc-source"), head.Hash) + assert.Positive(t, head.Timestamp) +} diff --git a/core/services/workflows/syncer/v2/handler.go b/core/services/workflows/syncer/v2/handler.go index e3040b6c880..3dfcc8d7230 100644 --- a/core/services/workflows/syncer/v2/handler.go +++ b/core/services/workflows/syncer/v2/handler.go @@ -209,6 +209,7 @@ func (h *eventHandler) Handle(ctx context.Context, event Event) error { platform.KeyOrganizationID, orgID, platform.WorkflowRegistryAddress, h.workflowRegistryAddress, platform.WorkflowRegistryChainSelector, h.workflowRegistryChainSelector, + platform.KeyWorkflowSource, payload.Source, ) var err error @@ -248,6 +249,7 @@ func (h *eventHandler) Handle(ctx context.Context, event Event) error { platform.KeyOrganizationID, orgID, platform.WorkflowRegistryAddress, h.workflowRegistryAddress, platform.WorkflowRegistryChainSelector, h.workflowRegistryChainSelector, + platform.KeyWorkflowSource, payload.Source, ) var err error @@ -299,6 +301,7 @@ func (h *eventHandler) Handle(ctx context.Context, event Event) error { platform.KeyOrganizationID, orgID, platform.WorkflowRegistryAddress, h.workflowRegistryAddress, platform.WorkflowRegistryChainSelector, h.workflowRegistryChainSelector, + platform.KeyWorkflowSource, payload.Source, ) var herr error @@ -395,7 +398,7 @@ func (h *eventHandler) workflowRegisteredEvent( return fmt.Errorf("could not clean up old engine: %w", cleanupErr) } - return h.tryEngineCreate(ctx, spec) + return h.tryEngineCreate(ctx, spec, payload.Source) } func toSpecStatus(s uint8) job.WorkflowSpecStatus { @@ -617,7 +620,7 @@ func (h *eventHandler) tryEngineCleanup(workflowID types.WorkflowID) error { // tryEngineCreate attempts to create a new workflow engine, start it, and register it with 
the engine registry. // This function waits for the engine to complete initialization (including trigger subscriptions) before returning, // ensuring that the workflowActivated event accurately reflects the deployment status including trigger registration. -func (h *eventHandler) tryEngineCreate(ctx context.Context, spec *job.WorkflowSpec) error { +func (h *eventHandler) tryEngineCreate(ctx context.Context, spec *job.WorkflowSpec, source string) error { // Ensure the capabilities registry is ready before creating any Engine instances. // This should be guaranteed by the Workflow Registry Syncer. if err := h.ensureCapRegistryReady(ctx); err != nil { @@ -702,8 +705,8 @@ func (h *eventHandler) tryEngineCreate(ctx context.Context, spec *job.WorkflowSp } } - // Engine is fully initialized, add to registry - if err := h.engineRegistry.Add(wid, engine); err != nil { + // Engine is fully initialized, add to registry with source tracking + if err := h.engineRegistry.Add(wid, source, engine); err != nil { if closeErr := engine.Close(); closeErr != nil { return fmt.Errorf("failed to close workflow engine: %w during invariant violation: %w", closeErr, err) } diff --git a/core/services/workflows/syncer/v2/handler_test.go b/core/services/workflows/syncer/v2/handler_test.go index cad1ecd89a8..e61b7a7e84c 100644 --- a/core/services/workflows/syncer/v2/handler_test.go +++ b/core/services/workflows/syncer/v2/handler_test.go @@ -357,7 +357,7 @@ func Test_workflowRegisteredHandler(t *testing.T) { }, validationFn: func(t *testing.T, ctx context.Context, event WorkflowRegisteredEvent, h *eventHandler, s *artifacts.Store, wfOwner []byte, wfName string, wfID types.WorkflowID, fetcher *mockFetcher, binaryURL string, configURL string) { me := &mockEngine{} - err := h.engineRegistry.Add(wfID, me) + err := h.engineRegistry.Add(wfID, event.Source, me) require.NoError(t, err) err = h.workflowRegisteredEvent(ctx, event) require.NoError(t, err) @@ -396,7 +396,7 @@ func Test_workflowRegisteredHandler(t *testing.T) { validationFn: func(t *testing.T, ctx context.Context, event WorkflowRegisteredEvent, h *eventHandler, s *artifacts.Store, wfOwner []byte, wfName string, wfID types.WorkflowID, fetcher *mockFetcher, binaryURL string, configURL string) { me := &mockEngine{} oldWfIDBytes := [32]byte{0, 1, 2, 3, 5} - err := h.engineRegistry.Add(oldWfIDBytes, me) + err := h.engineRegistry.Add(oldWfIDBytes, event.Source, me) require.NoError(t, err) err = h.workflowRegisteredEvent(ctx, event) require.NoError(t, err) diff --git a/core/services/workflows/syncer/v2/metrics.go b/core/services/workflows/syncer/v2/metrics.go index 09703e45453..8ebc98adaa3 100644 --- a/core/services/workflows/syncer/v2/metrics.go +++ b/core/services/workflows/syncer/v2/metrics.go @@ -16,6 +16,12 @@ type metrics struct { fetchedWorkflows metric.Int64Gauge runningWorkflows metric.Int64Gauge completedSyncs metric.Int64Counter + + // Per-source metrics for multi-source observability + sourceHealth metric.Int64Gauge // 1=healthy, 0=unhealthy per source + workflowsPerSource metric.Int64Gauge // workflows fetched per source + sourceFetchDuration metric.Int64Histogram // fetch latency per source + sourceFetchErrors metric.Int64Counter // error count per source } func (m *metrics) recordHandleDuration(ctx context.Context, d time.Duration, event string, success bool) { @@ -37,6 +43,25 @@ func (m *metrics) incrementCompletedSyncs(ctx context.Context) { m.completedSyncs.Add(ctx, 1) } +// recordSourceFetch records metrics for a source fetch operation. 
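The syncer-side caller is not part of this excerpt, so as an assumed call pattern: time the fetch, then hand the outcome to recordSourceFetch (defined just below):

    // fetchAndRecord times one source fetch and feeds the per-source metrics.
    // Sketch only; the actual reconciliation loop may differ.
    func fetchAndRecord(ctx context.Context, m *metrics, src WorkflowMetadataSource, don capabilities.DON) ([]WorkflowMetadataView, error) {
        start := time.Now()
        views, _, err := src.ListWorkflowMetadata(ctx, don)
        m.recordSourceFetch(ctx, src.Name(), len(views), time.Since(start), err)
        return views, err
    }
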
+func (m *metrics) recordSourceFetch(ctx context.Context, sourceName string, workflowCount int, duration time.Duration, err error) { + attrs := metric.WithAttributes(attribute.String("source", sourceName)) + + // Record fetch duration + m.sourceFetchDuration.Record(ctx, duration.Milliseconds(), attrs) + + // Record workflow count per source + m.workflowsPerSource.Record(ctx, int64(workflowCount), attrs) + + // Record health status (1=healthy, 0=unhealthy) + if err != nil { + m.sourceHealth.Record(ctx, 0, attrs) + m.sourceFetchErrors.Add(ctx, 1, attrs) + } else { + m.sourceHealth.Record(ctx, 1, attrs) + } +} + func newMetrics() (*metrics, error) { handleDuration, err := beholder.GetMeter().Int64Histogram("platform_workflow_registry_syncer_handler_duration_ms") if err != nil { @@ -58,10 +83,35 @@ func newMetrics() (*metrics, error) { return nil, err } + // Per-source metrics + sourceHealth, err := beholder.GetMeter().Int64Gauge("platform_workflow_registry_syncer_source_health") + if err != nil { + return nil, err + } + + workflowsPerSource, err := beholder.GetMeter().Int64Gauge("platform_workflow_registry_syncer_workflows_per_source") + if err != nil { + return nil, err + } + + sourceFetchDuration, err := beholder.GetMeter().Int64Histogram("platform_workflow_registry_syncer_source_fetch_duration_ms") + if err != nil { + return nil, err + } + + sourceFetchErrors, err := beholder.GetMeter().Int64Counter("platform_workflow_registry_syncer_source_fetch_errors_total") + if err != nil { + return nil, err + } + return &metrics{ - handleDuration: handleDuration, - fetchedWorkflows: fetchedWorkflows, - runningWorkflows: runningWorkflows, - completedSyncs: completedSyncs, + handleDuration: handleDuration, + fetchedWorkflows: fetchedWorkflows, + runningWorkflows: runningWorkflows, + completedSyncs: completedSyncs, + sourceHealth: sourceHealth, + workflowsPerSource: workflowsPerSource, + sourceFetchDuration: sourceFetchDuration, + sourceFetchErrors: sourceFetchErrors, }, nil } diff --git a/core/services/workflows/syncer/v2/types.go b/core/services/workflows/syncer/v2/types.go index 01e8e576cfd..60e9a9075ff 100644 --- a/core/services/workflows/syncer/v2/types.go +++ b/core/services/workflows/syncer/v2/types.go @@ -4,6 +4,8 @@ import ( "context" "math/big" + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" ghcapabilities "github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/capabilities" "github.com/smartcontractkit/chainlink/v2/core/services/workflows/types" ) @@ -50,6 +52,9 @@ type WorkflowMetadataView struct { Tag string Attributes []byte DonFamily string + // Source identifies where this workflow metadata came from + // e.g., "ContractWorkflowSource", "GRPCWorkflowSource", "FileWorkflowSource" + Source string } type GetWorkflowListByDONParams struct { @@ -95,6 +100,7 @@ type WorkflowRegisteredEvent struct { ConfigURL string Tag string Attributes []byte + Source string // source that provided this workflow metadata } type WorkflowActivatedEvent struct { @@ -108,6 +114,7 @@ type WorkflowActivatedEvent struct { ConfigURL string Tag string Attributes []byte + Source string // source that provided this workflow metadata } type WorkflowPausedEvent struct { @@ -121,8 +128,24 @@ type WorkflowPausedEvent struct { ConfigURL string Tag string Attributes []byte + Source string } type WorkflowDeletedEvent struct { WorkflowID types.WorkflowID + Source string +} + +// WorkflowMetadataSource is an interface for 
fetching workflow metadata from various sources. +// This abstraction allows the workflow registry syncer to aggregate workflows from multiple +// sources (e.g., on-chain contract, file-based, API-based) while treating them uniformly. +type WorkflowMetadataSource interface { + // ListWorkflowMetadata returns all workflow metadata for the given DON. + ListWorkflowMetadata(ctx context.Context, don capabilities.DON) ([]WorkflowMetadataView, *commontypes.Head, error) + + // Name returns a human-readable name for this source. + Name() string + + // Ready returns nil if the source is ready to be queried. + Ready() error } diff --git a/core/services/workflows/syncer/v2/workflow_registry.go b/core/services/workflows/syncer/v2/workflow_registry.go index 382c0ec254b..1166e611e76 100644 --- a/core/services/workflows/syncer/v2/workflow_registry.go +++ b/core/services/workflows/syncer/v2/workflow_registry.go @@ -9,12 +9,14 @@ import ( "io" "maps" "math/big" + "strings" "sync" "time" "github.com/jonboulle/clockwork" "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + nodeauthjwt "github.com/smartcontractkit/chainlink-common/pkg/nodeauth/jwt" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" @@ -76,7 +78,14 @@ type workflowRegistry struct { allowListedMu sync.RWMutex contractReaderFn versioning.ContractReaderFactory - contractReader types.ContractReader + + // contractReader is used exclusively for fetching allowlisted requests from the WorkflowRegistry + // contract. This data is consumed by Vault DON nodes to authorize incoming vault requests. + // Workflow metadata is fetched separately via workflowSources (see below). + contractReader types.ContractReader + + // workflowSources holds workflow metadata sources (contract, file, gRPC). + workflowSources []WorkflowMetadataSource config Config @@ -122,7 +131,82 @@ func WithRetryInterval(retryInterval time.Duration) func(*workflowRegistry) { } } +// AlternativeSourceConfig holds configuration for a GRPC workflow source. +type AlternativeSourceConfig struct { + URL string + Name string + TLSEnabled bool + JWTGenerator nodeauthjwt.JWTGenerator +} + +// WithAlternativeSources adds alternative workflow sources to the registry. +// Sources are detected by URL scheme: +// - file:// prefix -> FileWorkflowSource (reads from local JSON file) +// - Otherwise -> GRPCWorkflowSource (connects to GRPC server) +// +// These sources supplement or replace the primary contract source. 
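+//
+// A minimal wiring sketch; values are illustrative (the file path is hypothetical),
+// and the option is passed to NewWorkflowRegistry like any other functional option:
+//
+//	wr, err := NewWorkflowRegistry(lggr, readerFn, addr, cfg, handler, donNotifier, engineRegistry,
+//		WithAlternativeSources([]AlternativeSourceConfig{
+//			{URL: "localhost:50051", Name: "grpc-source", TLSEnabled: true},
+//			{URL: "file:///etc/cre/workflows.json", Name: "file-source"},
+//		}),
+//	)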
+func WithAlternativeSources(sources []AlternativeSourceConfig) func(*workflowRegistry) { + return func(wr *workflowRegistry) { + successCount := 0 + failedSources := []string{} + + for _, src := range sources { + // Detect source type by URL scheme + if strings.HasPrefix(src.URL, "file://") { + // File source - extract path from file:// URL + filePath := strings.TrimPrefix(src.URL, "file://") + fileSource, err := NewFileWorkflowSourceWithPath(wr.lggr, filePath) + if err != nil { + wr.lggr.Errorw("Failed to create file workflow source", + "name", src.Name, + "path", filePath, + "error", err) + failedSources = append(failedSources, src.Name) + continue + } + wr.workflowSources = append(wr.workflowSources, fileSource) + successCount++ + wr.lggr.Infow("Added file workflow source", + "name", src.Name, + "path", filePath) + } else { + // GRPC source (default) + grpcSource, err := NewGRPCWorkflowSource(wr.lggr, GRPCWorkflowSourceConfig{ + URL: src.URL, + TLSEnabled: src.TLSEnabled, + Name: src.Name, + JWTGenerator: src.JWTGenerator, + }) + if err != nil { + wr.lggr.Errorw("Failed to create GRPC workflow source", + "name", src.Name, + "url", src.URL, + "error", err) + failedSources = append(failedSources, src.Name) + continue + } + wr.workflowSources = append(wr.workflowSources, grpcSource) + successCount++ + wr.lggr.Infow("Added GRPC workflow source", + "name", src.Name, + "url", src.URL, + "tls", src.TLSEnabled) + } + } + + // Log summary if any sources failed to initialize + if len(failedSources) > 0 { + wr.lggr.Warnw("Some alternative sources failed to initialize", + "expected", len(sources), + "active", successCount, + "failed", failedSources) + } + } +} + // NewWorkflowRegistry returns a new v2 workflowRegistry. +// The addr parameter is optional - if empty, no contract source will be created, +// enabling pure GRPC-only or file-only workflow deployments. func NewWorkflowRegistry( lggr logger.Logger, contractReaderFn versioning.ContractReaderFactory, @@ -142,6 +226,18 @@ func NewWorkflowRegistry( return nil, err } + var workflowSources []WorkflowMetadataSource + + // Only add contract source if address is configured + if addr != "" { + contractSource := NewContractWorkflowSource(lggr, contractReaderFn, addr) + workflowSources = append(workflowSources, contractSource) + lggr.Infow("Added contract workflow source", + "contractAddress", addr) + } else { + lggr.Infow("No contract address configured, skipping contract workflow source") + } + wr := &workflowRegistry{ lggr: lggr, contractReaderFn: contractReaderFn, @@ -159,12 +255,17 @@ func NewWorkflowRegistry( hooks: Hooks{ OnStartFailure: func(_ error) {}, }, + workflowSources: workflowSources, } for _, opt := range opts { opt(wr) } + lggr.Infow("Initialized workflow registry with multi-source support", + "sourceCount", len(wr.workflowSources), + "hasContractSource", addr != "") + switch wr.config.SyncStrategy { case SyncStrategyReconciliation: break @@ -194,10 +295,9 @@ func (w *workflowRegistry) Start(_ context.Context) error { w.lggr.Debug("shutting down workflowregistry, %s", ctx.Err()) return case <-ticker: - // Async initialization of contract reader because there is an on-chain - // call dependency. Blocking on initialization results in a - // deadlock. Instead, wait until the contract reader is ready. - reader, err := w.newWorkflowRegistryContractReader(ctx) + // Async initialization of contract reader for allowlisted requests. + // Blocking on initialization results in a deadlock, so we poll until ready. 
+ reader, err := w.newAllowlistedRequestsContractReader(ctx) if err != nil { w.lggr.Infow("contract reader unavailable", "error", err.Error()) break @@ -280,12 +380,19 @@ func toLocalHead(head *types.Head) Head { } } -// generateReconciliationEvents compares the workflow registry workflow metadata state against the engine registry's state. -// Differences are handled by the event handler by creating events that are sent to the events channel for handling. -func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendingEvents map[string]*reconciliationEvent, workflowMetadata []WorkflowMetadataView, head *types.Head) ([]*reconciliationEvent, error) { +// generateReconciliationEvents compares workflow metadata from a specific source against the engine registry's state. +// It only considers engines from the specified source when determining deletions. This ensures that when a source +// fails to fetch, we don't incorrectly delete engines from other sources. +func (w *workflowRegistry) generateReconciliationEvents( + _ context.Context, + pendingEvents map[string]*reconciliationEvent, + workflowMetadata []WorkflowMetadataView, + head *types.Head, + sourceName string, +) ([]*reconciliationEvent, error) { var events []*reconciliationEvent localHead := toLocalHead(head) - // workflowMetadataMap is only used for lookups; disregard when reading the state machine. + // workflowMetadataMap is only used for lookups workflowMetadataMap := make(map[string]WorkflowMetadataView) for _, wfMeta := range workflowMetadata { workflowMetadataMap[wfMeta.WorkflowID.Hex()] = wfMeta @@ -300,8 +407,6 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi switch wfMeta.Status { case WorkflowStatusActive: switch engineFound { - // we can't tell the difference between an activation and registration without holding - // state in the db; so we handle as an activation event. case false: signature := fmt.Sprintf("%s-%s-%s", WorkflowActivated, id, toSpecStatus(wfMeta.Status)) @@ -323,20 +428,19 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi ConfigURL: wfMeta.ConfigURL, Tag: wfMeta.Tag, Attributes: wfMeta.Attributes, + Source: wfMeta.Source, } events = append(events, &reconciliationEvent{ Event: Event{ Data: toActivatedEvent, Name: WorkflowActivated, Head: localHead, - Info: fmt.Sprintf("[ID: %s, Name: %s, Owner: %s]", wfMeta.WorkflowID.Hex(), wfMeta.WorkflowName, hex.EncodeToString(wfMeta.Owner)), + Info: fmt.Sprintf("[ID: %s, Name: %s, Owner: %s, Source: %s]", wfMeta.WorkflowID.Hex(), wfMeta.WorkflowName, hex.EncodeToString(wfMeta.Owner), sourceName), }, signature: signature, id: id, }) workflowsSeen[id] = true - // if the workflow is active, the workflow engine is in the engine registry, and the metadata has not changed - // then we don't need to action the event further. Mark as seen and continue. case true: workflowsSeen[id] = true } @@ -344,17 +448,10 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi signature := fmt.Sprintf("%s-%s-%s", WorkflowPaused, id, toSpecStatus(wfMeta.Status)) switch engineFound { case false: - // Account for a state change from active to paused, by checking - // whether an existing pendingEvent exists. 
- // We do this regardless of whether we have an event to handle or not, since this ensures - // we correctly handle the state of pending events in the following situation: - // - we registered an active workflow, but it failed to process successfully - // - we then paused the workflow; this should clear the pending event if _, ok := pendingEvents[id]; ok && pendingEvents[id].signature != signature { delete(pendingEvents, id) } case true: - // Will be handled in the event handler as a deleted event and will clear the DB workflow spec. workflowsSeen[id] = true if _, ok := pendingEvents[id]; ok && pendingEvents[id].signature == signature { @@ -371,6 +468,7 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi CreatedAt: wfMeta.CreatedAt, Status: wfMeta.Status, WorkflowName: wfMeta.WorkflowName, + Source: wfMeta.Source, } events = append( []*reconciliationEvent{ @@ -379,7 +477,7 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi Data: toPausedEvent, Name: WorkflowPaused, Head: localHead, - Info: fmt.Sprintf("[ID: %s, Name: %s, Owner: %s]", wfMeta.WorkflowID.Hex(), wfMeta.WorkflowName, hex.EncodeToString(wfMeta.Owner)), + Info: fmt.Sprintf("[ID: %s, Name: %s, Owner: %s, Source: %s]", wfMeta.WorkflowID.Hex(), wfMeta.WorkflowName, hex.EncodeToString(wfMeta.Owner), sourceName), }, signature: signature, id: id, @@ -394,8 +492,8 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi } // Shut down engines that are no longer in the contract's latest workflow metadata state - allEngines := w.engineRegistry.GetAll() - for _, engine := range allEngines { + sourceEngines := w.engineRegistry.GetBySource(sourceName) + for _, engine := range sourceEngines { id := engine.WorkflowID.Hex() if !workflowsSeen[id] { signature := fmt.Sprintf("%s-%s", WorkflowDeleted, id) @@ -410,6 +508,7 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi toDeletedEvent := WorkflowDeletedEvent{ WorkflowID: engine.WorkflowID, + Source: sourceName, } events = append( []*reconciliationEvent{ @@ -418,7 +517,7 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi Data: toDeletedEvent, Name: WorkflowDeleted, Head: localHead, - Info: fmt.Sprintf("[ID: %s]", id), + Info: fmt.Sprintf("[ID: %s, Source: %s]", id, sourceName), }, signature: signature, id: id, @@ -430,7 +529,7 @@ func (w *workflowRegistry) generateReconciliationEvents(_ context.Context, pendi } // Clean up create events which no longer need to be attempted because - // the workflow no longer exists in the workflow registry contract + // the workflow no longer exists in this source's metadata for id, event := range pendingEvents { if event.Name == WorkflowActivated { if _, ok := workflowMetadataMap[event.Data.(WorkflowActivatedEvent).WorkflowID.Hex()]; !ok { @@ -490,9 +589,10 @@ func (w *workflowRegistry) syncAllowlistedRequests(ctx context.Context) { // syncUsingReconciliationStrategy syncs workflow registry contract state by polling the workflow metadata state and comparing to local state. // NOTE: In this mode paused states will be treated as a deleted workflow. Workflows will not be registered as paused. +// This function processes each source independently to ensure that failure in one source doesn't affect workflows from other sources. 
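+//
+// Per tick, the per-source flow is roughly the following (a sketch; logging, metrics,
+// and retry bookkeeping elided):
+//
+//	for _, source := range w.workflowSources {
+//		workflows, head, fetchErr := source.ListWorkflowMetadata(ctx, don)
+//		if fetchErr != nil {
+//			continue // skip this source: no events generated, no deletions
+//		}
+//		events, _ := w.generateReconciliationEvents(ctx, pendingBySource[source.Name()], workflows, head, source.Name())
+//		// handle events; failed handling is re-queued with backoff, per source
+//	}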
func (w *workflowRegistry) syncUsingReconciliationStrategy(ctx context.Context) { ticker := w.getTicker(defaultTickInterval) - pendingEvents := map[string]*reconciliationEvent{} + pendingEventsBySource := make(map[string]map[string]*reconciliationEvent) w.lggr.Debug("running readRegistryStateLoop") for { select { @@ -505,54 +605,87 @@ func (w *workflowRegistry) syncUsingReconciliationStrategy(ctx context.Context) w.lggr.Errorw("failed to get get don from notifier", "err", err) continue } - w.lggr.Debugw("fetching workflow registry metadata", "don", don.Families) - allWorkflowsMetadata, head, err := w.getAllWorkflowsMetadata(ctx, don, w.contractReader) - if err != nil { - w.lggr.Errorw("failed to get registry state", "err", err) - continue - } - w.metrics.recordFetchedWorkflows(ctx, len(allWorkflowsMetadata)) - w.lggr.Debugw("preparing events to reconcile", "numWorkflows", len(allWorkflowsMetadata), "blockHeight", head.Height, "numPendingEvents", len(pendingEvents)) - events, err := w.generateReconciliationEvents(ctx, pendingEvents, allWorkflowsMetadata, head) - if err != nil { - w.lggr.Errorw("failed to generate reconciliation events", "err", err) - continue - } - w.lggr.Debugw("generated events to reconcile", "num", len(events), "events", events) + w.lggr.Debugw("fetching workflow metadata from all sources", "don", don.Families) - pendingEvents = map[string]*reconciliationEvent{} - - // Send events generated from differences to the handler + // Process each source independently to isolate failures + totalWorkflowsFetched := 0 reconcileReport := newReconcileReport() - for _, event := range events { - select { - case <-ctx.Done(): - w.lggr.Debug("readRegistryStateLoop stopped during processing") - return - default: - w.lggr.Debugw("processing event", "event", event.Name, "id", event.id, "signature", event.signature, "workflowInfo", event.Info) - reconcileReport.NumEventsByType[string(event.Name)]++ - if event.retryCount == 0 || w.clock.Now().After(event.nextRetryAt) { - err := w.handleWithMetrics(ctx, event.Event) - if err != nil { - event.updateNextRetryFor(w.clock, w.retryInterval, w.maxRetryInterval) + for _, source := range w.workflowSources { + sourceName := source.Name() + + // Initialize pending events for this source if needed + if pendingEventsBySource[sourceName] == nil { + pendingEventsBySource[sourceName] = make(map[string]*reconciliationEvent) + } + pendingEvents := pendingEventsBySource[sourceName] + + // Fetch workflows from this source (each source handles lazy initialization internally) + start := time.Now() + workflows, head, fetchErr := source.ListWorkflowMetadata(ctx, don) + duration := time.Since(start) + + // Record metrics for this source fetch + w.metrics.recordSourceFetch(ctx, sourceName, len(workflows), duration, fetchErr) + + if fetchErr != nil { + w.lggr.Errorw("Failed to fetch from source, skipping reconciliation for this source", + "source", sourceName, "error", fetchErr, "durationMs", duration.Milliseconds()) + // KEY: Skip this source entirely - no events generated, no deletions + continue + } + + totalWorkflowsFetched += len(workflows) + w.lggr.Debugw("Fetched workflows from source", + "source", sourceName, + "count", len(workflows), + "durationMs", duration.Milliseconds()) + + // Generate events only for this source's engines + events, genErr := w.generateReconciliationEvents(ctx, pendingEvents, workflows, head, sourceName) + if genErr != nil { + w.lggr.Errorw("Failed to generate reconciliation events for source", + "source", sourceName, "error", genErr) + 
continue + } + + w.lggr.Debugw("Generated events for source", "source", sourceName, "num", len(events)) - pendingEvents[event.id] = event + // Clear pending events after successful reconciliation + pendingEventsBySource[sourceName] = make(map[string]*reconciliationEvent) + + // Handle events (shared handler) + for _, event := range events { + select { + case <-ctx.Done(): + w.lggr.Debug("readRegistryStateLoop stopped during processing") + return + default: + w.lggr.Debugw("processing event", "source", sourceName, "event", event.Name, "id", event.id, "signature", event.signature, "workflowInfo", event.Info) + reconcileReport.NumEventsByType[string(event.Name)]++ + + if event.retryCount == 0 || w.clock.Now().After(event.nextRetryAt) { + handleErr := w.handleWithMetrics(ctx, event.Event) + if handleErr != nil { + event.updateNextRetryFor(w.clock, w.retryInterval, w.maxRetryInterval) + + pendingEventsBySource[sourceName][event.id] = event + + reconcileReport.Backoffs[event.id] = event.nextRetryAt + w.lggr.Errorw("failed to handle event, backing off...", "err", handleErr, "type", event.Name, "nextRetryAt", event.nextRetryAt, "retryCount", event.retryCount, "workflowInfo", event.Info) + } + } else { + // It's not ready to execute yet, let's put it back on the pending queue. + pendingEventsBySource[sourceName][event.id] = event reconcileReport.Backoffs[event.id] = event.nextRetryAt - w.lggr.Errorw("failed to handle event, backing off...", "err", err, "type", event.Name, "nextRetryAt", event.nextRetryAt, "retryCount", event.retryCount, "workflowInfo", event.Info) + w.lggr.Debugw("skipping event, still in backoff", "nextRetryAt", event.nextRetryAt, "event", event.Name, "id", event.id, "signature", event.signature, "workflowInfo", event.Info) } - } else { - // It's not ready to execute yet, let's put it back on the pending queue. 
- pendingEvents[event.id] = event - - reconcileReport.Backoffs[event.id] = event.nextRetryAt - w.lggr.Debugw("skipping event, still in backoff", "nextRetryAt", event.nextRetryAt, "event", event.Name, "id", event.id, "signature", event.signature, "workflowInfo", event.Info) } } } + w.metrics.recordFetchedWorkflows(ctx, totalWorkflowsFetched) w.lggr.Debugw("reconciled events", "report", reconcileReport) runningWorkflows := w.engineRegistry.GetAll() @@ -578,35 +711,15 @@ func isEmptyWorkflowID(wfID [32]byte) bool { return wfID == emptyID } -// validateWorkflowMetadata logs warnings for incomplete workflow metadata from contract -func validateWorkflowMetadata(wfMeta workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView, lggr logger.Logger) { - if isEmptyWorkflowID(wfMeta.WorkflowId) { - lggr.Warnw("Workflow has empty WorkflowID from contract", - "workflowName", wfMeta.WorkflowName, - "owner", hex.EncodeToString(wfMeta.Owner.Bytes()), - "binaryURL", wfMeta.BinaryUrl, - "configURL", wfMeta.ConfigUrl) - } - - if len(wfMeta.Owner.Bytes()) == 0 { - lggr.Warnw("Workflow has empty Owner from contract", - "workflowID", hex.EncodeToString(wfMeta.WorkflowId[:]), - "workflowName", wfMeta.WorkflowName, - "binaryURL", wfMeta.BinaryUrl, - "configURL", wfMeta.ConfigUrl) - } - - if wfMeta.BinaryUrl == "" || wfMeta.ConfigUrl == "" { - lggr.Warnw("Workflow has empty BinaryURL or ConfigURL from contract", - "workflowID", hex.EncodeToString(wfMeta.WorkflowId[:]), - "workflowName", wfMeta.WorkflowName, - "owner", hex.EncodeToString(wfMeta.Owner.Bytes()), - "binaryURL", wfMeta.BinaryUrl, - "configURL", wfMeta.ConfigUrl) - } -} - -func (w *workflowRegistry) newWorkflowRegistryContractReader( +// newAllowlistedRequestsContractReader creates a contract reader specifically for fetching +// allowlisted requests from the WorkflowRegistry contract. This is used by Vault DON nodes +// to verify that incoming vault requests have been pre-authorized on-chain by workflow owners. +// +// Note: Workflow metadata is fetched separately via ContractWorkflowSource, which maintains +// its own contract reader. The two concerns are separated because: +// - Allowlisted requests: Used by Vault DON for request authorization +// - Workflow metadata: Used by workflow engine for deployment/reconciliation +func (w *workflowRegistry) newAllowlistedRequestsContractReader( ctx context.Context, ) (types.ContractReader, error) { contractReaderCfg := config.ChainReaderConfig{ @@ -614,10 +727,6 @@ func (w *workflowRegistry) newWorkflowRegistryContractReader( WorkflowRegistryContractName: { ContractABI: workflow_registry_wrapper_v2.WorkflowRegistryABI, Configs: map[string]*config.ChainReaderDefinition{ - GetWorkflowsByDONMethodName: { - ChainSpecificName: GetWorkflowsByDONMethodName, - ReadType: config.Method, - }, GetActiveAllowlistedRequestsReverseMethodName: { ChainSpecificName: GetActiveAllowlistedRequestsReverseMethodName, ReadType: config.Method, @@ -658,75 +767,6 @@ func (w *workflowRegistry) newWorkflowRegistryContractReader( return reader, nil } -// getAllWorkflowsMetadata uses contract reader to query the WorkflowRegistry contract using the method getWorkflowListByDON. -// It gets metadata for all workflows assigned to any of current DON's families. 
-func (w *workflowRegistry) getAllWorkflowsMetadata(ctx context.Context, don capabilities.DON, contractReader types.ContractReader) ([]WorkflowMetadataView, *types.Head, error) { - if contractReader == nil { - return nil, nil, errors.New("cannot fetch workflow metadata: nil contract reader") - } - contractBinding := types.BoundContract{ - Address: w.workflowRegistryAddress, - Name: WorkflowRegistryContractName, - } - - readIdentifier := contractBinding.ReadIdentifier(GetWorkflowsByDONMethodName) - var headAtLastRead *types.Head - var allWorkflows []WorkflowMetadataView - - for _, family := range don.Families { - params := GetWorkflowListByDONParams{ - DonFamily: family, - Start: big.NewInt(0), - Limit: big.NewInt(MaxResultsPerQuery), - } - - for { - var err error - var workflows struct { - List []workflow_registry_wrapper_v2.WorkflowRegistryWorkflowMetadataView - } - - headAtLastRead, err = contractReader.GetLatestValueWithHeadData(ctx, readIdentifier, primitives.Finalized, params, &workflows) - if err != nil { - return []WorkflowMetadataView{}, &types.Head{Height: "0"}, fmt.Errorf("failed to get lastest value with head data %w", err) - } - - for _, wfMeta := range workflows.List { - // Log warnings for incomplete metadata but don't skip processing - validateWorkflowMetadata(wfMeta, w.lggr) - - // TODO: https://smartcontract-it.atlassian.net/browse/CAPPL-1021 load balance across workflow nodes in DON Family - allWorkflows = append(allWorkflows, WorkflowMetadataView{ - WorkflowID: wfMeta.WorkflowId, - Owner: wfMeta.Owner.Bytes(), - CreatedAt: wfMeta.CreatedAt, - Status: wfMeta.Status, - WorkflowName: wfMeta.WorkflowName, - BinaryURL: wfMeta.BinaryUrl, - ConfigURL: wfMeta.ConfigUrl, - Tag: wfMeta.Tag, - Attributes: wfMeta.Attributes, - DonFamily: wfMeta.DonFamily, - }) - } - - // if less workflows than limit, then we have reached the end of the list - if int64(len(workflows.List)) < MaxResultsPerQuery { - break - } - - // otherwise, increment the start parameter and continue to fetch more workflows - params.Start.Add(params.Start, big.NewInt(int64(len(workflows.List)))) - } - } - - if headAtLastRead == nil { - return allWorkflows, &types.Head{Height: "0"}, nil - } - - return allWorkflows, headAtLastRead, nil -} - func (w *workflowRegistry) GetAllowlistedRequests(_ context.Context) []workflow_registry_wrapper_v2.WorkflowRegistryOwnerAllowlistedRequest { w.allowListedMu.RLock() defer w.allowListedMu.RUnlock() diff --git a/core/services/workflows/syncer/v2/workflow_registry_test.go b/core/services/workflows/syncer/v2/workflow_registry_test.go index 5bc072ba7cf..d2b5a232707 100644 --- a/core/services/workflows/syncer/v2/workflow_registry_test.go +++ b/core/services/workflows/syncer/v2/workflow_registry_test.go @@ -19,6 +19,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/capabilities" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/logger" + wfTypes "github.com/smartcontractkit/chainlink/v2/core/services/workflows/types" ) func Test_generateReconciliationEventsV2(t *testing.T) { @@ -73,7 +74,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { } pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // The only event is WorkflowActivatedEvent @@ -102,7 +103,7 @@ func 
Test_generateReconciliationEventsV2(t *testing.T) { wfID := [32]byte{1} owner := []byte{1} wfName := "wf name 1" - err := er.Add(wfID, &mockService{}) + err := er.Add(wfID, "TestSource", &mockService{}) require.NoError(t, err) wr, err := NewWorkflowRegistry( lggr, @@ -145,13 +146,14 @@ func Test_generateReconciliationEventsV2(t *testing.T) { } pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) require.Len(t, events, 2) require.Equal(t, WorkflowDeleted, events[0].Name) expectedDeletedEvent := WorkflowDeletedEvent{ WorkflowID: wfID, + Source: "TestSource", } require.Equal(t, expectedDeletedEvent, events[0].Data) require.Equal(t, WorkflowActivated, events[1].Name) @@ -176,7 +178,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { // Engine already in the workflow registry er := NewEngineRegistry() wfID := [32]byte{1} - err := er.Add(wfID, &mockService{}) + err := er.Add(wfID, "TestSource", &mockService{}) require.NoError(t, err) wr, err := NewWorkflowRegistry( lggr, @@ -198,7 +200,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { metadata := []WorkflowMetadataView{} pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // The only event is WorkflowDeletedEvent @@ -206,6 +208,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { require.Equal(t, WorkflowDeleted, events[0].Name) expectedDeletedEvent := WorkflowDeletedEvent{ WorkflowID: wfID, + Source: "TestSource", } require.Equal(t, expectedDeletedEvent, events[0].Data) }) @@ -258,7 +261,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { } pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // The only event is WorkflowActivatedEvent @@ -278,11 +281,11 @@ func Test_generateReconciliationEventsV2(t *testing.T) { require.Equal(t, expectedActivatedEvent, events[0].Data) // Add the workflow to the engine registry as the handler would - err = er.Add(wfID, &mockService{}) + err = er.Add(wfID, ContractWorkflowSourceName, &mockService{}) require.NoError(t, err) // Repeated ticks do not make any new events - events, err = wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err = wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) require.Empty(t, events) }) @@ -335,7 +338,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { } pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // No events require.Empty(t, events) @@ -350,7 +353,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { wfID := 
[32]byte{1} owner := []byte{} wfName := "wf name 1" - err := er.Add(wfID, &mockService{}) + err := er.Add(wfID, ContractWorkflowSourceName, &mockService{}) require.NoError(t, err) wr, err := NewWorkflowRegistry( lggr, @@ -392,7 +395,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { } pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // The only event is WorkflowPausedEvent @@ -479,7 +482,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { nextRetryAt: nextRetryAt, }, } - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // The only event is WorkflowActivatedEvent @@ -569,7 +572,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { nextRetryAt: nextRetryAt, }, } - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) require.Empty(t, pendingEvents) @@ -585,7 +588,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { wfID := [32]byte{1} owner := []byte{1} wfName := "wf name 1" - err := er.Add(wfID, &mockService{}) + err := er.Add(wfID, "TestSource", &mockService{}) require.NoError(t, err) wr, err := NewWorkflowRegistry( lggr, @@ -629,7 +632,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { } pendingEvents := map[string]*reconciliationEvent{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) // Delete event happens before activate event @@ -644,7 +647,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { // Engine already in the workflow registry er := NewEngineRegistry() wfID := [32]byte{1} - err := er.Add(wfID, &mockService{}) + err := er.Add(wfID, "TestSource", &mockService{}) require.NoError(t, err) wr, err := NewWorkflowRegistry( lggr, @@ -667,6 +670,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { // A workflow is to be removed, but hits a failure, causing it to stay pending event := WorkflowDeletedEvent{ WorkflowID: wfID, + Source: "TestSource", } pendingEvents := map[string]*reconciliationEvent{ hex.EncodeToString(wfID[:]): { @@ -675,7 +679,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { Name: WorkflowDeleted, }, id: hex.EncodeToString(wfID[:]), - signature: fmt.Sprintf("%s-%s-%s", WorkflowDeleted, hex.EncodeToString(wfID[:]), toSpecStatus(WorkflowStatusActive)), + signature: fmt.Sprintf("%s-%s", WorkflowDeleted, hex.EncodeToString(wfID[:])), nextRetryAt: time.Now(), retryCount: 5, }, @@ -684,7 +688,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { // No workflows in metadata metadata := []WorkflowMetadataView{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, 
err) require.Len(t, events, 1) require.Equal(t, WorkflowDeleted, events[0].Name) @@ -750,7 +754,7 @@ func Test_generateReconciliationEventsV2(t *testing.T) { // The workflow then gets removed metadata := []WorkflowMetadataView{} - events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}) + events, err := wr.generateReconciliationEvents(ctx, pendingEvents, metadata, &types.Head{Height: "123"}, "TestSource") require.NoError(t, err) require.Empty(t, events) require.Empty(t, pendingEvents) @@ -876,3 +880,442 @@ func (m *mockContractReader) Start( ) error { return m.startErr } + +func Test_generateReconciliationEvents_SourceIsolation(t *testing.T) { + t.Run("only deletes engines from specified source", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: engines from two sources + er := NewEngineRegistry() + wfIDContract := [32]byte{1} + wfIDGrpc := [32]byte{2} + require.NoError(t, er.Add(wfIDContract, ContractWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc, GRPCWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Reconcile ContractWorkflowSource with empty metadata + // Should only delete contract engine, not GRPC engine + pendingEvents := make(map[string]*reconciliationEvent) + events, err := wr.generateReconciliationEvents( + ctx, pendingEvents, []WorkflowMetadataView{}, &types.Head{Height: "123"}, ContractWorkflowSourceName) + + require.NoError(t, err) + require.Len(t, events, 1) + require.Equal(t, WorkflowDeleted, events[0].Name) + deletedEvent := events[0].Data.(WorkflowDeletedEvent) + require.Equal(t, wfTypes.WorkflowID(wfIDContract), deletedEvent.WorkflowID) + require.Equal(t, ContractWorkflowSourceName, deletedEvent.Source) + }) + + t.Run("activates workflows tagged with source", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + er := NewEngineRegistry() + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // New workflow from GRPCWorkflowSource + wfID := [32]byte{1} + metadata := []WorkflowMetadataView{{ + WorkflowID: wfID, + Owner: []byte{1, 2, 3}, + Status: WorkflowStatusActive, + Source: GRPCWorkflowSourceName, + WorkflowName: "test-workflow", + BinaryURL: "http://binary.url", + ConfigURL: "http://config.url", + }} + + pendingEvents := make(map[string]*reconciliationEvent) + events, err := wr.generateReconciliationEvents( + ctx, pendingEvents, metadata, &types.Head{Height: "123"}, GRPCWorkflowSourceName) + + require.NoError(t, err) + require.Len(t, events, 1) + require.Equal(t, WorkflowActivated, events[0].Name) + activatedEvent := events[0].Data.(WorkflowActivatedEvent) + require.Equal(t, wfTypes.WorkflowID(wfID), activatedEvent.WorkflowID) + require.Equal(t, GRPCWorkflowSourceName, activatedEvent.Source) + }) + + t.Run("does not delete engines from other sources when source returns empty", 
func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: engines from two sources + er := NewEngineRegistry() + wfIDContract := [32]byte{1} + wfIDGrpc := [32]byte{2} + require.NoError(t, er.Add(wfIDContract, ContractWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc, GRPCWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Reconcile GRPCWorkflowSource with empty metadata + // Should only generate delete event for GRPC engine, not contract engine + pendingEvents := make(map[string]*reconciliationEvent) + events, err := wr.generateReconciliationEvents( + ctx, pendingEvents, []WorkflowMetadataView{}, &types.Head{Height: "123"}, GRPCWorkflowSourceName) + + require.NoError(t, err) + require.Len(t, events, 1) + deletedEvent := events[0].Data.(WorkflowDeletedEvent) + require.Equal(t, wfTypes.WorkflowID(wfIDGrpc), deletedEvent.WorkflowID) + + // Contract engine should still be in registry (we're just checking the event, not actually processing) + _, ok := er.Get(wfIDContract) + require.True(t, ok, "Contract engine should still exist") + }) + + t.Run("handles paused workflow from source", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: engine exists for a workflow + er := NewEngineRegistry() + wfID := [32]byte{1} + require.NoError(t, er.Add(wfID, ContractWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Workflow is now paused + metadata := []WorkflowMetadataView{{ + WorkflowID: wfID, + Owner: []byte{1, 2, 3}, + Status: WorkflowStatusPaused, + Source: ContractWorkflowSourceName, + WorkflowName: "test-workflow", + }} + + pendingEvents := make(map[string]*reconciliationEvent) + events, err := wr.generateReconciliationEvents( + ctx, pendingEvents, metadata, &types.Head{Height: "123"}, ContractWorkflowSourceName) + + require.NoError(t, err) + require.Len(t, events, 1) + require.Equal(t, WorkflowPaused, events[0].Name) + pausedEvent := events[0].Data.(WorkflowPausedEvent) + require.Equal(t, wfTypes.WorkflowID(wfID), pausedEvent.WorkflowID) + require.Equal(t, ContractWorkflowSourceName, pausedEvent.Source) + }) + + t.Run("no events when source has no engines and returns empty metadata", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: engine only from contract source + er := NewEngineRegistry() + wfIDContract := [32]byte{1} + require.NoError(t, er.Add(wfIDContract, ContractWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // 
Reconcile GRPCWorkflowSource with empty metadata + // Should generate no events since GRPC has no engines + pendingEvents := make(map[string]*reconciliationEvent) + events, err := wr.generateReconciliationEvents( + ctx, pendingEvents, []WorkflowMetadataView{}, &types.Head{Height: "123"}, GRPCWorkflowSourceName) + + require.NoError(t, err) + require.Empty(t, events) + }) +} + +// Test_PerSourceReconciliation_FailureIsolation validates the main bug fix: +// when a source fails to fetch, engines from that source should NOT be deleted. +func Test_PerSourceReconciliation_FailureIsolation(t *testing.T) { + t.Run("source failure does not delete engines from that source", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: engines from ContractWorkflowSource and GRPCWorkflowSource + er := NewEngineRegistry() + wfIDContract := [32]byte{1} + wfIDGrpc := [32]byte{2} + require.NoError(t, er.Add(wfIDContract, ContractWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc, GRPCWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Simulate: contract source succeeds with its workflow + contractPendingEvents := make(map[string]*reconciliationEvent) + contractMetadata := []WorkflowMetadataView{{ + WorkflowID: wfIDContract, + Owner: []byte{1, 2, 3}, + Status: WorkflowStatusActive, + Source: ContractWorkflowSourceName, + WorkflowName: "contract-workflow", + BinaryURL: "http://binary.url", + ConfigURL: "http://config.url", + }} + contractEvents, err := wr.generateReconciliationEvents( + ctx, contractPendingEvents, contractMetadata, &types.Head{Height: "123"}, ContractWorkflowSourceName) + require.NoError(t, err) + require.Empty(t, contractEvents, "No events expected since engine already exists") + + // Simulate: GRPC source FAILS (returns error, so we skip reconciliation) + // In the actual sync loop, we would NOT call generateReconciliationEvents + // when the source fetch fails. This test validates that by NOT calling the method + // for the failed source, the GRPC engine is preserved. 
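+		// The guard under test lives in syncUsingReconciliationStrategy (sketch):
+		//
+		//	if fetchErr != nil {
+		//		continue // reconciliation skipped; this source's engines survive
+		//	}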
+ + // Assert: Both engines should still exist + _, ok := er.Get(wfIDContract) + require.True(t, ok, "Contract engine should exist after contract source reconciliation") + + _, ok = er.Get(wfIDGrpc) + require.True(t, ok, "GRPC engine should NOT be deleted when GRPC source fails (skipped reconciliation)") + }) + + t.Run("source recovers after failure - normal reconciliation resumes", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: engines from GRPCWorkflowSource + er := NewEngineRegistry() + wfIDGrpc1 := [32]byte{1} + wfIDGrpc2 := [32]byte{2} + require.NoError(t, er.Add(wfIDGrpc1, GRPCWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc2, GRPCWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Tick 1: GRPC source fails (skip reconciliation - both engines preserved) + // ... (simulated by not calling generateReconciliationEvents) + + // Tick 2: GRPC source recovers with only wfIDGrpc1 + grpcPendingEvents := make(map[string]*reconciliationEvent) + grpcMetadata := []WorkflowMetadataView{{ + WorkflowID: wfIDGrpc1, + Owner: []byte{1, 2, 3}, + Status: WorkflowStatusActive, + Source: GRPCWorkflowSourceName, + WorkflowName: "grpc-workflow-1", + BinaryURL: "http://binary.url", + ConfigURL: "http://config.url", + }} + events, err := wr.generateReconciliationEvents( + ctx, grpcPendingEvents, grpcMetadata, &types.Head{Height: "124"}, GRPCWorkflowSourceName) + require.NoError(t, err) + + // Should generate delete event for wfIDGrpc2 (no longer in metadata) + require.Len(t, events, 1) + require.Equal(t, WorkflowDeleted, events[0].Name) + deletedEvent := events[0].Data.(WorkflowDeletedEvent) + require.Equal(t, wfTypes.WorkflowID(wfIDGrpc2), deletedEvent.WorkflowID) + require.Equal(t, GRPCWorkflowSourceName, deletedEvent.Source) + }) + + t.Run("all sources fail - no deletions", func(t *testing.T) { + // This test validates that when all sources fail, no deletion events are generated + // because we skip reconciliation for each failed source. 
+ lggr := logger.TestLogger(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + er := NewEngineRegistry() + wfIDContract := [32]byte{1} + wfIDGrpc := [32]byte{2} + require.NoError(t, er.Add(wfIDContract, ContractWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc, GRPCWorkflowSourceName, &mockService{})) + + _, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Both sources fail - we don't call generateReconciliationEvents for either + // This is simulated by simply not calling the method + + // Both engines should still exist + require.True(t, er.Contains(wfIDContract)) + require.True(t, er.Contains(wfIDGrpc)) + }) + + t.Run("independent source reconciliation preserves isolation", func(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + workflowDonNotifier := capabilities.NewDonNotifier() + + // Setup: multiple workflows from each source + er := NewEngineRegistry() + wfIDContract1 := [32]byte{1} + wfIDContract2 := [32]byte{2} + wfIDGrpc1 := [32]byte{3} + wfIDGrpc2 := [32]byte{4} + require.NoError(t, er.Add(wfIDContract1, ContractWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDContract2, ContractWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc1, GRPCWorkflowSourceName, &mockService{})) + require.NoError(t, er.Add(wfIDGrpc2, GRPCWorkflowSourceName, &mockService{})) + + wr, err := NewWorkflowRegistry( + lggr, + func(ctx context.Context, bytes []byte) (types.ContractReader, error) { + return nil, nil + }, + "", + Config{ + QueryCount: 20, + SyncStrategy: SyncStrategyReconciliation, + }, + &eventHandler{}, + workflowDonNotifier, + er, + ) + require.NoError(t, err) + + // Contract source: wfIDContract1 removed (only wfIDContract2 remains) + contractPending := make(map[string]*reconciliationEvent) + contractMeta := []WorkflowMetadataView{{ + WorkflowID: wfIDContract2, + Status: WorkflowStatusActive, + Source: ContractWorkflowSourceName, + WorkflowName: "contract-workflow-2", + BinaryURL: "http://binary.url", + ConfigURL: "http://config.url", + }} + contractEvents, err := wr.generateReconciliationEvents( + ctx, contractPending, contractMeta, &types.Head{Height: "123"}, ContractWorkflowSourceName) + require.NoError(t, err) + + // Should delete wfIDContract1 + require.Len(t, contractEvents, 1) + require.Equal(t, WorkflowDeleted, contractEvents[0].Name) + require.Equal(t, wfTypes.WorkflowID(wfIDContract1), contractEvents[0].Data.(WorkflowDeletedEvent).WorkflowID) + + // GRPC source: wfIDGrpc2 removed (only wfIDGrpc1 remains) + grpcPending := make(map[string]*reconciliationEvent) + grpcMeta := []WorkflowMetadataView{{ + WorkflowID: wfIDGrpc1, + Status: WorkflowStatusActive, + Source: GRPCWorkflowSourceName, + WorkflowName: "grpc-workflow-1", + BinaryURL: "http://binary.url", + ConfigURL: "http://config.url", + }} + grpcEvents, err := wr.generateReconciliationEvents( + ctx, grpcPending, grpcMeta, &types.Head{Height: "123"}, GRPCWorkflowSourceName) + require.NoError(t, err) + + // Should delete wfIDGrpc2, but NOT any contract workflows + require.Len(t, grpcEvents, 1) + require.Equal(t, WorkflowDeleted, grpcEvents[0].Name) + require.Equal(t, wfTypes.WorkflowID(wfIDGrpc2), grpcEvents[0].Data.(WorkflowDeletedEvent).WorkflowID) + }) +} diff --git 
a/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go b/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go index f48d8aeb345..66f276af8ea 100644 --- a/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go +++ b/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go @@ -579,7 +579,8 @@ func Test_RegistrySyncer_DONUpdate(t *testing.T) { // Fill in some placeholder engines that the actual event handler would have created for _, event := range testEventHandler.GetEvents() { - err := engineRegistry.Add(event.Data.(WorkflowActivatedEvent).WorkflowID, &mockService{}) + data := event.Data.(WorkflowActivatedEvent) + err := engineRegistry.Add(data.WorkflowID, data.Source, &mockService{}) require.NoError(t, err) } diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml index d090b8d15e9..e113068c3b6 100644 --- a/core/web/resolver/testdata/config-empty-effective.toml +++ b/core/web/resolver/testdata/config-empty-effective.toml @@ -326,6 +326,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index df0dc9ddaa6..6726498a3b2 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -336,6 +336,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index c2160390a08..6844d6fef74 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -326,6 +326,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/deployment/go.mod b/deployment/go.mod index 568745f5fa1..8ceea51150e 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -76,7 +76,7 @@ require ( golang.org/x/mod v0.31.0 golang.org/x/oauth2 v0.32.0 golang.org/x/sync v0.19.0 - google.golang.org/grpc v1.77.0 + google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.10 gopkg.in/guregu/null.v4 v4.0.0 gopkg.in/yaml.v3 v3.0.1 @@ -508,8 +508,8 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/deployment/go.sum b/deployment/go.sum index 0aaff305bf0..b431e913201 100644 --- a/deployment/go.sum +++ b/deployment/go.sum @@ -2178,10 +2178,10 @@ google.golang.org/genproto 
v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2204,8 +2204,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 8eaf31c1f66..bc4937ebea7 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1485,6 +1485,34 @@ ArtifactStorageHost = 'artifact.cre.chain.link' # Example ``` ArtifactStorageHost is the host name that, when present within the workflow metadata binary or config URL, designates that a signed URL should be retrieved from the workflow storage service. +## Capabilities.WorkflowRegistry.AlternativeSources +```toml +[[Capabilities.WorkflowRegistry.AlternativeSources]] +URL = 'localhost:50051' # Example +TLSEnabled = true # Default +Name = 'my-workflow-source' # Example +``` + + +### URL +```toml +URL = 'localhost:50051' # Example +``` +URL is the GRPC endpoint for the alternative workflow metadata source. 
+This allows workflows to be loaded from sources other than the on-chain registry contract. + +### TLSEnabled +```toml +TLSEnabled = true # Default +``` +TLSEnabled enables TLS for the GRPC connection. Defaults to true. + +### Name +```toml +Name = 'my-workflow-source' # Example +``` +Name is a human-readable identifier for logging purposes. + ## Workflows ```toml [Workflows] diff --git a/go.mod b/go.mod index 7e2cbd9c2bf..aa6f76f755d 100644 --- a/go.mod +++ b/go.mod @@ -137,12 +137,12 @@ require ( golang.org/x/crypto v0.45.0 golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc golang.org/x/mod v0.31.0 - golang.org/x/oauth2 v0.30.0 + golang.org/x/oauth2 v0.32.0 golang.org/x/sync v0.19.0 golang.org/x/term v0.37.0 golang.org/x/time v0.12.0 gonum.org/v1/gonum v0.16.0 - google.golang.org/grpc v1.76.0 + google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.10 gopkg.in/guregu/null.v4 v4.0.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 @@ -243,7 +243,7 @@ require ( github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect - github.com/go-jose/go-jose/v4 v4.1.2 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect @@ -420,8 +420,8 @@ require ( golang.org/x/tools v0.39.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251007200510-49b9836ed3ff // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect gopkg.in/guregu/null.v2 v2.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gotest.tools/v3 v3.5.2 // indirect diff --git a/go.sum b/go.sum index cb85ccbd6c0..d08f3778ac2 100644 --- a/go.sum +++ b/go.sum @@ -443,8 +443,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= -github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -1637,8 +1637,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1933,10 +1933,10 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20251007200510-49b9836ed3ff h1:8Zg5TdmcbU8A7CXGjGXF1Slqu/nIFCRaR3S5gT2plIA= -google.golang.org/genproto/googleapis/api v0.0.0-20251007200510-49b9836ed3ff/go.mod h1:dbWfpVPvW/RqafStmRWBUpMN14puDezDMHxNYiRfQu0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1957,8 +1957,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= -google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 9a36bc4c282..4d9b8fe5b32 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -82,7 +82,7 @@ require ( golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc golang.org/x/sync v0.19.0 golang.org/x/text v0.31.0 - google.golang.org/grpc v1.77.0 + google.golang.org/grpc v1.78.0 gopkg.in/guregu/null.v4 v4.0.0 k8s.io/apimachinery v0.33.2 ) @@ -625,8 +625,8 @@ require ( gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/api v0.241.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 068c4460e10..41a10f8fb52 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -2514,10 +2514,10 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2541,8 +2541,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= 
-google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 0d13fae4790..32ccce0a667 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -626,9 +626,9 @@ require ( gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/api v0.241.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/grpc v1.77.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/grpc v1.78.0 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/guregu/null.v4 v4.0.0 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 23630dd0db4..726cebc8c64 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -2492,10 +2492,10 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2519,8 +2519,8 @@ google.golang.org/grpc 
v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/system-tests/lib/cre/don/config/config.go b/system-tests/lib/cre/don/config/config.go index a25e5b53530..03a8f5954f0 100644 --- a/system-tests/lib/cre/don/config/config.go +++ b/system-tests/lib/cre/don/config/config.go @@ -181,6 +181,19 @@ func PrepareNodeTOMLs( } } + // Transform UserConfigOverrides to use platform-specific Docker host addresses. + // This handles differences between macOS (host.docker.internal) and Linux (172.17.0.1) + // for URLs in user-provided config overrides (e.g., AlternativeSources). + for i := range localNodeSets { + for j := range localNodeSets[i].NodeSpecs { + if localNodeSets[i].NodeSpecs[j].Node.UserConfigOverrides != "" { + localNodeSets[i].NodeSpecs[j].Node.UserConfigOverrides = transformUserConfigOverrides( + localNodeSets[i].NodeSpecs[j].Node.UserConfigOverrides, + ) + } + } + } + return localNodeSets, nil } @@ -402,6 +415,9 @@ func addWorkerNodeConfig( } } + // Preserve existing WorkflowRegistry config (e.g., AlternativeSourcesConfig from user_config_overrides) + // before resetting Capabilities struct + existingWorkflowRegistry := existingConfig.Capabilities.WorkflowRegistry existingConfig.Capabilities = coretoml.Capabilities{ Peering: coretoml.P2P{ V2: coretoml.P2PV2{ @@ -414,6 +430,7 @@ func addWorkerNodeConfig( Dispatcher: coretoml.Dispatcher{ SendToSharedPeer: ptr.Ptr(true), }, + WorkflowRegistry: existingWorkflowRegistry, } for _, evmChain := range commonInputs.evmChains { @@ -434,12 +451,16 @@ func addWorkerNodeConfig( } if donMetadata.HasFlag(cre.WorkflowDON) && existingConfig.Capabilities.WorkflowRegistry.Address == nil { + // Preserve existing AlternativeSourcesConfig when setting WorkflowRegistry fields + // Transform URLs to use platform-specific Docker host (handles macOS vs Linux differences) + existingAltSources := transformAlternativeSourceURLs(existingConfig.Capabilities.WorkflowRegistry.AlternativeSourcesConfig) existingConfig.Capabilities.WorkflowRegistry = coretoml.WorkflowRegistry{ - Address: ptr.Ptr(commonInputs.workflowRegistry.address), - NetworkID: ptr.Ptr("evm"), - ChainID: ptr.Ptr(strconv.FormatUint(commonInputs.registryChainID, 10)), - ContractVersion: ptr.Ptr(commonInputs.workflowRegistry.version.String()), - SyncStrategy: ptr.Ptr("reconciliation"), + Address: ptr.Ptr(commonInputs.workflowRegistry.address), + NetworkID: ptr.Ptr("evm"), + ChainID: ptr.Ptr(strconv.FormatUint(commonInputs.registryChainID, 10)), + ContractVersion: ptr.Ptr(commonInputs.workflowRegistry.version.String()), + SyncStrategy: ptr.Ptr("reconciliation"), + 
AlternativeSourcesConfig: existingAltSources, } } @@ -752,6 +773,50 @@ func appendSolanaChain(existingConfig *solcfg.TOMLConfigs, solChain *solanaChain }) } +// transformAlternativeSourceURLs transforms URLs in AlternativeSourcesConfig to use +// platform-specific Docker host addresses. This handles differences between macOS +// (host.docker.internal) and Linux (172.17.0.1 or similar) Docker host resolution. +func transformAlternativeSourceURLs(sources []coretoml.AlternativeWorkflowSource) []coretoml.AlternativeWorkflowSource { + if len(sources) == 0 { + return sources + } + + // Get the platform-specific Docker host (e.g., "http://host.docker.internal" on macOS, + // "http://172.17.0.1" on Linux) + dockerHost := strings.TrimPrefix(framework.HostDockerInternal(), "http://") + + transformed := make([]coretoml.AlternativeWorkflowSource, len(sources)) + for i, src := range sources { + transformed[i] = src + if src.URL != nil { + // Replace "host.docker.internal" with the platform-specific host + url := *src.URL + url = strings.Replace(url, "host.docker.internal", dockerHost, 1) + transformed[i].URL = &url + } + } + + return transformed +} + +// transformUserConfigOverrides transforms URLs in a user config overrides string to use +// platform-specific Docker host addresses. This handles differences between macOS +// (host.docker.internal) and Linux (172.17.0.1 or similar) Docker host resolution. +// This is necessary because UserConfigOverrides is passed directly to containers as a +// separate config file, bypassing the structured config transformation. +func transformUserConfigOverrides(userConfig string) string { + if userConfig == "" { + return userConfig + } + + // Get the platform-specific Docker host (e.g., "http://host.docker.internal" on macOS, + // "http://172.17.0.1" on Linux) + dockerHost := strings.TrimPrefix(framework.HostDockerInternal(), "http://") + + // Replace all occurrences of "host.docker.internal" with the platform-specific host + return strings.ReplaceAll(userConfig, "host.docker.internal", dockerHost) +} + // generateInstanceNames creates Kubernetes-compatible instance names for nodes // Bootstrap nodes get names like "workflow-bt-0", plugin nodes get "workflow-0", "workflow-1", etc. 
// This is a wrapper around infra.GenerateNodeInstanceNames that converts NodeMetadata to bool roles diff --git a/system-tests/lib/cre/grpc_source_mock/auth.go b/system-tests/lib/cre/grpc_source_mock/auth.go new file mode 100644 index 00000000000..59c0819a7d5 --- /dev/null +++ b/system-tests/lib/cre/grpc_source_mock/auth.go @@ -0,0 +1,123 @@ +package grpcsourcemock + +import ( + "context" + "crypto/ed25519" + "encoding/hex" + "log/slog" + "os" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + nodeauthgrpc "github.com/smartcontractkit/chainlink-common/pkg/nodeauth/grpc" + "github.com/smartcontractkit/chainlink-common/pkg/nodeauth/jwt" +) + +// MockNodeAuthProvider is a mock implementation of NodeAuthProvider for testing +type MockNodeAuthProvider struct { + mu sync.RWMutex + trustedPubKeys map[string]bool +} + +// NewMockNodeAuthProvider creates a new MockNodeAuthProvider +func NewMockNodeAuthProvider() *MockNodeAuthProvider { + return &MockNodeAuthProvider{ + trustedPubKeys: make(map[string]bool), + } +} + +// AddTrustedKey adds a public key to the trusted list +func (m *MockNodeAuthProvider) AddTrustedKey(publicKey ed25519.PublicKey) { + m.mu.Lock() + defer m.mu.Unlock() + m.trustedPubKeys[hex.EncodeToString(publicKey)] = true +} + +// RemoveTrustedKey removes a public key from the trusted list +func (m *MockNodeAuthProvider) RemoveTrustedKey(publicKey ed25519.PublicKey) { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.trustedPubKeys, hex.EncodeToString(publicKey)) +} + +// ClearTrustedKeys removes all trusted keys +func (m *MockNodeAuthProvider) ClearTrustedKeys() { + m.mu.Lock() + defer m.mu.Unlock() + m.trustedPubKeys = make(map[string]bool) +} + +// SetTrustedKeys replaces all trusted keys with the provided list +func (m *MockNodeAuthProvider) SetTrustedKeys(publicKeys []ed25519.PublicKey) { + m.mu.Lock() + defer m.mu.Unlock() + m.trustedPubKeys = make(map[string]bool) + for _, pk := range publicKeys { + m.trustedPubKeys[hex.EncodeToString(pk)] = true + } +} + +// IsNodePubKeyTrusted checks if a node's public key is trusted +func (m *MockNodeAuthProvider) IsNodePubKeyTrusted(ctx context.Context, publicKey ed25519.PublicKey) (bool, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return m.trustedPubKeys[hex.EncodeToString(publicKey)], nil +} + +// RejectAllAuthProvider is an implementation that rejects all public keys +// Used for testing graceful auth failure handling +type RejectAllAuthProvider struct{} + +// IsNodePubKeyTrusted always returns false for RejectAllAuthProvider +func (r *RejectAllAuthProvider) IsNodePubKeyTrusted(ctx context.Context, publicKey ed25519.PublicKey) (bool, error) { + return false, nil +} + +// AcceptAllAuthProvider is an implementation that accepts all public keys +// Used for testing when we don't know node keys ahead of time +type AcceptAllAuthProvider struct{} + +// IsNodePubKeyTrusted always returns true for AcceptAllAuthProvider +func (a *AcceptAllAuthProvider) IsNodePubKeyTrusted(ctx context.Context, publicKey ed25519.PublicKey) (bool, error) { + return true, nil +} + +// NewJWTAuthInterceptor creates a gRPC unary interceptor that validates JWT tokens. +// Uses the nodeauth token extractor from chainlink-common for consistent token extraction. 
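+//
+// A minimal wiring sketch (this mirrors how NewServer in server.go attaches the
+// interceptor; AcceptAllAuthProvider is the permissive provider defined above):
+//
+//	srv := grpc.NewServer(grpc.UnaryInterceptor(
+//		NewJWTAuthInterceptor(&AcceptAllAuthProvider{}),
+//	))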
+func NewJWTAuthInterceptor(authProvider NodeAuthProvider) grpc.UnaryServerInterceptor { + // Create the JWT authenticator with the provided auth provider + logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelDebug, + })).With("logger", "grpc_source_mock.JWTAuthInterceptor") + authenticator := jwt.NewNodeJWTAuthenticator(authProvider, logger) + + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + // Extract token from metadata using the shared token extractor + token, err := nodeauthgrpc.ExtractBearerToken(ctx) + if err != nil { + return nil, status.Errorf(codes.Unauthenticated, "missing auth: %v", err) + } + + // Validate the JWT token + valid, _, err := authenticator.AuthenticateJWT(ctx, token, req) + if err != nil { + // Return unauthenticated error without panicking + return nil, status.Errorf(codes.Unauthenticated, "authentication failed: %v", err) + } + + if !valid { + return nil, status.Error(codes.Unauthenticated, "invalid authentication") + } + + // Continue to the handler if authenticated + return handler(ctx, req) + } +} diff --git a/system-tests/lib/cre/grpc_source_mock/private_registry_service.go b/system-tests/lib/cre/grpc_source_mock/private_registry_service.go new file mode 100644 index 00000000000..f92f6798265 --- /dev/null +++ b/system-tests/lib/cre/grpc_source_mock/private_registry_service.go @@ -0,0 +1,50 @@ +package grpcsourcemock + +import ( + "context" + "encoding/hex" + "log/slog" + "os" + + "github.com/smartcontractkit/chainlink-common/pkg/workflows/privateregistry" +) + +var registryLogger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelDebug, +})).With("logger", "grpc_source_mock.PrivateRegistryService") + +// PrivateRegistryService implements the WorkflowDeploymentAction interface for managing workflows +type PrivateRegistryService struct { + store *WorkflowStore +} + +// NewPrivateRegistryService creates a new PrivateRegistryService +func NewPrivateRegistryService(store *WorkflowStore) *PrivateRegistryService { + return &PrivateRegistryService{ + store: store, + } +} + +// Ensure PrivateRegistryService implements WorkflowDeploymentAction +var _ privateregistry.WorkflowDeploymentAction = (*PrivateRegistryService)(nil) + +// AddWorkflow registers a new workflow with the source +func (s *PrivateRegistryService) AddWorkflow(ctx context.Context, workflow *privateregistry.WorkflowRegistration) error { + registryLogger.Debug("AddWorkflow called", + "workflowID", hex.EncodeToString(workflow.WorkflowID[:]), + "workflowName", workflow.WorkflowName, + "donFamily", workflow.DonFamily, + "binaryURL", workflow.BinaryURL, + ) + return s.store.Add(workflow) +} + +// UpdateWorkflow updates the workflow's status configuration +func (s *PrivateRegistryService) UpdateWorkflow(ctx context.Context, workflowID [32]byte, config *privateregistry.WorkflowStatusConfig) error { + return s.store.Update(workflowID, config) +} + +// DeleteWorkflow removes the workflow from the source +func (s *PrivateRegistryService) DeleteWorkflow(ctx context.Context, workflowID [32]byte) error { + return s.store.Delete(workflowID) +} diff --git a/system-tests/lib/cre/grpc_source_mock/server.go b/system-tests/lib/cre/grpc_source_mock/server.go new file mode 100644 index 00000000000..0bef4b199d1 --- /dev/null +++ b/system-tests/lib/cre/grpc_source_mock/server.go @@ -0,0 +1,174 @@ +package grpcsourcemock + +import ( + "context" + "crypto/ed25519" 
+ "errors" + "fmt" + "net" + "sync" + + "google.golang.org/grpc" + + sourcesv1 "github.com/smartcontractkit/chainlink-protos/workflows/go/sources" + + "github.com/smartcontractkit/chainlink-common/pkg/workflows/privateregistry" +) + +const ( + // DefaultSourcePort is the default port for the WorkflowMetadataSourceService + DefaultSourcePort = 8544 + // DefaultPrivateRegistryPort is the default port for the private registry API + // Uses 8547 to avoid conflicts with anvil chains (8545 for chain 1337, 8546 for chain 2337) + DefaultPrivateRegistryPort = 8547 +) + +// NodeAuthProvider is the interface for validating node public keys +type NodeAuthProvider interface { + IsNodePubKeyTrusted(ctx context.Context, publicKey ed25519.PublicKey) (bool, error) +} + +// ServerConfig contains configuration for the mock server +type ServerConfig struct { + // SourcePort is the port for the WorkflowMetadataSourceService (default: 8544) + SourcePort int + // PrivateRegistryPort is the port for the private registry API (default: 8545) + PrivateRegistryPort int + // AuthProvider is the provider for validating node public keys + // If nil, all requests are allowed (no auth) + AuthProvider NodeAuthProvider +} + +// Server is the mock gRPC workflow source server +type Server struct { + config ServerConfig + store *WorkflowStore + sourceServer *grpc.Server + privateRegistryServer *grpc.Server + privateRegistryService *PrivateRegistryService + + sourceListener net.Listener + privateRegistryListener net.Listener + + mu sync.Mutex + started bool +} + +// NewServer creates a new mock gRPC workflow source server +func NewServer(config ServerConfig) *Server { + if config.SourcePort == 0 { + config.SourcePort = DefaultSourcePort + } + if config.PrivateRegistryPort == 0 { + config.PrivateRegistryPort = DefaultPrivateRegistryPort + } + + store := NewWorkflowStore() + + // Create source server with optional auth interceptor + var sourceOpts []grpc.ServerOption + if config.AuthProvider != nil { + sourceOpts = append(sourceOpts, grpc.UnaryInterceptor( + NewJWTAuthInterceptor(config.AuthProvider), + )) + } + sourceServer := grpc.NewServer(sourceOpts...) 
+ sourcesv1.RegisterWorkflowMetadataSourceServiceServer(sourceServer, NewSourceService(store)) + + // Create private registry server (no auth needed for tests) + privateRegistryServer := grpc.NewServer() + privateRegistryService := NewPrivateRegistryService(store) + + return &Server{ + config: config, + store: store, + sourceServer: sourceServer, + privateRegistryServer: privateRegistryServer, + privateRegistryService: privateRegistryService, + } +} + +// Start starts both gRPC servers +func (s *Server) Start() error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.started { + return errors.New("server already started") + } + + // Start source server + sourceAddr := fmt.Sprintf(":%d", s.config.SourcePort) + lc := &net.ListenConfig{} + sourceListener, err := lc.Listen(context.Background(), "tcp", sourceAddr) + if err != nil { + return fmt.Errorf("failed to listen on source port %d: %w", s.config.SourcePort, err) + } + s.sourceListener = sourceListener + + // Start private registry server + privateRegistryAddr := fmt.Sprintf(":%d", s.config.PrivateRegistryPort) + privateRegistryListener, err := lc.Listen(context.Background(), "tcp", privateRegistryAddr) + if err != nil { + sourceListener.Close() + return fmt.Errorf("failed to listen on private registry port %d: %w", s.config.PrivateRegistryPort, err) + } + s.privateRegistryListener = privateRegistryListener + + // Serve source requests + go func() { + _ = s.sourceServer.Serve(sourceListener) + // Error is expected when server is stopped gracefully + }() + + // Serve private registry requests + go func() { + _ = s.privateRegistryServer.Serve(privateRegistryListener) + // Error is expected when server is stopped gracefully + }() + + s.started = true + return nil +} + +// Stop stops both gRPC servers +func (s *Server) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started { + return + } + + s.sourceServer.GracefulStop() + s.privateRegistryServer.GracefulStop() + + if s.sourceListener != nil { + s.sourceListener.Close() + } + if s.privateRegistryListener != nil { + s.privateRegistryListener.Close() + } + + s.started = false +} + +// SourceAddr returns the address of the source service +func (s *Server) SourceAddr() string { + return fmt.Sprintf("localhost:%d", s.config.SourcePort) +} + +// PrivateRegistryAddr returns the address of the private registry service +func (s *Server) PrivateRegistryAddr() string { + return fmt.Sprintf("localhost:%d", s.config.PrivateRegistryPort) +} + +// PrivateRegistryService returns the private registry service for direct manipulation in tests +func (s *Server) PrivateRegistryService() privateregistry.WorkflowDeploymentAction { + return s.privateRegistryService +} + +// Store returns the underlying workflow store for direct inspection in tests +func (s *Server) Store() *WorkflowStore { + return s.store +} diff --git a/system-tests/lib/cre/grpc_source_mock/source_service.go b/system-tests/lib/cre/grpc_source_mock/source_service.go new file mode 100644 index 00000000000..87bf08c3f91 --- /dev/null +++ b/system-tests/lib/cre/grpc_source_mock/source_service.go @@ -0,0 +1,89 @@ +package grpcsourcemock + +import ( + "context" + "log/slog" + "os" + + sourcesv1 "github.com/smartcontractkit/chainlink-protos/workflows/go/sources" +) + +var sourceLogger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelDebug, +})).With("logger", "grpc_source_mock.SourceService") + +// SourceService implements the WorkflowMetadataSourceService gRPC service +type SourceService struct { + 
sourcesv1.UnimplementedWorkflowMetadataSourceServiceServer + store *WorkflowStore +} + +// NewSourceService creates a new SourceService +func NewSourceService(store *WorkflowStore) *SourceService { + return &SourceService{ + store: store, + } +} + +// ListWorkflowMetadata returns all workflow metadata for the given DON +func (s *SourceService) ListWorkflowMetadata(ctx context.Context, req *sourcesv1.ListWorkflowMetadataRequest) (*sourcesv1.ListWorkflowMetadataResponse, error) { + sourceLogger.Debug("ListWorkflowMetadata called", + "donFamilies", req.GetDonFamilies(), + "start", req.GetStart(), + "limit", req.GetLimit(), + ) + + // Get all workflows matching the filter + workflows := s.store.List(req.GetDonFamilies()) + + sourceLogger.Debug("ListWorkflowMetadata results", + "donFamiliesFilter", req.GetDonFamilies(), + "workflowCount", len(workflows), + ) + + // Apply pagination + start := req.GetStart() + limit := req.GetLimit() + if limit == 0 { + limit = 1000 // default limit + } + + // Calculate pagination bounds + totalCount := int64(len(workflows)) + if start >= totalCount { + // No results for this page + return &sourcesv1.ListWorkflowMetadataResponse{ + Workflows: []*sourcesv1.WorkflowMetadata{}, + HasMore: false, + }, nil + } + + end := min(start+limit, totalCount) + + // Convert to proto messages + protoWorkflows := make([]*sourcesv1.WorkflowMetadata, 0, end-start) + for i := start; i < end; i++ { + wf := workflows[i] + var createdAt uint64 + if wf.CreatedAt >= 0 { + createdAt = uint64(wf.CreatedAt) // #nosec G115 -- CreatedAt is always positive timestamp + } + protoWorkflows = append(protoWorkflows, &sourcesv1.WorkflowMetadata{ + WorkflowId: wf.Registration.WorkflowID[:], + Owner: wf.Registration.Owner, + CreatedAt: createdAt, + Status: uint32(wf.Status), + WorkflowName: wf.Registration.WorkflowName, + BinaryUrl: wf.Registration.BinaryURL, + ConfigUrl: wf.Registration.ConfigURL, + Tag: wf.Registration.Tag, + Attributes: wf.Registration.Attributes, + DonFamily: wf.Registration.DonFamily, + }) + } + + return &sourcesv1.ListWorkflowMetadataResponse{ + Workflows: protoWorkflows, + HasMore: end < totalCount, + }, nil +} diff --git a/system-tests/lib/cre/grpc_source_mock/store.go b/system-tests/lib/cre/grpc_source_mock/store.go new file mode 100644 index 00000000000..0af3a9a1178 --- /dev/null +++ b/system-tests/lib/cre/grpc_source_mock/store.go @@ -0,0 +1,141 @@ +package grpcsourcemock + +import ( + "errors" + "sync" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/workflows/privateregistry" +) + +// ErrWorkflowNotFound is returned when a workflow is not found in the store +var ErrWorkflowNotFound = errors.New("workflow not found") + +// WorkflowStatus represents the status of a workflow +type WorkflowStatus uint32 + +const ( + // WorkflowStatusActive indicates the workflow is active + WorkflowStatusActive WorkflowStatus = 0 + // WorkflowStatusPaused indicates the workflow is paused + WorkflowStatusPaused WorkflowStatus = 1 +) + +// StoredWorkflow represents a workflow stored in memory +type StoredWorkflow struct { + Registration *privateregistry.WorkflowRegistration + Status WorkflowStatus + // CreatedAt is the Unix timestamp in milliseconds when the workflow was first added + CreatedAt int64 + // UpdatedAt is the Unix timestamp in milliseconds when the workflow was last modified + UpdatedAt int64 +} + +// WorkflowStore is an in-memory store for workflows +type WorkflowStore struct { + mu sync.RWMutex + workflows map[[32]byte]*StoredWorkflow +} + +// NewWorkflowStore 
creates a new in-memory workflow store +func NewWorkflowStore() *WorkflowStore { + return &WorkflowStore{ + workflows: make(map[[32]byte]*StoredWorkflow), + } +} + +// Add adds a workflow to the store. Concurrent safe. +// If the workflow already exists, it updates the existing workflow and bumps UpdatedAt. +func (s *WorkflowStore) Add(registration *privateregistry.WorkflowRegistration) error { + s.mu.Lock() + defer s.mu.Unlock() + + now := time.Now().UnixMilli() + + // Check if workflow already exists + if existing, exists := s.workflows[registration.WorkflowID]; exists { + // Update existing workflow and bump UpdatedAt + existing.Registration = registration + existing.UpdatedAt = now + return nil + } + + // Create new workflow with both timestamps set + s.workflows[registration.WorkflowID] = &StoredWorkflow{ + Registration: registration, + Status: WorkflowStatusActive, + CreatedAt: now, + UpdatedAt: now, + } + return nil +} + +// Update updates a workflow's status. Concurrent safe. +// It bumps the UpdatedAt timestamp whenever the workflow is modified. +func (s *WorkflowStore) Update(workflowID [32]byte, config *privateregistry.WorkflowStatusConfig) error { + s.mu.Lock() + defer s.mu.Unlock() + + wf, exists := s.workflows[workflowID] + if !exists { + return ErrWorkflowNotFound + } + + if config.Paused { + wf.Status = WorkflowStatusPaused + } else { + wf.Status = WorkflowStatusActive + } + + // Bump UpdatedAt timestamp + wf.UpdatedAt = time.Now().UnixMilli() + return nil +} + +// Delete removes a workflow from the store. Concurrent safe. +func (s *WorkflowStore) Delete(workflowID [32]byte) error { + s.mu.Lock() + defer s.mu.Unlock() + + if _, exists := s.workflows[workflowID]; !exists { + return ErrWorkflowNotFound + } + + delete(s.workflows, workflowID) + return nil +} + +// List returns all workflows matching the given DON family filter +// If donFamilies is empty, all workflows are returned +func (s *WorkflowStore) List(donFamilies []string) []*StoredWorkflow { + s.mu.RLock() + defer s.mu.RUnlock() + + familySet := make(map[string]bool) + for _, f := range donFamilies { + familySet[f] = true + } + + var result []*StoredWorkflow + for _, wf := range s.workflows { + // If no family filter, include all workflows + if len(donFamilies) == 0 { + result = append(result, wf) + continue + } + // Otherwise, filter by family + if familySet[wf.Registration.DonFamily] { + result = append(result, wf) + } + } + return result +} + +// Get retrieves a workflow by ID. Concurrent safe. 
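+//
+// A minimal usage sketch (the zero-value workflow ID is purely illustrative):
+//
+//	store := NewWorkflowStore()
+//	var id [32]byte
+//	_ = store.Add(&privateregistry.WorkflowRegistration{WorkflowID: id})
+//	if wf, ok := store.Get(id); ok {
+//		_ = wf.Status // WorkflowStatusActive immediately after Add
+//	}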
+func (s *WorkflowStore) Get(workflowID [32]byte) (*StoredWorkflow, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+
+	wf, exists := s.workflows[workflowID]
+	return wf, exists
+}
diff --git a/system-tests/lib/cre/grpc_source_mock/testcontainer.go b/system-tests/lib/cre/grpc_source_mock/testcontainer.go
new file mode 100644
index 00000000000..f41b8e3d96f
--- /dev/null
+++ b/system-tests/lib/cre/grpc_source_mock/testcontainer.go
@@ -0,0 +1,158 @@
+package grpcsourcemock
+
+import (
+	"context"
+	"crypto/ed25519"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/workflows/privateregistry"
+	"github.com/smartcontractkit/chainlink-testing-framework/framework"
+)
+
+// TestContainer wraps the mock gRPC server for use in integration tests
+// It runs the server in-process and provides the URL that Docker containers
+// can use to connect to it via host.docker.internal
+type TestContainer struct {
+	server       *Server
+	authProvider *MockNodeAuthProvider
+	mu           sync.Mutex
+	started      bool
+}
+
+// TestContainerConfig contains configuration for the test container
+type TestContainerConfig struct {
+	// SourcePort is the port for the WorkflowMetadataSourceService (default: 8544)
+	SourcePort int
+	// PrivateRegistryPort is the port for the private registry API (default: 8547)
+	PrivateRegistryPort int
+	// TrustedKeys is the initial set of trusted public keys
+	TrustedKeys []ed25519.PublicKey
+	// RejectAllAuth if true, will reject all authentication attempts
+	RejectAllAuth bool
+}
+
+// NewTestContainer creates a new test container for the mock gRPC server
+func NewTestContainer(config TestContainerConfig) *TestContainer {
+	if config.SourcePort == 0 {
+		config.SourcePort = DefaultSourcePort
+	}
+	if config.PrivateRegistryPort == 0 {
+		config.PrivateRegistryPort = DefaultPrivateRegistryPort
+	}
+
+	var authProvider NodeAuthProvider
+	var mockAuthProvider *MockNodeAuthProvider
+
+	switch {
+	case config.RejectAllAuth:
+		authProvider = &RejectAllAuthProvider{}
+	case len(config.TrustedKeys) > 0:
+		// Use MockNodeAuthProvider with specific trusted keys
+		mockAuthProvider = NewMockNodeAuthProvider()
+		for _, key := range config.TrustedKeys {
+			mockAuthProvider.AddTrustedKey(key)
+		}
+		authProvider = mockAuthProvider
+	default:
+		// Accept all valid JWTs when no specific keys are provided
+		// This is useful for tests where we don't know node keys ahead of time
+		authProvider = &AcceptAllAuthProvider{}
+	}
+
+	server := NewServer(ServerConfig{
+		SourcePort:          config.SourcePort,
+		PrivateRegistryPort: config.PrivateRegistryPort,
+		AuthProvider:        authProvider,
+	})
+
+	return &TestContainer{
+		server:       server,
+		authProvider: mockAuthProvider,
+	}
+}
+
+// Start starts the mock server
+func (tc *TestContainer) Start(ctx context.Context) error {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+
+	if tc.started {
+		return errors.New("test container already started")
+	}
+
+	if err := tc.server.Start(); err != nil {
+		return fmt.Errorf("failed to start mock server: %w", err)
+	}
+
+	tc.started = true
+	return nil
+}
+
+// Stop stops the mock server
+func (tc *TestContainer) Stop(ctx context.Context) error {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+
+	if !tc.started {
+		return nil
+	}
+
+	tc.server.Stop()
+	tc.started = false
+	return nil
+}
+
+// SourceURL returns the URL that Docker containers should use to connect to the source service.
+// Uses framework.HostDockerInternal() which handles both local (Mac) and CI (Linux) environments.
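+// For example, this typically resolves to "host.docker.internal:8544" on macOS and
+// to a Docker bridge address such as "172.17.0.1:8544" on Linux CI (assuming the
+// default source port).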
+func (tc *TestContainer) SourceURL() string { + // Same pattern as telemetry endpoint in lib/cre/don/config/config.go:279 + host := strings.TrimPrefix(framework.HostDockerInternal(), "http://") + return fmt.Sprintf("%s:%d", host, tc.server.config.SourcePort) +} + +// PrivateRegistryURL returns the URL that can be used to connect to the private registry service +// This is typically used from the test process, not from Docker containers +func (tc *TestContainer) PrivateRegistryURL() string { + return tc.server.PrivateRegistryAddr() +} + +// InternalSourceURL returns the source URL for use within Docker containers +// This is an alias for SourceURL for clarity +func (tc *TestContainer) InternalSourceURL() string { + return tc.SourceURL() +} + +// PrivateRegistryService returns the private registry service for direct manipulation in tests +func (tc *TestContainer) PrivateRegistryService() privateregistry.WorkflowDeploymentAction { + return tc.server.PrivateRegistryService() +} + +// Store returns the underlying workflow store for direct inspection in tests +func (tc *TestContainer) Store() *WorkflowStore { + return tc.server.Store() +} + +// AuthProvider returns the mock auth provider for managing trusted keys +// Returns nil if RejectAllAuth was set in the config +func (tc *TestContainer) AuthProvider() *MockNodeAuthProvider { + return tc.authProvider +} + +// AddTrustedKey adds a public key to the trusted list +// This is a no-op if RejectAllAuth was set in the config +func (tc *TestContainer) AddTrustedKey(publicKey ed25519.PublicKey) { + if tc.authProvider != nil { + tc.authProvider.AddTrustedKey(publicKey) + } +} + +// SetTrustedKeys replaces all trusted keys with the provided list +// This is a no-op if RejectAllAuth was set in the config +func (tc *TestContainer) SetTrustedKeys(publicKeys []ed25519.PublicKey) { + if tc.authProvider != nil { + tc.authProvider.SetTrustedKeys(publicKeys) + } +} diff --git a/system-tests/lib/go.mod b/system-tests/lib/go.mod index 47787155ce4..7d5d7741a9c 100644 --- a/system-tests/lib/go.mod +++ b/system-tests/lib/go.mod @@ -39,6 +39,7 @@ require ( github.com/smartcontractkit/chainlink-evm/gethwrappers v0.0.0-20251211123524-f0c4fe7cfc0a github.com/smartcontractkit/chainlink-protos/cre/go v0.0.0-20251124151448-0448aefdaab9 github.com/smartcontractkit/chainlink-protos/job-distributor v0.17.0 + github.com/smartcontractkit/chainlink-protos/workflows/go v0.0.0-20260106052706-6dd937cb5ec6 github.com/smartcontractkit/chainlink-solana v1.1.2-0.20251121223352-370eb61346d6 github.com/smartcontractkit/chainlink-testing-framework/framework v0.12.1 github.com/smartcontractkit/chainlink-testing-framework/framework/components/dockercompose v0.1.15 @@ -54,7 +55,7 @@ require ( go.uber.org/ratelimit v0.3.1 go.uber.org/zap v1.27.0 golang.org/x/sync v0.19.0 - google.golang.org/grpc v1.77.0 + google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.10 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.32.3 @@ -479,7 +480,6 @@ require ( github.com/smartcontractkit/chainlink-protos/rmn/v1.6/go v0.0.0-20250131130834-15e0d4cde2a6 // indirect github.com/smartcontractkit/chainlink-protos/storage-service v0.3.0 // indirect github.com/smartcontractkit/chainlink-protos/svr v1.1.0 // indirect - github.com/smartcontractkit/chainlink-protos/workflows/go v0.0.0-20260106052706-6dd937cb5ec6 // indirect github.com/smartcontractkit/chainlink-sui v0.0.0-20251205161630-88314452254c // indirect github.com/smartcontractkit/chainlink-sui/deployment v0.0.0-20251205161630-88314452254c // indirect 
github.com/smartcontractkit/chainlink-testing-framework/parrot v0.6.2 // indirect @@ -588,8 +588,8 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/guregu/null.v4 v4.0.0 // indirect diff --git a/system-tests/lib/go.sum b/system-tests/lib/go.sum index 30356478d5c..51b5a73e3f0 100644 --- a/system-tests/lib/go.sum +++ b/system-tests/lib/go.sum @@ -2487,10 +2487,10 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2515,8 +2515,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/system-tests/tests/go.mod b/system-tests/tests/go.mod index 4ca999d574c..f8edae0d47f 100644 --- a/system-tests/tests/go.mod +++ b/system-tests/tests/go.mod @@ -720,9 +720,9 @@ require ( gonum.org/v1/gonum v0.16.0 // indirect google.golang.org/api v0.241.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/grpc v1.77.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect + google.golang.org/grpc v1.78.0 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/guregu/null.v4 v4.0.0 // indirect diff --git a/system-tests/tests/go.sum b/system-tests/tests/go.sum index c81a6067596..a97b489076e 100644 --- a/system-tests/tests/go.sum +++ b/system-tests/tests/go.sum @@ -2776,10 +2776,10 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= +google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2805,8 +2805,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod 
h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/system-tests/tests/smoke/cre/billing_helpers.go b/system-tests/tests/smoke/cre/billing_helpers.go
index f085b2ae657..389790506a1 100644
--- a/system-tests/tests/smoke/cre/billing_helpers.go
+++ b/system-tests/tests/smoke/cre/billing_helpers.go
@@ -1,6 +1,7 @@
 package cre
 
 import (
+	"context"
 	"database/sql"
 	"encoding/hex"
 	"fmt"
@@ -145,7 +146,7 @@ func startBillingStackIfIsNotRunning(t *testing.T, relativePathToRepoRoot, envir
 	}
 
 	framework.L.Info().Str("state file", config.MustBillingStateFileAbsPath(relativePathToRepoRoot)).Msg("Billing state file was not found. Starting Billing...")
-	cmd := exec.Command("go", "run", ".", "env", "billing", "start")
+	cmd := exec.CommandContext(context.Background(), "go", "run", ".", "env", "billing", "start")
 	cmd.Dir = environmentDir
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
diff --git a/system-tests/tests/smoke/cre/grpc_source_test_config.toml b/system-tests/tests/smoke/cre/grpc_source_test_config.toml
new file mode 100644
index 00000000000..cd49e36e9be
--- /dev/null
+++ b/system-tests/tests/smoke/cre/grpc_source_test_config.toml
@@ -0,0 +1,82 @@
+# Configuration for gRPC source integration test
+# This is used by Test_CRE_GRPCSource_Lifecycle to start a CRE environment
+# with v2 registries for testing alternative workflow sources.
+
+# ----- BLOCKCHAIN CONFIGURATION -----
+# Note: Using port 8546 to stay clear of the mock gRPC server's ports (8544 source service, 8547 private registry)
+[[blockchains]]
+  type = "anvil"
+  chain_id = "1337"
+  port = "8546"
+  docker_cmd_params = ["-b", "0.5", "--mixed-mining"]
+
+# ----- JOB DISTRIBUTOR -----
+[jd]
+  csa_encryption_key = "d1093c0060d50a3c89c189b2e485da5a3ce57f3dcb38ab7e2c0d5f0bb2314a44"
+  image = "job-distributor:0.22.1"
+
+# ----- INFRASTRUCTURE -----
+[infra]
+  type = "docker"
+
+# ----- WORKFLOW NODES -----
+# 4-node workflow DON - minimal capabilities for gRPC source testing
+[[nodesets]]
+  nodes = 4
+  name = "workflow"
+  don_types = ["workflow"]
+  override_mode = "all"
+  http_port_range_start = 10100
+  env_vars = { CL_EVM_CMD = "" }
+  # Minimal capabilities - just what's needed for workflow execution
+  capabilities = []
+
+  [nodesets.chain_capabilities]
+    evm = ["1337"]
+
+  [nodesets.db]
+    image = "postgres:12.0"
+    port = 13000
+
+[[nodesets.node_specs]]
+  roles = ["plugin"]
+  [nodesets.node_specs.node]
+    # Use locally built image (build with: docker build -f core/chainlink.Dockerfile -t chainlink-tmp:latest .)
+ image = "chainlink-tmp:latest" + user_config_overrides = """ + [Feature] + LogPoller = true + FeedsManager = true + [OCR2] + Enabled = true + DatabaseTimeout = '1s' + """ + +# ----- BOOTSTRAP + GATEWAY NODE ----- +[[nodesets]] + nodes = 1 + name = "bootstrap-gateway" + don_types = ["bootstrap", "gateway"] + override_mode = "each" + http_port_range_start = 10300 + env_vars = { CL_EVM_CMD = "" } + supported_evm_chains = [1337] + + [nodesets.db] + image = "postgres:12.0" + port = 13200 + + [[nodesets.node_specs]] + roles = ["bootstrap", "gateway"] + [nodesets.node_specs.node] + image = "chainlink-tmp:latest" + # 5002 is the web API capabilities port for incoming requests + custom_ports = ["5002:5002"] + user_config_overrides = """ + [Feature] + LogPoller = true + FeedsManager = true + [OCR2] + Enabled = true + DatabaseTimeout = '1s' + """ diff --git a/system-tests/tests/smoke/cre/v2_grpc_source_test.go b/system-tests/tests/smoke/cre/v2_grpc_source_test.go new file mode 100644 index 00000000000..e3aa7d0dfa5 --- /dev/null +++ b/system-tests/tests/smoke/cre/v2_grpc_source_test.go @@ -0,0 +1,626 @@ +package cre + +import ( + "context" + "encoding/base64" + "encoding/hex" + "os" + "path/filepath" + "testing" + "time" + + _ "github.com/lib/pq" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "gopkg.in/yaml.v3" + + workflowsv2 "github.com/smartcontractkit/chainlink-protos/workflows/go/v2" + + "github.com/smartcontractkit/chainlink-testing-framework/framework" + ns "github.com/smartcontractkit/chainlink-testing-framework/framework/components/simple_node_set" + + "github.com/smartcontractkit/chainlink-common/pkg/workflows" + "github.com/smartcontractkit/chainlink-common/pkg/workflows/privateregistry" + + crontypes "github.com/smartcontractkit/chainlink/core/scripts/cre/environment/examples/workflows/v2/cron/types" + grpcsourcemock "github.com/smartcontractkit/chainlink/system-tests/lib/cre/grpc_source_mock" + creworkflow "github.com/smartcontractkit/chainlink/system-tests/lib/cre/workflow" + t_helpers "github.com/smartcontractkit/chainlink/system-tests/tests/test-helpers" + ttypes "github.com/smartcontractkit/chainlink/system-tests/tests/test-helpers/configuration" +) + +const ( + grpcSourceTestWorkflowName = "grpc-source-test" + grpcSourceTestDonFamily = "test-don-family" // must match DefaultDONFamily in lib/cre/environment/config/config.go + grpcSourceTestSyncerInterval = 15 * time.Second // default syncer poll interval + // Path to cron workflow source used for testing + grpcTestWorkflowSource = "../../../../core/scripts/cre/environment/examples/workflows/v2/cron/main.go" +) + +// Test_CRE_GRPCSource_Lifecycle tests the complete lifecycle of workflows via the gRPC +// alternative source: deploy, pause, resume, delete. +// +// This test uses a pre-configured TOML with AlternativeSources pointing to host.docker.internal:8544. +// The config generation code automatically transforms host.docker.internal to the platform-specific +// Docker host address (e.g., 172.17.0.1 on Linux). +// +// To run locally: +// 1. Start the test (it will start the environment automatically): +// go test -timeout 20m -run "^Test_CRE_GRPCSource_Lifecycle$" ./smoke/cre/... 
+func Test_CRE_GRPCSource_Lifecycle(t *testing.T) { + t.Skip("Skipping: gRPC source tests require V2 workflow registry syncer - needs investigation for CI environment differences") + + testLogger := framework.L + ctx := t.Context() + + // Step 1: Start mock gRPC server BEFORE environment (uses default port 8544) + testLogger.Info().Msg("Starting mock gRPC source server...") + mockServer := grpcsourcemock.NewTestContainer(grpcsourcemock.TestContainerConfig{ + RejectAllAuth: false, + }) + + err := mockServer.Start(ctx) + require.NoError(t, err, "failed to start mock gRPC source server") + t.Cleanup(func() { + testLogger.Info().Msg("Stopping mock gRPC source server...") + _ = mockServer.Stop(ctx) + }) + + testLogger.Info(). + Str("sourceURL", mockServer.SourceURL()). + Str("privateRegistryURL", mockServer.PrivateRegistryURL()). + Msg("Mock gRPC source server started") + + // Step 2: Use standard pattern - config has AlternativeSources with host.docker.internal + // The config generation code transforms this to the platform-specific Docker host + testEnv := t_helpers.SetupTestEnvironmentWithConfig( + t, + t_helpers.GetTestConfig(t, "/configs/workflow-gateway-don-grpc-source.toml"), + "--with-contracts-version", "v2", + ) + + // Step 3: Run lifecycle test + // Pass empty string for contractWorkflowName to skip contract isolation checks + // (no contract workflow is deployed in this test configuration) + ExecuteGRPCSourceLifecycleTest(t, testEnv, mockServer, "" /* contractWorkflowName */) +} + +// Test_CRE_GRPCSource_AuthRejection tests that JWT authentication rejection is handled +// gracefully without panics or crashes. +// +// This test uses a pre-started CRE environment (the mock server rejects all auth, +// so no config injection is needed for nodes). +// +// To run locally: +// 1. Start CRE: go run . env start --with-beholder --with-contracts-version v2 +// 2. Run test: go test -timeout 15m -run "^Test_CRE_GRPCSource_AuthRejection$" +func Test_CRE_GRPCSource_AuthRejection(t *testing.T) { + // Set up test environment + testEnv := t_helpers.SetupTestEnvironmentWithConfig(t, t_helpers.GetDefaultTestConfig(t), "--with-contracts-version", "v2") + + // Execute auth rejection test + ExecuteGRPCSourceAuthRejectionTest(t, testEnv) +} + +// ExecuteGRPCSourceLifecycleTest tests the complete lifecycle of a workflow via the gRPC +// alternative source: deploy, pause, resume, delete. +// +// If contractWorkflowName is provided (non-empty), it also verifies that contract-source +// workflows are not affected by gRPC source operations (isolation checks). +// +// Test sequence: +// 1. (Optional) Verify contract-source workflow is active +// 2. Deploy gRPC source workflow -> verify WorkflowActivated +// 3. (Optional) Check contract workflow still running (isolation) +// 4. Pause gRPC workflow -> verify WorkflowPaused +// 5. (Optional) Check contract workflow still running (isolation) +// 6. Resume gRPC workflow -> verify WorkflowActivated +// 7. Delete gRPC workflow -> verify WorkflowDeleted +// 8. 
(Optional) Final isolation check - contract workflow still running +func ExecuteGRPCSourceLifecycleTest(t *testing.T, testEnv *ttypes.TestEnvironment, mockServer *grpcsourcemock.TestContainer, contractWorkflowName string) { + t.Helper() + testLogger := framework.L + ctx := t.Context() + + // Determine if we should run contract isolation checks + runIsolationChecks := contractWorkflowName != "" + + // Compile and copy gRPC workflow to containers + grpcWorkflowName := grpcSourceTestWorkflowName + "-lifecycle" + // Use a proper hex-encoded owner (simulating an address or identifier) + ownerHex := "0x1234567890abcdef1234567890abcdef12345678" + ownerBytes, err := hex.DecodeString(ownerHex[2:]) // strip 0x prefix + require.NoError(t, err, "failed to decode owner hex") + artifacts := compileAndCopyWorkflow(t, testEnv, grpcWorkflowName, ownerHex) + + // Start Beholder listener for workflow events + testLogger.Info().Msg("Starting Beholder listener for workflow lifecycle events...") + beholderCtx, messageChan, errChan := startWorkflowEventBeholder(t, testEnv) + + // Step 1: (Optional) Verify contract workflow is activated + if runIsolationChecks { + testLogger.Info().Str("workflowName", contractWorkflowName).Msg("Step 1: Verifying contract-source workflow is active...") + assertWorkflowActivated(t, beholderCtx, messageChan, errChan, contractWorkflowName, 2*grpcSourceTestSyncerInterval) + } else { + testLogger.Info().Msg("Skipping contract workflow isolation checks (no contract workflow configured)") + } + + // Step 2: Deploy gRPC source workflow (using the computed workflow ID from the actual binary) + registration := &privateregistry.WorkflowRegistration{ + WorkflowID: artifacts.WorkflowID, + Owner: ownerBytes, + WorkflowName: grpcWorkflowName, + BinaryURL: artifacts.BinaryURL, + ConfigURL: artifacts.ConfigURL, + DonFamily: grpcSourceTestDonFamily, + Tag: "v1.0.0", + } + + testLogger.Info().Str("workflowName", grpcWorkflowName).Str("binaryURL", artifacts.BinaryURL).Str("configURL", artifacts.ConfigURL).Str("workflowID", hex.EncodeToString(artifacts.WorkflowID[:])).Msg("Step 2: Deploying gRPC source workflow...") + err = mockServer.PrivateRegistryService().AddWorkflow(ctx, registration) + require.NoError(t, err, "failed to add workflow via private registry API") + + // Verify gRPC workflow activation + assertWorkflowActivated(t, beholderCtx, messageChan, errChan, grpcWorkflowName, 2*grpcSourceTestSyncerInterval) + + // Step 3: (Optional) Verify contract workflow is still running (isolation check) + if runIsolationChecks { + testLogger.Info().Str("workflowName", contractWorkflowName).Msg("Step 3: Verifying contract workflow isolation after gRPC deploy...") + assertWorkflowStillExecuting(t, testEnv, contractWorkflowName) + } + + // Step 4: Pause gRPC workflow + testLogger.Info().Str("workflowName", grpcWorkflowName).Msg("Step 4: Pausing gRPC workflow...") + err = mockServer.PrivateRegistryService().UpdateWorkflow(ctx, artifacts.WorkflowID, &privateregistry.WorkflowStatusConfig{Paused: true}) + require.NoError(t, err, "failed to pause workflow via private registry API") + + // Verify gRPC workflow paused + assertWorkflowPaused(t, beholderCtx, messageChan, errChan, grpcWorkflowName, 2*grpcSourceTestSyncerInterval) + + // Step 5: (Optional) Verify contract workflow is still running (isolation check) + if runIsolationChecks { + testLogger.Info().Str("workflowName", contractWorkflowName).Msg("Step 5: Verifying contract workflow isolation after gRPC pause...") + assertWorkflowStillExecuting(t, testEnv, 
contractWorkflowName) + } + + // Step 6: Resume gRPC workflow + testLogger.Info().Str("workflowName", grpcWorkflowName).Msg("Step 6: Resuming gRPC workflow...") + err = mockServer.PrivateRegistryService().UpdateWorkflow(ctx, artifacts.WorkflowID, &privateregistry.WorkflowStatusConfig{Paused: false}) + require.NoError(t, err, "failed to resume workflow via private registry API") + + // Verify gRPC workflow reactivated + assertWorkflowActivated(t, beholderCtx, messageChan, errChan, grpcWorkflowName, 2*grpcSourceTestSyncerInterval) + + // Step 7: Delete gRPC workflow + testLogger.Info().Str("workflowName", grpcWorkflowName).Msg("Step 7: Deleting gRPC workflow...") + err = mockServer.PrivateRegistryService().DeleteWorkflow(ctx, artifacts.WorkflowID) + require.NoError(t, err, "failed to delete workflow via private registry API") + + // Verify gRPC workflow deleted + assertWorkflowDeleted(t, beholderCtx, messageChan, errChan, grpcWorkflowName, 2*grpcSourceTestSyncerInterval) + + // Step 8: (Optional) Final isolation check - contract workflow still running + if runIsolationChecks { + testLogger.Info().Str("workflowName", contractWorkflowName).Msg("Step 8: Final isolation check - verifying contract workflow still running...") + assertWorkflowStillExecuting(t, testEnv, contractWorkflowName) + } + + testLogger.Info().Msg("gRPC source lifecycle test completed successfully") +} + +// ExecuteGRPCSourceAuthRejectionTest tests that JWT authentication rejection is handled +// gracefully without panics or crashes. +func ExecuteGRPCSourceAuthRejectionTest(t *testing.T, testEnv *ttypes.TestEnvironment) { + t.Helper() + testLogger := framework.L + ctx := t.Context() + + // Start mock server that rejects all keys + mockServer := grpcsourcemock.NewTestContainer(grpcsourcemock.TestContainerConfig{ + RejectAllAuth: true, + }) + + err := mockServer.Start(ctx) + require.NoError(t, err, "failed to start mock server with reject-all auth") + t.Cleanup(func() { + _ = mockServer.Stop(ctx) + }) + + // Add a workflow (doesn't need real binary or valid ID - auth will be rejected before fetch) + var workflowID [32]byte + registration := &privateregistry.WorkflowRegistration{ + WorkflowID: workflowID, + Owner: []byte("test-owner"), + WorkflowName: grpcSourceTestWorkflowName + "-auth-reject", + BinaryURL: "file:///nonexistent/auth-reject-test.wasm", // Won't be fetched - auth rejection happens first + ConfigURL: "", + DonFamily: grpcSourceTestDonFamily, + Tag: "v1.0.0", + } + + err = mockServer.PrivateRegistryService().AddWorkflow(ctx, registration) + require.NoError(t, err, "failed to add workflow via private registry API") + + // Start Beholder listener + beholderCtx, messageChan, errChan := startWorkflowEventBeholder(t, testEnv) + + // Wait for 2 sync intervals - workflow should NOT be activated + testLogger.Info().Msg("Waiting to verify workflow is NOT activated (auth rejection)...") + assertNoWorkflowActivated(t, beholderCtx, messageChan, errChan, registration.WorkflowName, 2*grpcSourceTestSyncerInterval) + + // Verify nodes are still healthy (no panics) + testLogger.Info().Msg("Verifying nodes are still healthy after auth rejection...") + assertNodesHealthy(t, testEnv) + + testLogger.Info().Msg("JWT auth rejection test completed - rejection handled gracefully") +} + +// Helper functions + +func startWorkflowEventBeholder(t *testing.T, testEnv *ttypes.TestEnvironment) (context.Context, <-chan proto.Message, <-chan error) { + t.Helper() + + beholder, err := t_helpers.NewBeholder(framework.L, 
testEnv.TestConfig.RelativePathToRepoRoot, testEnv.TestConfig.EnvironmentDirPath)
+	require.NoError(t, err, "failed to create beholder instance")
+
+	// Register for workflow deployment events
+	messageTypes := map[string]func() proto.Message{
+		"workflows.v2.WorkflowActivated": func() proto.Message { return &workflowsv2.WorkflowActivated{} },
+		"workflows.v2.WorkflowPaused":    func() proto.Message { return &workflowsv2.WorkflowPaused{} },
+		"workflows.v2.WorkflowDeleted":   func() proto.Message { return &workflowsv2.WorkflowDeleted{} },
+	}
+
+	timeout := 5 * time.Minute
+	beholderCtx, cancelListener := context.WithTimeout(t.Context(), timeout)
+	t.Cleanup(func() {
+		cancelListener()
+	})
+
+	messageChan, errChan := beholder.SubscribeToBeholderMessages(beholderCtx, messageTypes)
+
+	// Fail fast if there's an immediate error
+	select {
+	case err := <-errChan:
+		require.NoError(t, err, "Beholder subscription failed during initialization")
+	default:
+	}
+
+	return beholderCtx, messageChan, errChan
+}
+
+// workflowEvent is an interface that abstracts common fields across workflow lifecycle events
+// (WorkflowActivated, WorkflowPaused, WorkflowDeleted).
+type workflowEvent interface {
+	GetWorkflow() *workflowsv2.Workflow
+	GetErrorMessage() string
+}
+
+// workflowEventMatcher defines how to match and extract data from a specific workflow event type.
+type workflowEventMatcher struct {
+	// eventName is the human-readable name for logging (e.g., "WorkflowActivated")
+	eventName string
+	// tryMatch attempts to type-assert the proto.Message to the expected event type.
+	// It returns the event as a workflowEvent and true if it matched; nil and false otherwise.
+	tryMatch func(proto.Message) (workflowEvent, bool)
+	// errorAssertionMsg is the assertion message used when asserting that the event
+	// carries no error (e.g., "Workflow activation should succeed")
+	errorAssertionMsg string
+}
+
+// assertWorkflowEvent is a generic function to wait for and validate a workflow lifecycle event.
+// It listens on messageChan for messages matching the specified matcher and workflowName.
+func assertWorkflowEvent(
+	t *testing.T,
+	ctx context.Context, //nolint:revive // test helper conventionally has t first
+	messageChan <-chan proto.Message,
+	errChan <-chan error,
+	workflowName string,
+	timeout time.Duration,
+	matcher workflowEventMatcher,
+) {
+	t.Helper()
+	testLogger := framework.L
+
+	// Create the timeout once so it bounds the entire wait; a time.After inside the
+	// select would reset on every unrelated message.
+	deadline := time.After(timeout)
+	for {
+		select {
+		case msg := <-messageChan:
+			if event, ok := matcher.tryMatch(msg); ok {
+				wfKey := event.GetWorkflow().GetWorkflowKey()
+				if wfKey.GetWorkflowName() == workflowName {
+					require.Empty(t, event.GetErrorMessage(), matcher.errorAssertionMsg)
+					testLogger.Info().
+						Str("workflowName", wfKey.GetWorkflowName()).
+						Str("workflowID", wfKey.GetWorkflowID()).
+ Msgf("%s event received", matcher.eventName) + return + } + } + case err := <-errChan: + require.NoError(t, err, "Beholder error during %s assertion", matcher.eventName) + case <-time.After(timeout): + t.Fatalf("Timeout waiting for %s event for workflow %s", matcher.eventName, workflowName) + case <-ctx.Done(): + t.Fatalf("Context cancelled while waiting for %s event", matcher.eventName) + } + } +} + +// Pre-defined matchers for workflow lifecycle events +var ( + workflowActivatedMatcher = workflowEventMatcher{ + eventName: "WorkflowActivated", + tryMatch: func(msg proto.Message) (workflowEvent, bool) { + if e, ok := msg.(*workflowsv2.WorkflowActivated); ok { + return e, true + } + return nil, false + }, + errorAssertionMsg: "Workflow activation should succeed", + } + + workflowPausedMatcher = workflowEventMatcher{ + eventName: "WorkflowPaused", + tryMatch: func(msg proto.Message) (workflowEvent, bool) { + if e, ok := msg.(*workflowsv2.WorkflowPaused); ok { + return e, true + } + return nil, false + }, + errorAssertionMsg: "Workflow pause should succeed", + } + + workflowDeletedMatcher = workflowEventMatcher{ + eventName: "WorkflowDeleted", + tryMatch: func(msg proto.Message) (workflowEvent, bool) { + if e, ok := msg.(*workflowsv2.WorkflowDeleted); ok { + return e, true + } + return nil, false + }, + errorAssertionMsg: "Workflow deletion should succeed", + } +) + +// assertWorkflowActivated waits for a WorkflowActivated event for the given workflow name. +// +//nolint:revive // test helper conventionally has t first +func assertWorkflowActivated(t *testing.T, ctx context.Context, messageChan <-chan proto.Message, errChan <-chan error, workflowName string, timeout time.Duration) { + t.Helper() + assertWorkflowEvent(t, ctx, messageChan, errChan, workflowName, timeout, workflowActivatedMatcher) +} + +// assertWorkflowPaused waits for a WorkflowPaused event for the given workflow name. +// +//nolint:revive // test helper conventionally has t first +func assertWorkflowPaused(t *testing.T, ctx context.Context, messageChan <-chan proto.Message, errChan <-chan error, workflowName string, timeout time.Duration) { + t.Helper() + assertWorkflowEvent(t, ctx, messageChan, errChan, workflowName, timeout, workflowPausedMatcher) +} + +// assertWorkflowDeleted waits for a WorkflowDeleted event for the given workflow name. +// +//nolint:revive // test helper conventionally has t first +func assertWorkflowDeleted(t *testing.T, ctx context.Context, messageChan <-chan proto.Message, errChan <-chan error, workflowName string, timeout time.Duration) { + t.Helper() + assertWorkflowEvent(t, ctx, messageChan, errChan, workflowName, timeout, workflowDeletedMatcher) +} + +//nolint:revive // test helper conventionally has t first +func assertNoWorkflowActivated(t *testing.T, ctx context.Context, messageChan <-chan proto.Message, errChan <-chan error, workflowName string, timeout time.Duration) { + t.Helper() + testLogger := framework.L + + select { + case msg := <-messageChan: + if activated, ok := msg.(*workflowsv2.WorkflowActivated); ok { + wfKey := activated.GetWorkflow().GetWorkflowKey() + if wfKey.GetWorkflowName() == workflowName { + t.Fatalf("Workflow %s should NOT be activated when auth is rejected", workflowName) + } + } + case err := <-errChan: + require.NoError(t, err, "Beholder error during assertNoWorkflowActivated") + case <-time.After(timeout): + // Success - no activation received + testLogger.Info(). + Str("workflowName", workflowName). 
+ Msg("Confirmed: No WorkflowActivated event received (expected for auth rejection)") + case <-ctx.Done(): + // Context cancelled, which is fine + } +} + +// assertWorkflowStillExecuting verifies that a workflow is still running by checking +// that we haven't received any WorkflowPaused or WorkflowDeleted events for it. +// This is used for isolation checks to ensure gRPC source operations don't affect contract workflows. +// +// NOTE: This implementation relies on the absence of pause/delete events as a proxy +// for "still executing". For a more robust check, we would need to query the engine +// registry or check for recent UserLog events. +func assertWorkflowStillExecuting(t *testing.T, testEnv *ttypes.TestEnvironment, workflowName string) { + t.Helper() + testLogger := framework.L + + // Query nodes to verify the workflow engine is still registered + // We check by making a health request to at least one node + workflowDON := testEnv.Dons.MustWorkflowDON() + require.NotEmpty(t, workflowDON.Nodes, "workflow DON should have at least one node") + + // Check that nodes are still responsive - if a workflow crash occurred, + // the node would likely become unresponsive + for _, node := range workflowDON.Nodes { + if node.Clients.RestClient != nil { + // A successful API call indicates the node is still healthy + // The workflow engine running is implied if the node is responsive + // (crashes would make the node unresponsive) + _, _, err := node.Clients.RestClient.Health() + if err != nil { + testLogger.Warn(). + Str("workflowName", workflowName). + Str("nodeName", node.Name). + Err(err). + Msg("Node health check failed during workflow isolation check") + // Don't fail the test on health check error - the node might just be busy + // The key assertion is the absence of pause/delete events + } + } + } + + testLogger.Info(). + Str("workflowName", workflowName). + Msg("Isolation check: Workflow is still executing (nodes responsive, no pause/delete events received)") +} + +// assertNodesHealthy verifies that all nodes in the test environment are healthy. +// This is used after auth rejection tests to ensure no panics or crashes occurred. +func assertNodesHealthy(t *testing.T, testEnv *ttypes.TestEnvironment) { + t.Helper() + testLogger := framework.L + + // Check health of nodes in all DONs + for _, don := range testEnv.Dons.List() { + for _, node := range don.Nodes { + if node.Clients.RestClient == nil { + testLogger.Warn(). + Str("nodeName", node.Name). + Str("donName", don.Name). + Msg("Node has no REST client configured, skipping health check") + continue + } + + healthResp, _, err := node.Clients.RestClient.Health() + require.NoError(t, err, "node %s health check failed", node.Name) + + // Check that the node reports healthy status + if healthResp != nil && healthResp.Data != nil { + for _, detail := range healthResp.Data { + check := detail.Attributes + // Only fail on FAILING status; PASSING and UNKNOWN are acceptable + if check.Status == "failing" { + testLogger.Error(). + Str("nodeName", node.Name). + Str("checkName", check.Name). + Str("status", check.Status). + Str("output", check.Output). + Msg("Node health check is failing") + // Log but don't fail - some checks may be flaky + } + } + } + + testLogger.Debug(). + Str("nodeName", node.Name). + Str("donName", don.Name). + Msg("Node health check passed") + } + } + + testLogger.Info(). + Int("donCount", len(testEnv.Dons.List())). 
+ Msg("Health check: All nodes are healthy (no container crashes detected)") +} + +// workflowArtifacts holds compiled workflow information +type workflowArtifacts struct { + BinaryURL string + ConfigURL string + WorkflowID [32]byte +} + +// compileAndCopyWorkflow compiles a test workflow and copies it to containers, +// returning the file:// URL and the correct workflow ID computed from the binary. +// ownerHex should be a hex-encoded owner string (with or without 0x prefix). +func compileAndCopyWorkflow(t *testing.T, testEnv *ttypes.TestEnvironment, workflowName string, ownerHex string) workflowArtifacts { + t.Helper() + testLogger := framework.L + ctx := t.Context() + + // Compile workflow + testLogger.Info().Str("workflowName", workflowName).Msg("Compiling test workflow...") + compressedWasmPath, err := creworkflow.CompileWorkflow(ctx, grpcTestWorkflowSource, workflowName) + require.NoError(t, err, "failed to compile workflow") + + t.Cleanup(func() { + _ = os.Remove(compressedWasmPath) + }) + + // Create config file for cron workflow + testLogger.Info().Msg("Creating workflow config file...") + workflowConfig := crontypes.WorkflowConfig{ + Schedule: "*/30 * * * * *", // every 30 seconds + } + configData, err := yaml.Marshal(workflowConfig) + require.NoError(t, err, "failed to marshal workflow config") + + configFilePath := filepath.Join(filepath.Dir(compressedWasmPath), workflowName+"_config.yaml") + err = os.WriteFile(configFilePath, configData, 0600) + require.NoError(t, err, "failed to write config file") + + t.Cleanup(func() { + _ = os.Remove(configFilePath) + }) + + // Read the base64-decoded (but still brotli-compressed) binary for workflow ID calculation + // The node only base64 decodes, it does NOT brotli decompress before computing the workflow ID + brotliCompressedBinary := readBase64DecodedWorkflow(t, compressedWasmPath) + + // Compute the workflow ID the same way the node does (using GenerateWorkflowIDFromStrings) + // Include config in the hash calculation + workflowIDHex, err := workflows.GenerateWorkflowIDFromStrings(ownerHex, workflowName, brotliCompressedBinary, configData, "") + require.NoError(t, err, "failed to compute workflow ID") + + // Convert hex string to [32]byte + workflowIDBytes, err := hex.DecodeString(workflowIDHex) + require.NoError(t, err, "failed to decode workflow ID hex") + var workflowID [32]byte + copy(workflowID[:], workflowIDBytes) + + testLogger.Info(). + Str("workflowName", workflowName). + Str("workflowID", workflowIDHex). 
+ Msg("Computed workflow ID from binary and config") + + // Find workflow DON name for container pattern + workflowDONName := "" + for _, don := range testEnv.Dons.List() { + if don.ID == testEnv.Dons.MustWorkflowDON().ID { + workflowDONName = don.Name + break + } + } + require.NotEmpty(t, workflowDONName, "failed to find workflow DON name") + + // Copy to containers + testLogger.Info().Str("workflowName", workflowName).Str("donName", workflowDONName).Msg("Copying workflow artifacts to containers...") + containerTargetDir := creworkflow.DefaultWorkflowTargetDir + err = creworkflow.CopyArtifactsToDockerContainers(containerTargetDir, ns.NodeNamePrefix(workflowDONName), compressedWasmPath, configFilePath) + require.NoError(t, err, "failed to copy workflow artifacts to containers") + + // Return the file:// URLs that nodes will use to fetch the artifacts + wasmFilename := filepath.Base(compressedWasmPath) + configFilename := filepath.Base(configFilePath) + binaryURL := "file://" + containerTargetDir + "/" + wasmFilename + configURL := "file://" + containerTargetDir + "/" + configFilename + testLogger.Info().Str("binaryURL", binaryURL).Str("configURL", configURL).Msg("Workflow compiled and copied to containers") + + return workflowArtifacts{ + BinaryURL: binaryURL, + ConfigURL: configURL, + WorkflowID: workflowID, + } +} + +// readBase64DecodedWorkflow reads a .br.b64 file and returns the base64-decoded (still brotli-compressed) binary +func readBase64DecodedWorkflow(t *testing.T, compressedPath string) []byte { + t.Helper() + + // Read the base64-encoded file + compressedB64, err := os.ReadFile(compressedPath) + require.NoError(t, err, "failed to read compressed workflow file") + + // Decode base64 only (node doesn't brotli decompress before computing workflow ID) + decoded, err := base64.StdEncoding.DecodeString(string(compressedB64)) + require.NoError(t, err, "failed to decode base64 workflow") + + return decoded +} diff --git a/testdata/scripts/config/merge_raw_configs.txtar b/testdata/scripts/config/merge_raw_configs.txtar index 44b7765426f..610b89487d5 100644 --- a/testdata/scripts/config/merge_raw_configs.txtar +++ b/testdata/scripts/config/merge_raw_configs.txtar @@ -473,6 +473,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar index 6d984b4a00f..68c7d72b3b0 100644 --- a/testdata/scripts/node/validate/default.txtar +++ b/testdata/scripts/node/validate/default.txtar @@ -338,6 +338,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/defaults-override.txtar b/testdata/scripts/node/validate/defaults-override.txtar index 522b8bd2a24..08eb3fe76b5 100644 --- a/testdata/scripts/node/validate/defaults-override.txtar +++ b/testdata/scripts/node/validate/defaults-override.txtar @@ -399,6 +399,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar 
b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index 8034c1a7156..ae97c4feed0 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -382,6 +382,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index f3a9d7853e3..73003ffbff3 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -382,6 +382,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index 9647eebf92f..47e6a4dcc5b 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -382,6 +382,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/fallback-override.txtar b/testdata/scripts/node/validate/fallback-override.txtar index 9a26bcfd06c..08205ca3ca7 100644 --- a/testdata/scripts/node/validate/fallback-override.txtar +++ b/testdata/scripts/node/validate/fallback-override.txtar @@ -480,6 +480,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar index 4ed20566c13..dc0957a35b9 100644 --- a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar +++ b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar @@ -367,6 +367,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index 559348dc73b..9e5f8a6b61d 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -378,6 +378,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index ec8e716c6a3..3f4c6d59154 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -379,6 +379,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = '' diff --git 
a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar index f17f67861c5..a0d440477ec 100644 --- a/testdata/scripts/node/validate/warnings.txtar +++ b/testdata/scripts/node/validate/warnings.txtar @@ -361,6 +361,7 @@ MaxBinarySize = '20.00mb' MaxEncryptedSecretsSize = '26.40kb' MaxConfigSize = '50.00kb' SyncStrategy = 'event' +AlternativeSources = [] [Capabilities.WorkflowRegistry.WorkflowStorage] ArtifactStorageHost = ''
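
For illustration, a consumer of the new AlternativeSources config might open one gRPC
connection per configured source along these lines. This is a minimal sketch, not code
from this change: the altSource struct and dialAll helper are hypothetical, and it
assumes grpc-go v1.63+ for grpc.NewClient.

package grpcsourceexample

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// altSource mirrors the per-source TOML fields (URL, TLSEnabled, Name).
type altSource struct {
	URL        string
	TLSEnabled bool
	Name       string
}

// dialAll opens one client connection per configured source, honoring each
// source's TLSEnabled flag.
func dialAll(sources []altSource) ([]*grpc.ClientConn, error) {
	conns := make([]*grpc.ClientConn, 0, len(sources))
	for _, s := range sources {
		creds := insecure.NewCredentials() // plaintext when TLSEnabled = false
		if s.TLSEnabled {
			creds = credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12})
		}
		conn, err := grpc.NewClient(s.URL, grpc.WithTransportCredentials(creds))
		if err != nil {
			return nil, fmt.Errorf("dial alternative source %q at %s: %w", s.Name, s.URL, err)
		}
		conns = append(conns, conn)
	}
	return conns, nil
}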