diff --git a/pkg/chain/ethereum/tbtc.go b/pkg/chain/ethereum/tbtc.go index 1f750abaec..d28151ffce 100644 --- a/pkg/chain/ethereum/tbtc.go +++ b/pkg/chain/ethereum/tbtc.go @@ -1469,13 +1469,22 @@ func (tc *TbtcChain) GetWallet( ) } + // Fetch wallet registry data on a best-effort basis. Legacy callers + // only use Bridge-sourced fields and never access MembersIDsHash, so a + // registry outage must not block them. The zero value signals that + // registry data is unavailable; downstream consumers that need it + // (e.g. signer_approval_certificate) already guard against this. + var membersIDsHash [32]byte + walletRegistryWallet, err := tc.walletRegistry.GetWallet(wallet.EcdsaWalletID) if err != nil { - return nil, fmt.Errorf( + logger.Warnf( "cannot get wallet registry data for wallet [0x%x]: [%v]", wallet.EcdsaWalletID, err, ) + } else { + membersIDsHash = walletRegistryWallet.MembersIdsHash } walletState, err := parseWalletState(wallet.State) @@ -1483,9 +1492,17 @@ func (tc *TbtcChain) GetWallet( return nil, fmt.Errorf("cannot parse wallet state: [%v]", err) } + return makeWalletChainData(wallet, membersIDsHash, walletState), nil +} + +func makeWalletChainData( + wallet tbtcabi.WalletsWallet, + membersIDsHash [32]byte, + walletState tbtc.WalletState, +) *tbtc.WalletChainData { return &tbtc.WalletChainData{ EcdsaWalletID: wallet.EcdsaWalletID, - MembersIDsHash: walletRegistryWallet.MembersIdsHash, + MembersIDsHash: membersIDsHash, MainUtxoHash: wallet.MainUtxoHash, PendingRedemptionsValue: wallet.PendingRedemptionsValue, CreatedAt: time.Unix(int64(wallet.CreatedAt), 0), @@ -1494,7 +1511,7 @@ func (tc *TbtcChain) GetWallet( PendingMovedFundsSweepRequestsCount: wallet.PendingMovedFundsSweepRequestsCount, State: walletState, MovingFundsTargetWalletsCommitmentHash: wallet.MovingFundsTargetWalletsCommitmentHash, - }, nil + } } func (tc *TbtcChain) OnWalletClosed( diff --git a/pkg/chain/ethereum/tbtc_test.go b/pkg/chain/ethereum/tbtc_test.go index e6c77914e3..7a9312e9c7 
100644 --- a/pkg/chain/ethereum/tbtc_test.go +++ b/pkg/chain/ethereum/tbtc_test.go @@ -18,8 +18,10 @@ import ( commonEthereum "github.com/keep-network/keep-common/pkg/chain/ethereum" "github.com/keep-network/keep-core/internal/testutils" + tbtcabi "github.com/keep-network/keep-core/pkg/chain/ethereum/tbtc/gen/abi" "github.com/keep-network/keep-core/pkg/chain/local_v1" "github.com/keep-network/keep-core/pkg/protocol/group" + tbtcpkg "github.com/keep-network/keep-core/pkg/tbtc" ) func TestComputeOperatorsIDsHash(t *testing.T) { @@ -132,6 +134,68 @@ func TestConvertSignaturesToChainFormat(t *testing.T) { } } +func TestMakeWalletChainDataPreservesBridgeFieldsWhenRegistryDataUnavailable(t *testing.T) { + bridgeWallet := tbtcabi.WalletsWallet{ + EcdsaWalletID: [32]byte{0xaa}, + MainUtxoHash: [32]byte{0xbb}, + PendingRedemptionsValue: 12345, + CreatedAt: 1700000000, + MovingFundsRequestedAt: 1700000100, + ClosingStartedAt: 1700000200, + PendingMovedFundsSweepRequestsCount: 7, + MovingFundsTargetWalletsCommitmentHash: [32]byte{0xcc}, + } + + walletChainData := makeWalletChainData( + bridgeWallet, + [32]byte{}, + tbtcpkg.StateLive, + ) + + if walletChainData.MembersIDsHash != ([32]byte{}) { + t.Fatalf("expected zero members IDs hash, got [0x%x]", walletChainData.MembersIDsHash) + } + if walletChainData.EcdsaWalletID != bridgeWallet.EcdsaWalletID { + t.Fatalf("expected wallet ID [0x%x], got [0x%x]", bridgeWallet.EcdsaWalletID, walletChainData.EcdsaWalletID) + } + if walletChainData.MainUtxoHash != bridgeWallet.MainUtxoHash { + t.Fatalf("expected main UTXO hash [0x%x], got [0x%x]", bridgeWallet.MainUtxoHash, walletChainData.MainUtxoHash) + } + if walletChainData.PendingRedemptionsValue != bridgeWallet.PendingRedemptionsValue { + t.Fatalf( + "expected pending redemptions value [%v], got [%v]", + bridgeWallet.PendingRedemptionsValue, + walletChainData.PendingRedemptionsValue, + ) + } + if walletChainData.State != tbtcpkg.StateLive { + t.Fatalf("expected wallet state [%v], got 
[%v]", tbtcpkg.StateLive, walletChainData.State) + } +} + +func TestMakeWalletChainDataUsesWalletRegistryMembersIDsHashWhenAvailable(t *testing.T) { + membersIDsHash := [32]byte{0xdd} + + walletChainData := makeWalletChainData( + tbtcabi.WalletsWallet{ + EcdsaWalletID: [32]byte{0xee}, + }, + membersIDsHash, + tbtcpkg.StateMovingFunds, + ) + + if walletChainData.MembersIDsHash != membersIDsHash { + t.Fatalf("expected members IDs hash [0x%x], got [0x%x]", membersIDsHash, walletChainData.MembersIDsHash) + } + if walletChainData.State != tbtcpkg.StateMovingFunds { + t.Fatalf( + "expected wallet state [%v], got [%v]", + tbtcpkg.StateMovingFunds, + walletChainData.State, + ) + } +} + func TestConvertPubKeyToChainFormat(t *testing.T) { bytes30 := []byte{229, 19, 136, 216, 125, 157, 135, 142, 67, 130, 136, 13, 76, 188, 32, 218, 243, 134, 95, 73, 155, 24, 38, 73, 117, 90, diff --git a/pkg/covenantsigner/covenantsigner_test.go b/pkg/covenantsigner/covenantsigner_test.go index 78c6556dc3..e6c32830bf 100644 --- a/pkg/covenantsigner/covenantsigner_test.go +++ b/pkg/covenantsigner/covenantsigner_test.go @@ -5,9 +5,9 @@ import ( "context" "crypto/ecdsa" "crypto/ed25519" - "crypto/sha256" "crypto/elliptic" "crypto/rand" + "crypto/sha256" "crypto/x509" "encoding/hex" "encoding/json" @@ -104,6 +104,54 @@ func (fmh *faultingMemoryHandle) Delete(directory string, name string) error { return fmh.memoryHandle.Delete(directory, name) } +// faultingDescriptor wraps a memoryDescriptor and returns an injected error +// from Content(), allowing tests to simulate unreadable job files. 
+type faultingDescriptor struct { + name string + directory string + err error +} + +func (fd *faultingDescriptor) Name() string { return fd.name } +func (fd *faultingDescriptor) Directory() string { return fd.directory } +func (fd *faultingDescriptor) Content() ([]byte, error) { return nil, fd.err } + +// contentFaultingHandle extends memoryHandle by injecting faulting descriptors +// into the ReadAll channel alongside normal descriptors. This enables testing +// of load() behavior when individual file reads fail. +type contentFaultingHandle struct { + *memoryHandle + faultingDescriptors []*faultingDescriptor +} + +func newContentFaultingHandle() *contentFaultingHandle { + return &contentFaultingHandle{ + memoryHandle: newMemoryHandle(), + } +} + +func (cfh *contentFaultingHandle) AddFaultingDescriptor(name, directory string, err error) { + cfh.faultingDescriptors = append(cfh.faultingDescriptors, &faultingDescriptor{ + name: name, + directory: directory, + err: err, + }) +} + +func (cfh *contentFaultingHandle) ReadAll() (<-chan persistence.DataDescriptor, <-chan error) { + dataChan := make(chan persistence.DataDescriptor, len(cfh.items)+len(cfh.faultingDescriptors)) + errorChan := make(chan error) + for _, item := range cfh.items { + dataChan <- item + } + for _, fd := range cfh.faultingDescriptors { + dataChan <- fd + } + close(dataChan) + close(errorChan) + return dataChan, errorChan +} + type scriptedEngine struct { submit func(*Job) (*Transition, error) poll func(*Job) (*Transition, error) diff --git a/pkg/covenantsigner/store.go b/pkg/covenantsigner/store.go index fa8e74db29..a2c974e443 100644 --- a/pkg/covenantsigner/store.go +++ b/pkg/covenantsigner/store.go @@ -2,6 +2,7 @@ package covenantsigner import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -45,7 +46,9 @@ func NewStore(handle persistence.BasicHandle, dataDir string) (*Store, error) { if err := store.load(); err != nil { // Release the lock if loading fails after successful acquisition. 
- store.Close() + if closeErr := store.Close(); closeErr != nil { + return nil, errors.Join(err, fmt.Errorf("failed to release store lock: %w", closeErr)) + } return nil, err } @@ -57,8 +60,17 @@ func NewStore(handle persistence.BasicHandle, dataDir string) (*Store, error) { // kept open for the lifetime of the lock; closing it releases the lock. func acquireFileLock(dataDir string) (*os.File, error) { lockPath := filepath.Join(dataDir, jobsDirectory, lockFileName) + root, err := os.OpenRoot(dataDir) + if err != nil { + return nil, fmt.Errorf("cannot open data directory root [%s]: %w", dataDir, err) + } + defer func() { + if closeErr := root.Close(); closeErr != nil { + logger.Warnf("failed to close store root [%s]: [%v]", dataDir, closeErr) + } + }() - if err := os.MkdirAll(filepath.Dir(lockPath), 0700); err != nil { + if err := root.MkdirAll(jobsDirectory, 0700); err != nil { return nil, fmt.Errorf( "cannot create lock directory [%s]: %w", filepath.Dir(lockPath), @@ -66,7 +78,7 @@ func acquireFileLock(dataDir string) (*os.File, error) { ) } - lockFile, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0600) + lockFile, err := root.OpenFile(filepath.Join(jobsDirectory, lockFileName), os.O_CREATE|os.O_RDWR, 0600) if err != nil { return nil, fmt.Errorf( "cannot open lock file [%s]: %w", @@ -79,7 +91,9 @@ func acquireFileLock(dataDir string) (*os.File, error) { int(lockFile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB, ); err != nil { - lockFile.Close() + if closeErr := lockFile.Close(); closeErr != nil { + err = errors.Join(err, fmt.Errorf("failed to close lock file [%s]: %w", lockPath, closeErr)) + } return nil, fmt.Errorf( "cannot acquire exclusive lock on [%s]: "+ "another process may already own the store: %w", @@ -169,30 +183,62 @@ func (s *Store) load() error { content, err := descriptor.Content() if err != nil { - return err + return fmt.Errorf( + "cannot read persisted covenant signer job file [%s]: %w", + descriptor.Name(), + err, + ) } job := &Job{} if err := 
json.Unmarshal(content, job); err != nil { - return err + return fmt.Errorf( + "cannot parse persisted covenant signer job file [%s]: %w", + descriptor.Name(), + err, + ) } - existingID, ok := s.byRouteKey[routeKey(job.Route, job.RouteRequestID)] - if ok { - existing := s.byRequestID[existingID] - if existing != nil { + key := routeKey(job.Route, job.RouteRequestID) + + if existingID, ok := s.byRouteKey[key]; ok { + if existing := s.byRequestID[existingID]; existing != nil { existingIsNewerOrSame, err := isNewerOrSameJobRevision(existing, job) if err != nil { - return err - } - if existingIsNewerOrSame { + // When the timestamp comparison fails, prefer + // whichever job has a parseable timestamp. If the + // candidate's timestamp is valid, the failure is on + // the existing job -- replace it. Otherwise skip the + // candidate. + if _, parseErr := time.Parse(time.RFC3339Nano, job.UpdatedAt); parseErr != nil { + logger.Warnf( + "skipping job [%s] with invalid timestamp on duplicate route key [%s/%s]: [%v]", + job.RequestID, + job.Route, + job.RouteRequestID, + err, + ) + continue + } + logger.Warnf( + "replacing job [%s] with invalid timestamp on duplicate route key [%s/%s]: [%v]", + existing.RequestID, + job.Route, + job.RouteRequestID, + err, + ) + } else if existingIsNewerOrSame { continue } } + + if existingID != job.RequestID { + delete(s.byRequestID, existingID) + } } s.byRequestID[job.RequestID] = job - s.byRouteKey[routeKey(job.Route, job.RouteRequestID)] = job.RequestID + s.byRouteKey[key] = job.RequestID case err, ok := <-errorChan: if !ok { errorChan = nil diff --git a/pkg/covenantsigner/store_test.go b/pkg/covenantsigner/store_test.go index 9f6dc3cbad..3dac6dd65a 100644 --- a/pkg/covenantsigner/store_test.go +++ b/pkg/covenantsigner/store_test.go @@ -203,9 +203,15 @@ func TestStoreLoadSelectsNewestJobForDuplicateRouteKeys(t *testing.T) { if loaded.RequestID != newJob.RequestID { t.Fatalf("expected newest request ID %s, got %s", newJob.RequestID, 
loaded.RequestID) } + + if _, ok, err := store.GetByRequestID(oldJob.RequestID); err != nil { + t.Fatal(err) + } else if ok { + t.Fatalf("expected superseded request ID %s to be removed", oldJob.RequestID) + } } -func TestStoreLoadFailsOnInvalidUpdatedAtForDuplicateRouteKeys(t *testing.T) { +func TestStoreLoadKeepsBestAvailableJobWhenDuplicateUpdatedAtInvalid(t *testing.T) { handle := newMemoryHandle() first := &Job{ @@ -251,12 +257,176 @@ func TestStoreLoadFailsOnInvalidUpdatedAtForDuplicateRouteKeys(t *testing.T) { t.Fatal(err) } + store, err := NewStore(handle, "") + if err != nil { + t.Fatalf( + "expected store to load despite invalid timestamp on duplicate route key, got error: %v", + err, + ) + } + + loaded, ok, err := store.GetByRouteRequest(TemplateSelfV1, "ors_load_invalid_updated_at") + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("expected valid job to be loaded despite invalid-timestamp sibling") + } + if loaded.RequestID != first.RequestID { + t.Fatalf("expected request ID %s, got %s", first.RequestID, loaded.RequestID) + } + + if _, ok, err := store.GetByRequestID(second.RequestID); err != nil { + t.Fatal(err) + } else if ok { + t.Fatalf("expected invalid duplicate request ID %s to be removed", second.RequestID) + } +} + +func TestStoreLoadRejectsUnreadablePersistedJobFile(t *testing.T) { + handle := newContentFaultingHandle() + + validJob := &Job{ + RequestID: "kcs_self_valid_readable", + RouteRequestID: "ors_readable", + Route: TemplateSelfV1, + IdempotencyKey: "idem_readable", + FacadeRequestID: "rf_readable", + RequestDigest: "0xaaa", + State: JobStatePending, + Detail: "queued", + CreatedAt: "2026-03-09T00:00:00Z", + UpdatedAt: "2026-03-09T00:00:00Z", + Request: baseRequest(TemplateSelfV1), + } + + payload, err := json.Marshal(validJob) + if err != nil { + t.Fatal(err) + } + if err := handle.Save(payload, jobsDirectory, validJob.RequestID+".json"); err != nil { + t.Fatal(err) + } + + handle.AddFaultingDescriptor( + 
"corrupted_file.json", + jobsDirectory, + errors.New("simulated disk read error"), + ) + _, err = NewStore(handle, "") if err == nil { - t.Fatal("expected invalid UpdatedAt error") + t.Fatal("expected unreadable persisted job file to fail store load") } - if !strings.Contains(err.Error(), "cannot parse candidate job updatedAt") && - !strings.Contains(err.Error(), "cannot parse existing job updatedAt") { + if !strings.Contains(err.Error(), "cannot read persisted covenant signer job file [corrupted_file.json]") { t.Fatalf("unexpected error: %v", err) } } + +func TestStoreLoadRejectsMalformedPersistedJobFile(t *testing.T) { + handle := newMemoryHandle() + + validJob := &Job{ + RequestID: "kcs_self_valid_json", + RouteRequestID: "ors_valid_json", + Route: TemplateSelfV1, + IdempotencyKey: "idem_valid_json", + FacadeRequestID: "rf_valid_json", + RequestDigest: "0xbbb", + State: JobStatePending, + Detail: "queued", + CreatedAt: "2026-03-09T00:00:00Z", + UpdatedAt: "2026-03-09T00:00:00Z", + Request: baseRequest(TemplateSelfV1), + } + + validPayload, err := json.Marshal(validJob) + if err != nil { + t.Fatal(err) + } + if err := handle.Save(validPayload, jobsDirectory, validJob.RequestID+".json"); err != nil { + t.Fatal(err) + } + + if err := handle.Save([]byte("not valid json content"), jobsDirectory, "malformed.json"); err != nil { + t.Fatal(err) + } + + _, err = NewStore(handle, "") + if err == nil { + t.Fatal("expected malformed persisted job file to fail store load") + } + if !strings.Contains(err.Error(), "cannot parse persisted covenant signer job file [malformed.json]") { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestStoreLoadKeepsNewestValidTimestampJobWhenDuplicateTimestampInvalid(t *testing.T) { + handle := newMemoryHandle() + + validJob := &Job{ + RequestID: "kcs_self_valid_ts", + RouteRequestID: "ors_ts_dupe", + Route: TemplateSelfV1, + IdempotencyKey: "idem_valid_ts", + FacadeRequestID: "rf_valid_ts", + RequestDigest: "0xccc", + State: 
JobStatePending, + Detail: "queued", + CreatedAt: "2026-03-09T00:00:00Z", + UpdatedAt: "2026-03-09T00:00:00Z", + Request: baseRequest(TemplateSelfV1), + } + + badTimestampJob := &Job{ + RequestID: "kcs_self_bad_ts", + RouteRequestID: "ors_ts_dupe", + Route: TemplateSelfV1, + IdempotencyKey: "idem_bad_ts", + FacadeRequestID: "rf_bad_ts", + RequestDigest: "0xddd", + State: JobStatePending, + Detail: "queued", + CreatedAt: "2026-03-10T00:00:00Z", + UpdatedAt: "invalid-timestamp", + Request: baseRequest(TemplateSelfV1), + } + + validPayload, err := json.Marshal(validJob) + if err != nil { + t.Fatal(err) + } + badPayload, err := json.Marshal(badTimestampJob) + if err != nil { + t.Fatal(err) + } + + if err := handle.Save(validPayload, jobsDirectory, validJob.RequestID+".json"); err != nil { + t.Fatal(err) + } + if err := handle.Save(badPayload, jobsDirectory, badTimestampJob.RequestID+".json"); err != nil { + t.Fatal(err) + } + + store, err := NewStore(handle, "") + if err != nil { + t.Fatalf("expected store to load despite invalid timestamp on duplicate route key, got error: %v", err) + } + + loaded, ok, err := store.GetByRequestID("kcs_self_valid_ts") + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("expected valid job to be accessible after skipping bad-timestamp sibling") + } + if loaded.RequestID != validJob.RequestID { + t.Fatalf("expected request ID %s, got %s", validJob.RequestID, loaded.RequestID) + } + + if _, ok, err := store.GetByRequestID(badTimestampJob.RequestID); err != nil { + t.Fatal(err) + } else if ok { + t.Fatalf("expected invalid duplicate request ID %s to be removed", badTimestampJob.RequestID) + } +} diff --git a/pkg/tbtc/chain.go b/pkg/tbtc/chain.go index c70e4b73c0..5475c3b4ab 100644 --- a/pkg/tbtc/chain.go +++ b/pkg/tbtc/chain.go @@ -416,7 +416,10 @@ type DepositChainRequest struct { // WalletChainData represents wallet data stored on-chain. 
type WalletChainData struct { - EcdsaWalletID [32]byte + EcdsaWalletID [32]byte + // MembersIDsHash is populated from the wallet registry rather than the + // Bridge. A zero value indicates GetWallet returned Bridge data while the + // wallet registry lookup was unavailable. MembersIDsHash [32]byte MainUtxoHash [32]byte PendingRedemptionsValue uint64 diff --git a/pkg/tbtc/chain_test.go b/pkg/tbtc/chain_test.go index cbd59b5221..ff07e903f9 100644 --- a/pkg/tbtc/chain_test.go +++ b/pkg/tbtc/chain_test.go @@ -66,8 +66,9 @@ type localChain struct { dkgResult *DKGChainResult dkgResultValid bool - walletsMutex sync.Mutex - wallets map[[20]byte]*WalletChainData + walletsMutex sync.Mutex + wallets map[[20]byte]*WalletChainData + walletRegistryErrs map[[20]byte]error inactivityNonceMutex sync.Mutex inactivityNonces map[[32]byte]uint64 @@ -889,6 +890,15 @@ func (lc *localChain) GetWallet(walletPublicKeyHash [20]byte) ( return nil, fmt.Errorf("%w for given PKH", ErrWalletNotFound) } + // When a registry error is configured for this wallet, return + // Bridge-sourced data with a zero MembersIDsHash -- mirroring the + // fault-isolation behavior of the real Ethereum adapter. 
+	if _, hasErr := lc.walletRegistryErrs[walletPublicKeyHash]; hasErr { + degraded := *walletChainData + degraded.MembersIDsHash = [32]byte{} + return &degraded, nil + } + return walletChainData, nil } @@ -919,6 +929,16 @@ func (lc *localChain) setWallet( lc.wallets[walletPublicKeyHash] = walletChainData } +func (lc *localChain) setWalletRegistryErr( + walletPublicKeyHash [20]byte, + err error, +) { + lc.walletsMutex.Lock() + defer lc.walletsMutex.Unlock() + + lc.walletRegistryErrs[walletPublicKeyHash] = err +} + func (lc *localChain) OnWalletClosed( handler func(event *WalletClosedEvent), ) subscription.EventSubscription { @@ -1463,6 +1483,7 @@ func ConnectWithKey( map[int]func(submission *InactivityClaimedEvent), ), wallets: make(map[[20]byte]*WalletChainData), + walletRegistryErrs: make(map[[20]byte]error), inactivityNonces: make(map[[32]byte]uint64), blocksByTimestamp: make(map[uint64]uint64), blocksHashesByNumber: make(map[uint64][32]byte), diff --git a/pkg/tbtc/covenant_signer.go b/pkg/tbtc/covenant_signer.go index f0104a3e33..2824a2d090 100644 --- a/pkg/tbtc/covenant_signer.go +++ b/pkg/tbtc/covenant_signer.go @@ -132,6 +132,12 @@ func (cse *covenantSignerEngine) VerifySignerApproval( err, ) } + if err := ensureWalletRegistryDataAvailable( + walletChainData, + "verify signer approval", + ); err != nil { + return err + } expectedSignerSetHash, err := computeSignerApprovalCertificateSignerSetHash( signerPublicKey, diff --git a/pkg/tbtc/get_wallet_fault_isolation_test.go b/pkg/tbtc/get_wallet_fault_isolation_test.go new file mode 100644 index 0000000000..efe3af4402 --- /dev/null +++ b/pkg/tbtc/get_wallet_fault_isolation_test.go @@ -0,0 +1,285 @@ +package tbtc + +import ( + "crypto/sha256" + "fmt" + "testing" + "time" + + "github.com/keep-network/keep-core/internal/testutils" +) + +// TestGetWalletReturnsDataWhenRegistryFails verifies that GetWallet returns +// valid Bridge fields with a zero-valued MembersIDsHash when the wallet +// registry is unavailable.
This validates that downstream callers relying +// only on Bridge data (State, timestamps, etc.) are not disrupted by a +// transient registry failure. +func TestGetWalletReturnsDataWhenRegistryFails(t *testing.T) { + chain := Connect() + + walletPublicKeyHash := [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} + walletID := [32]byte{0xaa, 0xbb, 0xcc} + mainUtxoHash := sha256.Sum256([]byte("main-utxo")) + createdAt := time.Unix(1700000000, 0) + movingFundsRequestedAt := time.Unix(1700001000, 0) + closingStartedAt := time.Unix(1700002000, 0) + + chain.setWallet(walletPublicKeyHash, &WalletChainData{ + EcdsaWalletID: walletID, + MembersIDsHash: [32]byte{}, // zero -- simulating registry unavailable + MainUtxoHash: mainUtxoHash, + PendingRedemptionsValue: 500000, + CreatedAt: createdAt, + MovingFundsRequestedAt: movingFundsRequestedAt, + ClosingStartedAt: closingStartedAt, + PendingMovedFundsSweepRequestsCount: 3, + State: StateLive, + MovingFundsTargetWalletsCommitmentHash: sha256.Sum256( + []byte("commitment"), + ), + }) + + // Simulate a wallet registry error so the mock degrades + // gracefully, returning Bridge-only data with zero MembersIDsHash. + chain.setWalletRegistryErr(walletPublicKeyHash, fmt.Errorf( + "rpc: wallet registry unavailable", + )) + + walletData, err := chain.GetWallet(walletPublicKeyHash) + if err != nil { + t.Fatalf( + "GetWallet should not return error on registry failure; got: [%v]", + err, + ) + } + + if walletData == nil { + t.Fatal("GetWallet should return non-nil WalletChainData on registry failure") + } + + // Verify MembersIDsHash is zero when registry is unavailable. + if walletData.MembersIDsHash != ([32]byte{}) { + t.Errorf( + "unexpected MembersIDsHash\nexpected: zero [32]byte\nactual: [0x%x]", + walletData.MembersIDsHash, + ) + } + + // Verify Bridge-sourced fields are fully populated. 
+ if walletData.EcdsaWalletID != walletID { + t.Errorf( + "unexpected EcdsaWalletID\nexpected: [0x%x]\nactual: [0x%x]", + walletID, + walletData.EcdsaWalletID, + ) + } + + if walletData.MainUtxoHash != mainUtxoHash { + t.Errorf( + "unexpected MainUtxoHash\nexpected: [0x%x]\nactual: [0x%x]", + mainUtxoHash, + walletData.MainUtxoHash, + ) + } + + testutils.AssertUintsEqual( + t, + "PendingRedemptionsValue", + 500000, + walletData.PendingRedemptionsValue, + ) + + if !walletData.CreatedAt.Equal(createdAt) { + t.Errorf( + "unexpected CreatedAt\nexpected: [%v]\nactual: [%v]", + createdAt, + walletData.CreatedAt, + ) + } + + if walletData.State != StateLive { + t.Errorf( + "unexpected State\nexpected: [%v]\nactual: [%v]", + StateLive, + walletData.State, + ) + } + + if !walletData.MovingFundsRequestedAt.Equal(movingFundsRequestedAt) { + t.Errorf( + "unexpected MovingFundsRequestedAt\nexpected: [%v]\nactual: [%v]", + movingFundsRequestedAt, + walletData.MovingFundsRequestedAt, + ) + } + + if !walletData.ClosingStartedAt.Equal(closingStartedAt) { + t.Errorf( + "unexpected ClosingStartedAt\nexpected: [%v]\nactual: [%v]", + closingStartedAt, + walletData.ClosingStartedAt, + ) + } + + testutils.AssertUintsEqual( + t, + "PendingMovedFundsSweepRequestsCount", + 3, + uint64(walletData.PendingMovedFundsSweepRequestsCount), + ) + + commitmentHash := sha256.Sum256([]byte("commitment")) + if walletData.MovingFundsTargetWalletsCommitmentHash != commitmentHash { + t.Errorf( + "unexpected MovingFundsTargetWalletsCommitmentHash\n"+ + "expected: [0x%x]\nactual: [0x%x]", + commitmentHash, + walletData.MovingFundsTargetWalletsCommitmentHash, + ) + } +} + +// TestGetWalletReturnsFullDataWhenRegistrySucceeds verifies that GetWallet +// returns complete data including a non-zero MembersIDsHash when both the +// Bridge and wallet registry calls succeed. This is the baseline behavior +// that must be preserved after introducing fault isolation. 
+func TestGetWalletReturnsFullDataWhenRegistrySucceeds(t *testing.T) { + chain := Connect() + + walletPublicKeyHash := [20]byte{21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40} + walletID := [32]byte{0xdd, 0xee, 0xff} + membersIDsHash := sha256.Sum256([]byte("test-members-ids")) + mainUtxoHash := sha256.Sum256([]byte("main-utxo-success")) + createdAt := time.Unix(1700000000, 0) + movingFundsRequestedAt := time.Unix(1700003000, 0) + closingStartedAt := time.Unix(1700004000, 0) + commitmentHash := sha256.Sum256([]byte("commitment-success")) + + chain.setWallet(walletPublicKeyHash, &WalletChainData{ + EcdsaWalletID: walletID, + MembersIDsHash: membersIDsHash, + MainUtxoHash: mainUtxoHash, + PendingRedemptionsValue: 1000000, + CreatedAt: createdAt, + MovingFundsRequestedAt: movingFundsRequestedAt, + ClosingStartedAt: closingStartedAt, + PendingMovedFundsSweepRequestsCount: 7, + State: StateMovingFunds, + MovingFundsTargetWalletsCommitmentHash: commitmentHash, + }) + + walletData, err := chain.GetWallet(walletPublicKeyHash) + if err != nil { + t.Fatalf("GetWallet should not return error; got: [%v]", err) + } + + if walletData == nil { + t.Fatal("GetWallet should return non-nil WalletChainData") + } + + // Verify MembersIDsHash is the expected non-zero value. 
+ if walletData.MembersIDsHash != membersIDsHash { + t.Errorf( + "unexpected MembersIDsHash\nexpected: [0x%x]\nactual: [0x%x]", + membersIDsHash, + walletData.MembersIDsHash, + ) + } + + if walletData.EcdsaWalletID != walletID { + t.Errorf( + "unexpected EcdsaWalletID\nexpected: [0x%x]\nactual: [0x%x]", + walletID, + walletData.EcdsaWalletID, + ) + } + + if walletData.MainUtxoHash != mainUtxoHash { + t.Errorf( + "unexpected MainUtxoHash\nexpected: [0x%x]\nactual: [0x%x]", + mainUtxoHash, + walletData.MainUtxoHash, + ) + } + + testutils.AssertUintsEqual( + t, + "PendingRedemptionsValue", + 1000000, + walletData.PendingRedemptionsValue, + ) + + if walletData.State != StateMovingFunds { + t.Errorf( + "unexpected State\nexpected: [%v]\nactual: [%v]", + StateMovingFunds, + walletData.State, + ) + } + + if !walletData.CreatedAt.Equal(createdAt) { + t.Errorf( + "unexpected CreatedAt\nexpected: [%v]\nactual: [%v]", + createdAt, + walletData.CreatedAt, + ) + } + + if !walletData.MovingFundsRequestedAt.Equal(movingFundsRequestedAt) { + t.Errorf( + "unexpected MovingFundsRequestedAt\nexpected: [%v]\nactual: [%v]", + movingFundsRequestedAt, + walletData.MovingFundsRequestedAt, + ) + } + + if !walletData.ClosingStartedAt.Equal(closingStartedAt) { + t.Errorf( + "unexpected ClosingStartedAt\nexpected: [%v]\nactual: [%v]", + closingStartedAt, + walletData.ClosingStartedAt, + ) + } + + testutils.AssertUintsEqual( + t, + "PendingMovedFundsSweepRequestsCount", + 7, + uint64(walletData.PendingMovedFundsSweepRequestsCount), + ) + + if walletData.MovingFundsTargetWalletsCommitmentHash != commitmentHash { + t.Errorf( + "unexpected MovingFundsTargetWalletsCommitmentHash\n"+ + "expected: [0x%x]\nactual: [0x%x]", + commitmentHash, + walletData.MovingFundsTargetWalletsCommitmentHash, + ) + } +} + +// TestGetWalletBridgeFailureStillReturnsError verifies that GetWallet +// continues to return an error when the wallet is not found (Bridge-level +// failure). 
The fault isolation change must NOT alter the behavior for +// Bridge failures. +func TestGetWalletBridgeFailureStillReturnsError(t *testing.T) { + chain := Connect() + + unknownPKH := [20]byte{99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99} + + walletData, err := chain.GetWallet(unknownPKH) + + if err == nil { + t.Fatal("GetWallet should return error for unknown wallet") + } + + if walletData != nil { + t.Errorf( + "GetWallet should return nil data for unknown wallet; got: [%+v]", + walletData, + ) + } +} diff --git a/pkg/tbtc/signer_approval_certificate.go b/pkg/tbtc/signer_approval_certificate.go index acf891a22e..0dbf8ee7ef 100644 --- a/pkg/tbtc/signer_approval_certificate.go +++ b/pkg/tbtc/signer_approval_certificate.go @@ -31,6 +31,25 @@ type signerApprovalCertificateSignerSetPayload struct { HonestThreshold int `json:"honestThreshold"` } +func ensureWalletRegistryDataAvailable( + walletChainData *WalletChainData, + action string, +) error { + if walletChainData == nil { + return fmt.Errorf("cannot %s: wallet chain data is required", action) + } + + if walletChainData.MembersIDsHash == ([32]byte{}) { + return fmt.Errorf( + "cannot %s while wallet registry data is unavailable for wallet [0x%x]", + action, + walletChainData.EcdsaWalletID, + ) + } + + return nil +} + func (se *signingExecutor) issueSignerApprovalCertificate( ctx context.Context, approvalDigest []byte, @@ -51,6 +70,12 @@ func (se *signingExecutor) issueSignerApprovalCertificate( err, ) } + if err := ensureWalletRegistryDataAvailable( + walletChainData, + "issue signer approval certificate", + ); err != nil { + return nil, err + } signature, activityReport, endBlock, err := se.sign( ctx, diff --git a/pkg/tbtc/signer_approval_certificate_test.go b/pkg/tbtc/signer_approval_certificate_test.go index 60ece03cb7..a65538f61f 100644 --- a/pkg/tbtc/signer_approval_certificate_test.go +++ b/pkg/tbtc/signer_approval_certificate_test.go @@ -7,6 +7,7 @@ import ( 
"crypto/sha256" "encoding/hex" "encoding/json" + "errors" "strings" "testing" @@ -207,6 +208,45 @@ func TestSigningExecutorCanIssueSignerApprovalCertificateForArbitraryDigest(t *t } } +func TestSigningExecutorIssueSignerApprovalCertificateFailsWhenWalletRegistryUnavailable(t *testing.T) { + node, _, walletPublicKey := setupCovenantSignerTestNode(t) + + executor, ok, err := node.getSigningExecutor(walletPublicKey) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Fatal("node is supposed to control wallet signers") + } + + startBlock, err := executor.getCurrentBlockFn() + if err != nil { + t.Fatal(err) + } + + localChain, ok := executor.chain.(*localChain) + if !ok { + t.Fatal("expected local chain implementation") + } + localChain.setWalletRegistryErr( + bitcoin.PublicKeyHash(executor.wallet().publicKey), + errors.New("wallet registry unavailable"), + ) + + approvalDigest := sha256.Sum256([]byte("registry-unavailable")) + _, err = executor.issueSignerApprovalCertificate( + context.Background(), + approvalDigest[:], + startBlock, + ) + if err == nil || !strings.Contains( + err.Error(), + "cannot issue signer approval certificate while wallet registry data is unavailable", + ) { + t.Fatalf("expected wallet registry unavailable error, got %v", err) + } +} + func TestSignerApprovalCertificateVerificationRejectsTamperedDigest(t *testing.T) { node, _, walletPublicKey := setupCovenantSignerTestNode(t) @@ -424,6 +464,33 @@ func TestCovenantSignerEngineVerifySignerApprovalRejectsMissingOnChainWallet(t * } } +func TestCovenantSignerEngineVerifySignerApprovalFailsWhenWalletRegistryUnavailable(t *testing.T) { + node, _, walletPublicKey := setupCovenantSignerTestNode(t) + request := validStructuredSignerApprovalVerificationRequest( + t, + node, + walletPublicKey, + covenantsigner.TemplateSelfV1, + ) + + localChain, ok := node.chain.(*localChain) + if !ok { + t.Fatal("expected local chain implementation") + } + localChain.setWalletRegistryErr( + 
bitcoin.PublicKeyHash(walletPublicKey), + errors.New("wallet registry unavailable"), + ) + + err := (&covenantSignerEngine{node: node}).VerifySignerApproval(request) + if err == nil || !strings.Contains( + err.Error(), + "cannot verify signer approval while wallet registry data is unavailable", + ) { + t.Fatalf("expected wallet registry unavailable error, got %v", err) + } +} + func TestVerifySignerApprovalCertificateRejectsEmptyExpectedSignerSetHash(t *testing.T) { node, _, walletPublicKey := setupCovenantSignerTestNode(t) request := validStructuredSignerApprovalVerificationRequest(