From 2d62ace93f34eb446c108d5025bcc8f7d3fcd1b7 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 21 May 2025 20:50:35 -0400 Subject: [PATCH 1/9] chore: wip active leaves update --- .../active_leaves_update.go | 120 ++++++++++++++++++ .../statement-distribution/muxed_message.go | 50 ++++++++ .../statement-distribution/state_v2.go | 49 ++++++- .../statement_distribution.go | 66 +++++----- 4 files changed, 249 insertions(+), 36 deletions(-) create mode 100644 dot/parachain/statement-distribution/active_leaves_update.go create mode 100644 dot/parachain/statement-distribution/muxed_message.go diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go new file mode 100644 index 0000000000..84d45efe4f --- /dev/null +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -0,0 +1,120 @@ +package statementdistribution + +import ( + "fmt" + "maps" + + "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/events" + "github.com/ChainSafe/gossamer/lib/common" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" +) + +func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.ActivatedLeaf) error { + err := s.state.implicitView.ActivateLeaf(leaf.Hash, s.SubSystemToOverseer) + if err != nil { + return fmt.Errorf("implicit view activating leaf: %w", err) + } + + newRelayParents := s.state.implicitView.AllAllowedRelayParents() + + for _, nrp := range newRelayParents { + if _, ok := s.state.perRelayParent[nrp]; ok { + continue + } + + if err := s.handleActiveLeafUpdate(nrp); err != nil { + logger.Warnf("failed to handle active leaf %s: %s", nrp.String(), err.Error()) + } + } + + logger.Debugf("activated leaves. Now tracking %d relay-parent across %d sessions", + len(s.state.perRelayParent), len(s.state.perSession)) + + // Reconcile all peers' views with the active leaf and any relay parents + // it implies. If they learned about the block before we did, this reconciliation will give + // non-empty results and we should send them messages concerning all activated relay-parents. + updatePeers := make(map[string][]common.Hash) + for pid, pState := range s.state.peers { + fresh := pState.reconcileActiveLeaf(leaf.Hash, newRelayParents) + if len(fresh) > 0 { + updatePeers[pid] = fresh + } + } + + for pid, fresh := range updatePeers { + for _, freshRp := range fresh { + s.sendPeerMessageForRelayParent(pid, freshRp) + } + } + + s.newLeafFragmentChainUpdates(leaf.Hash) + + return nil +} + +func (s *StatementDistribution) handleActiveLeafUpdate(rp common.Hash) error { + return nil +} + +// handleDeactivatedLeaves deactivate leaves in the implicit view +func (s *StatementDistribution) handleDeactivatedLeaves(leaves []common.Hash) { + for _, l := range leaves { + pruned := s.state.implicitView.DeactivateLeaf(l) + + for _, prunedRp := range pruned { + // clean up per-relay-parent data based on everything removed. + rpInfo, ok := s.state.perRelayParent[prunedRp] + if !ok { + continue + } + + delete(s.state.perRelayParent, prunedRp) + + if activeValidatorState := rpInfo.activeValidatorState(); activeValidatorState != nil { + activeValidatorState.clusterTracker.warningIfTooManyPendingStatements(prunedRp) + } + + // clean up requests related to this relay parent. 
+ s.state.requestManager.removeByRelayParent(prunedRp) + } + } + + s.state.candidates.onDeactivateLeaves(leaves, func(h common.Hash) bool { + _, ok := s.state.perRelayParent[h] + return ok + }) + + // clean up sessions based on everything remaining. + sessions := make(map[parachaintypes.SessionIndex]struct{}) + for _, v := range s.state.perRelayParent { + sessions[v.session] = struct{}{} + } + + maps.DeleteFunc(s.state.perSession, func(key parachaintypes.SessionIndex, _value perSessionState) bool { + _, ok := sessions[key] + return !ok + }) + + var lastSessionIndex *parachaintypes.SessionIndex + for k := range s.state.unusedTopologies { + if lastSessionIndex == nil || k > *lastSessionIndex { + //pin so we don't get the address of a looping variable + sessionIdx := k + lastSessionIndex = &sessionIdx + } + } + + // Do not clean-up the last saved toplogy unless we moved to the next session + // This is needed because handle_deactive_leaves, gets also called when + // prospective_parachains APIs are not present, so we would actually remove + // the topology without using it because `perRelayParent` is empty until + // prospective_parachains gets enabled + maps.DeleteFunc(s.state.unusedTopologies, func(s parachaintypes.SessionIndex, _v events.NewGossipTopology) bool { + _, ok := sessions[s] + // delete if: + // The session index does not exists in the sessions map + // Or the session index exists BUT is not the lastSessionIndex + return !ok || (lastSessionIndex != nil && *lastSessionIndex != s) + }) +} diff --git a/dot/parachain/statement-distribution/muxed_message.go b/dot/parachain/statement-distribution/muxed_message.go new file mode 100644 index 0000000000..c47d2f1f01 --- /dev/null +++ b/dot/parachain/statement-distribution/muxed_message.go @@ -0,0 +1,50 @@ +package statementdistribution + +import "time" + +// MuxedMessage represents the kinds of messages +// the statement distribution can handle, these messages +// can have different origin and types, so the interface +// acts like a union that helps the subsytem to handle +// each message properly +type MuxedMessage interface { + isMuxedMessage() +} + +type overseerMessage struct { + inner any +} + +func (*overseerMessage) isMuxedMessage() {} + +// responderMessage is a message from the request handler +// indicating we received a request and we should produce +// a proper response and send it back +type responderMessage struct { + inner any // should be replaced with AttestedCandidateRequest type +} + +func (*responderMessage) isMuxedMessage() {} + +// reputationChangeMessage is a message indicating we should +// batch the reputation changes to the network bridge via +// Reputation Aggregator +type reputationChangeMessage struct{} + +func (*reputationChangeMessage) isMuxedMessage() {} + +// awaitMessageFrom waits for messages from either the overseerToSubSystem, responderCh, or reputationDelay +func (s *StatementDistribution) awaitMessageFrom( + overseerToSubSystem <-chan any, + responderCh chan any, + reputationDelay <-chan time.Time, +) MuxedMessage { + select { + case msg := <-overseerToSubSystem: + return &overseerMessage{inner: msg} + case msg := <-responderCh: + return &responderMessage{inner: msg} + case <-reputationDelay: + return &reputationChangeMessage{} + } +} diff --git a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index 9513ac9d44..37fcb70128 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -11,6 
+11,24 @@ import ( "github.com/ChainSafe/gossamer/lib/keystore" ) +// groupTracker interface exports methods +// enabling the statement distribution +// to track validator peers that belong +// to the same validation group +type groupTracker interface { + warningIfTooManyPendingStatements(rp common.Hash) +} + +// requestManager defines the interface that manages +// outgoing requests +type requestManager interface { + removeByRelayParent(rp common.Hash) +} + +type candidatesTracker interface { + onDeactivateLeaves(leaves []common.Hash, rpLiveFn func(common.Hash) bool) +} + type perRelayParentState struct { localValidator *localValidatorStore statementStore any // TODO #4719: Create statement store @@ -21,6 +39,14 @@ type perRelayParentState struct { disabledValidators map[parachaintypes.ValidatorIndex]struct{} } +func (p *perRelayParentState) activeValidatorState() *activeValidatorState { + if p.localValidator != nil { + return p.localValidator.active + } + + return nil +} + // isDisabled returns `true` if the given validator is disabled in the context of the relay parent. func (p *perRelayParentState) isDisabled(vIdx parachaintypes.ValidatorIndex) bool { _, ok := p.disabledValidators[vIdx] @@ -50,7 +76,7 @@ type activeValidatorState struct { index parachaintypes.ValidatorIndex groupIndex parachaintypes.GroupIndex assignments []parachaintypes.ParaID - clusterTracker any // TODO: use cluster tracker implementation (#4713) + clusterTracker groupTracker // TODO: use cluster tracker implementation (#4713) } type perSessionState struct { @@ -188,13 +214,28 @@ func (p *peerState) iterKnownDiscoveryIDs() []parachaintypes.AuthorityDiscoveryI type v2State struct { implicitView parachainutil.ImplicitView - candidates any // TODO #4718: Create Candidates Tracker + candidates candidatesTracker // TODO #4718: Create Candidates Tracker perRelayParent map[common.Hash]perRelayParentState perSession map[parachaintypes.SessionIndex]perSessionState unusedTopologies map[parachaintypes.SessionIndex]events.NewGossipTopology peers map[string]peerState keystore keystore.Keystore authorities map[parachaintypes.AuthorityDiscoveryID]string - requestManager any // TODO: #4377 - responseManager any // TODO: #4378 + requestManager requestManager // TODO: #4377 + responseManager any // TODO: #4378 +} + +func newV2State(ks keystore.Keystore, iv parachainutil.ImplicitView) *v2State { + return &v2State{ + implicitView: iv, + candidates: nil, + perRelayParent: map[common.Hash]perRelayParentState{}, + perSession: map[parachaintypes.SessionIndex]perSessionState{}, + unusedTopologies: map[parachaintypes.SessionIndex]events.NewGossipTopology{}, + peers: map[string]peerState{}, + keystore: ks, + authorities: map[parachaintypes.AuthorityDiscoveryID]string{}, + requestManager: nil, + responseManager: nil, + } } diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index 8843e36988..15e0a98d9e 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -8,36 +8,26 @@ import ( "fmt" "time" + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" parachainutil "github.com/ChainSafe/gossamer/dot/parachain/util" "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/lib/keystore" ) var logger = log.NewFromGlobal(log.AddContext("pkg", "parachain-statement-distribution")) type StatementDistribution struct { SubSystemToOverseer 
chan<- any + state *v2State } -type MuxedMessage interface { - isMuxedMessage() -} - -type overseerMessage struct { - inner any -} - -func (*overseerMessage) isMuxedMessage() {} - -type responderMessage struct { - inner any // should be replaced with AttestedCandidateRequest type +func New(overseerChan chan<- any, ks keystore.Keystore, blockState parachainutil.BlockState) *StatementDistribution { + return &StatementDistribution{ + SubSystemToOverseer: overseerChan, + state: newV2State(ks, parachainutil.NewBackingImplicitView(blockState, nil)), + } } -func (*responderMessage) isMuxedMessage() {} - -type reputationChangeMessage struct{} - -func (*reputationChangeMessage) isMuxedMessage() {} - // Run just receives the ctx and a channel from the overseer to subsystem func (s *StatementDistribution) Run(ctx context.Context, overseerToSubSystem <-chan any) { // Inside the method Run, we spawn a goroutine to handle network incoming requests @@ -55,26 +45,38 @@ func (s *StatementDistribution) Run(ctx context.Context, overseerToSubSystem <-c switch innerMessage := message.(type) { case *reputationChangeMessage: logger.Info("Reputation change triggered.") + case *overseerMessage: + shouldStop, err := s.handleSubsystemMessage(innerMessage.inner) + if err != nil { + logger.Errorf("handling subsystem message: %s", err.Error()) + } + + if shouldStop { + logger.Warn("handling subsystem message: should stop statement distribution") + break + } default: logger.Warn("Unhandled message type: " + fmt.Sprintf("%v", innerMessage)) } } } -func taskResponder(responderCh chan any) {} +func (s *StatementDistribution) handleSubsystemMessage(overseerMessage any) (bool, error) { + switch message := overseerMessage.(type) { + case parachaintypes.ActiveLeavesUpdateSignal: + if message.Activated != nil { + if err := s.handleActiveLeavesUpdate(message.Activated); err != nil { + return false, fmt.Errorf("handling active leaves update: %w", err) + } + } + s.handleDeactivatedLeaves(message.Deactivated) -// awaitMessageFrom waits for messages from either the overseerToSubSystem, responderCh, or reputationDelay -func (s *StatementDistribution) awaitMessageFrom( - overseerToSubSystem <-chan any, - responderCh chan any, - reputationDelay <-chan time.Time, -) MuxedMessage { - select { - case msg := <-overseerToSubSystem: - return &overseerMessage{inner: msg} - case msg := <-responderCh: - return &responderMessage{inner: msg} - case <-reputationDelay: - return &reputationChangeMessage{} + case parachaintypes.Conclude: + return true, nil } + + return false, nil } + +// TODO: https://github.com/ChainSafe/gossamer/issues/4285 +func taskResponder(responderCh chan any) {} From 83de2be4762be476b04dd3495da35f5ffe93f9fc Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 22 May 2025 16:08:51 -0400 Subject: [PATCH 2/9] chore: implementation done, time for testing --- .../active_leaves_update.go | 140 +++++++++++++++++- .../statement-distribution/state_v2.go | 16 +- .../statement_distribution.go | 18 +++ dot/parachain/types/types.go | 29 ++-- 4 files changed, 183 insertions(+), 20 deletions(-) diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go index 84d45efe4f..e8a6e0d078 100644 --- a/dot/parachain/statement-distribution/active_leaves_update.go +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -3,6 +3,7 @@ package statementdistribution import ( "fmt" "maps" + "slices" 
"github.com/ChainSafe/gossamer/dot/parachain/network-bridge/events" "github.com/ChainSafe/gossamer/lib/common" @@ -54,9 +55,146 @@ func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.Ac } func (s *StatementDistribution) handleActiveLeafUpdate(rp common.Hash) error { + rt, err := s.blockState.GetRuntime(rp) + if err != nil { + return fmt.Errorf("getting runtime: %w", err) + } + + disabledValidators, err := rt.ParachainHostDisabledValidators() + if err != nil { + return fmt.Errorf("querying disabled validators: %w", err) + } + + disableValidatorsSet := make(map[parachaintypes.ValidatorIndex]struct{}, len(disabledValidators)) + for _, dv := range disabledValidators { + disableValidatorsSet[dv] = struct{}{} + } + + sessionIdx, err := rt.ParachainHostSessionIndexForChild() + if err != nil { + return fmt.Errorf("querying session index for child: %w", err) + } + + if _, ok := s.state.perSession[sessionIdx]; !ok { + sessionInfo, err := rt.ParachainHostSessionInfo(sessionIdx) + if err != nil { + return fmt.Errorf("querying session info: %w", err) + } + + if sessionInfo == nil { + logger.Warnf("no session info provided for session %d, relay parent=%s", sessionIdx, rp) + return nil + } + + minBackingVotes, err := rt.ParachainHostMinimumBackingVotes() + if err != nil { + return fmt.Errorf("querying minimum backing votes: %w", err) + } + + nodeFeatures, err := rt.ParachainHostNodeFeatures() + if err != nil { + return fmt.Errorf("querying node features: %w", err) + } + + allowV2Descriptor, err := nodeFeatures.Get(uint(parachaintypes.CandidateReceiptV2Feature)) + if err != nil { + return fmt.Errorf("getting candidate receipt v2 feature: %w", err) + } + + perSessionState := newPerSessionState( + sessionInfo, s.state.keystore, minBackingVotes, allowV2Descriptor) + + if top, ok := s.state.unusedTopologies[sessionIdx]; ok { + delete(s.state.unusedTopologies, sessionIdx) + perSessionState.supplyTopology(top.Topology, top.LocalIndex) + } + + s.state.perSession[sessionIdx] = perSessionState + } + + perSession := s.state.perSession[sessionIdx] + if perSession == nil { + panic("either existed or just inserted; qed.") + } + + if len(disableValidatorsSet) > 0 { + disabled := make([]parachaintypes.ValidatorIndex, len(disableValidatorsSet)) + for d := range disableValidatorsSet { + disabled = append(disabled, d) + } + + logger.Debugf("disabled validators detected: %v, "+ + "session index=%v, relay parent=%s", disabled, sessionIdx, rp.String()) + } + + validatorGroups, err := rt.ParachainHostValidatorGroups() + if err != nil { + return fmt.Errorf("querying validator groups: %w", err) + } + + claimQueue, err := rt.ParachainHostClaimQueue() + if err != nil { + return fmt.Errorf("querying host claim queue") + } + + groupsPerPara, assignmentsPerGroup := determineGroupAssignment( + len(perSession.groups.all()), + &validatorGroups.GroupRotationInfo, + &claimQueue, + ) + + transposedCq := claimQueue.ToTransposed() + + s.state.perRelayParent[rp] = &perRelayParentState{ + localValidator: nil, //todo + statementStore: nil, // todo + session: sessionIdx, + groupsPerPara: groupsPerPara, + disabledValidators: disableValidatorsSet, + transposedClaimQueue: transposedCq, + assignmentsPerGroup: assignmentsPerGroup, + } + return nil } +// Utility function to populate: +// - per relay parent `ParaId` to `GroupIndex` mappings. 
+// - per `GroupIndex` claim queue assignments +func determineGroupAssignment(numCores int, + groupRotationInfo *parachaintypes.GroupRotationInfo, + claimQueue *parachaintypes.ClaimQueue, +) (map[parachaintypes.ParaID][]parachaintypes.GroupIndex, map[parachaintypes.GroupIndex][]parachaintypes.ParaID) { + // Determine the core indices occupied by each para at the current relay parent. To support + // on-demand parachains we also consider the core indices at next blocks. + ordered := claimQueue.Ordered() + + schedule := make(map[parachaintypes.CoreIndex][]parachaintypes.ParaID) + for _, cqEntry := range ordered { + schedule[cqEntry.Core] = cqEntry.Paras + } + + groupsPerPara := make(map[parachaintypes.ParaID][]parachaintypes.GroupIndex) + assignmentsPerGroup := make(map[parachaintypes.GroupIndex][]parachaintypes.ParaID, len(schedule)) + + for coreIdx, paras := range schedule { + groupIdx := groupRotationInfo.GroupForCore(coreIdx, uint(numCores)) + assignmentsPerGroup[groupIdx] = slices.Clone(paras) + + for _, para := range paras { + groups, ok := groupsPerPara[para] + if !ok { + groups = make([]parachaintypes.GroupIndex, 0) + groupsPerPara[para] = groups + } + + groups = append(groups, groupIdx) + } + } + + return groupsPerPara, assignmentsPerGroup +} + // handleDeactivatedLeaves deactivate leaves in the implicit view func (s *StatementDistribution) handleDeactivatedLeaves(leaves []common.Hash) { for _, l := range leaves { @@ -91,7 +229,7 @@ func (s *StatementDistribution) handleDeactivatedLeaves(leaves []common.Hash) { sessions[v.session] = struct{}{} } - maps.DeleteFunc(s.state.perSession, func(key parachaintypes.SessionIndex, _value perSessionState) bool { + maps.DeleteFunc(s.state.perSession, func(key parachaintypes.SessionIndex, _value *perSessionState) bool { _, ok := sessions[key] return !ok }) diff --git a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index 37fcb70128..2d036844a7 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -2,7 +2,6 @@ package statementdistribution import ( - "github.com/ChainSafe/gossamer/dot/parachain/grid" parachainnetwork "github.com/ChainSafe/gossamer/dot/parachain/network" "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/events" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" @@ -37,6 +36,7 @@ type perRelayParentState struct { transposedClaimQueue parachaintypes.TransposedClaimQueue groupsPerPara map[parachaintypes.ParaID][]parachaintypes.GroupIndex disabledValidators map[parachaintypes.ValidatorIndex]struct{} + assignmentsPerGroup map[parachaintypes.GroupIndex][]parachaintypes.ParaID } func (p *perRelayParentState) activeValidatorState() *activeValidatorState { @@ -80,7 +80,7 @@ type activeValidatorState struct { } type perSessionState struct { - sessionInfo parachaintypes.SessionInfo + sessionInfo *parachaintypes.SessionInfo groups *groups authLookup map[parachaintypes.AuthorityDiscoveryID]parachaintypes.ValidatorIndex gridView any // TODO: use SessionTopologyView from statement-distribution grid (#4576) @@ -90,7 +90,7 @@ type perSessionState struct { allowV2Descriptors bool } -func newPerSessionState(sessionInfo parachaintypes.SessionInfo, +func newPerSessionState(sessionInfo *parachaintypes.SessionInfo, keystore keystore.Keystore, backingThreshold uint32, allowV2Descriptor bool, @@ -120,7 +120,7 @@ func newPerSessionState(sessionInfo parachaintypes.SessionInfo, // Note: we use the local index rather 
than the `perSessionState.localValidator` as the // former may be not nil when the latter is nil, due to the set of nodes in // discovery being a superset of the active validators for consensus. -func (s *perSessionState) supplyTopology(topology *grid.SessionGridTopology, localIdx *parachaintypes.ValidatorIndex) { +func (s *perSessionState) supplyTopology(topology events.SessionGridTopology, localIdx *parachaintypes.ValidatorIndex) { // TODO #4373: implement once buildSessionTopology is done // gridView := buildSessionTopology( // s.sessionInfo.ValidatorGroups, @@ -215,8 +215,8 @@ func (p *peerState) iterKnownDiscoveryIDs() []parachaintypes.AuthorityDiscoveryI type v2State struct { implicitView parachainutil.ImplicitView candidates candidatesTracker // TODO #4718: Create Candidates Tracker - perRelayParent map[common.Hash]perRelayParentState - perSession map[parachaintypes.SessionIndex]perSessionState + perRelayParent map[common.Hash]*perRelayParentState + perSession map[parachaintypes.SessionIndex]*perSessionState unusedTopologies map[parachaintypes.SessionIndex]events.NewGossipTopology peers map[string]peerState keystore keystore.Keystore @@ -229,8 +229,8 @@ func newV2State(ks keystore.Keystore, iv parachainutil.ImplicitView) *v2State { return &v2State{ implicitView: iv, candidates: nil, - perRelayParent: map[common.Hash]perRelayParentState{}, - perSession: map[parachaintypes.SessionIndex]perSessionState{}, + perRelayParent: map[common.Hash]*perRelayParentState{}, + perSession: map[parachaintypes.SessionIndex]*perSessionState{}, unusedTopologies: map[parachaintypes.SessionIndex]events.NewGossipTopology{}, peers: map[string]peerState{}, keystore: ks, diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index 15e0a98d9e..c04d6179d3 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -10,13 +10,22 @@ import ( parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" parachainutil "github.com/ChainSafe/gossamer/dot/parachain/util" + "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/ChainSafe/gossamer/lib/runtime" ) var logger = log.NewFromGlobal(log.AddContext("pkg", "parachain-statement-distribution")) +type BlockState interface { + GetHeader(hash common.Hash) (header *types.Header, err error) + GetRuntime(blockHash common.Hash) (instance runtime.Instance, err error) +} + type StatementDistribution struct { + blockState BlockState SubSystemToOverseer chan<- any state *v2State } @@ -24,6 +33,7 @@ type StatementDistribution struct { func New(overseerChan chan<- any, ks keystore.Keystore, blockState parachainutil.BlockState) *StatementDistribution { return &StatementDistribution{ SubSystemToOverseer: overseerChan, + blockState: blockState, state: newV2State(ks, parachainutil.NewBackingImplicitView(blockState, nil)), } } @@ -78,5 +88,13 @@ func (s *StatementDistribution) handleSubsystemMessage(overseerMessage any) (boo return false, nil } +func (s *StatementDistribution) sendPeerMessageForRelayParent(pid string, rp common.Hash) { + panic("unimplemented") +} + +func (s *StatementDistribution) newLeafFragmentChainUpdates(rp common.Hash) { + panic("unimplemented") +} + // TODO: https://github.com/ChainSafe/gossamer/issues/4285 func taskResponder(responderCh chan any) {} 
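Illustrative note (not part of the patch): the determineGroupAssignment helper added above maps each claim-queue core to a backing group via the group-rotation rule and then inverts that mapping per para. Below is a minimal, self-contained Go sketch of that idea under simplified assumptions; the types, the groupForCore formula, and the rotationInfo struct here are stand-ins for illustration only, not the gossamer parachaintypes API.

package main

import "fmt"

type CoreIndex uint32
type GroupIndex uint32
type ParaID uint32

// rotationInfo is a simplified stand-in for GroupRotationInfo.
type rotationInfo struct {
	sessionStartBlock uint32
	rotationFrequency uint32
	now               uint32
}

// groupForCore applies the assumed rotation rule: groups shift across cores
// every rotationFrequency blocks since the session started.
func (r rotationInfo) groupForCore(core CoreIndex, numGroups uint32) GroupIndex {
	if numGroups == 0 || r.rotationFrequency == 0 {
		return 0
	}
	rotations := (r.now - r.sessionStartBlock) / r.rotationFrequency
	return GroupIndex((uint32(core) + rotations) % numGroups)
}

func main() {
	// Hypothetical claim queue: which paras are scheduled on which core.
	claimQueue := map[CoreIndex][]ParaID{
		0: {1, 2},
		1: {3, 4},
	}
	rot := rotationInfo{sessionStartBlock: 100, rotationFrequency: 10, now: 105}

	groupsPerPara := map[ParaID][]GroupIndex{}
	assignmentsPerGroup := map[GroupIndex][]ParaID{}
	for core, paras := range claimQueue {
		g := rot.groupForCore(core, uint32(len(claimQueue)))
		// Record the claim-queue assignment for the group, and the reverse
		// mapping from each para to the groups backing it.
		assignmentsPerGroup[g] = append([]ParaID(nil), paras...)
		for _, p := range paras {
			groupsPerPara[p] = append(groupsPerPara[p], g)
		}
	}
	fmt.Println(groupsPerPara)       // e.g. map[1:[0] 2:[0] 3:[1] 4:[1]]
	fmt.Println(assignmentsPerGroup) // e.g. map[0:[1 2] 1:[3 4]]
}

With no rotations elapsed (now just past session start), core 0 maps to group 0 and core 1 to group 1, which matches the expectations asserted in the happy-path test added later in this series.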
diff --git a/dot/parachain/types/types.go b/dot/parachain/types/types.go index b526413597..f64dd10c83 100644 --- a/dot/parachain/types/types.go +++ b/dot/parachain/types/types.go @@ -924,20 +924,27 @@ type Subsystem interface { Stop() } -// AvailabilityChunkMapping tells if the chunk mapping feature is enabled. -// Enables the implementation of -// [RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md). -// Must not be enabled unless all validators and collators have stopped using `req_chunk` -// protocol version 1. If it is enabled, validators can start systematic chunk recovery. -const AvailabilityChunkMapping NodeFeatureIndex = 2 - // NodeFeatureIndex represents the index of a feature in a bitvector of node features fetched from runtime. type NodeFeatureIndex byte -// This feature enables the extension of `BackedCandidate.ValidatorIndices` by 8 bits. -// The value stored there represents the assumed core index where the candidates -// are backed. This is needed for the elastic scaling MVP. -const ElasticScalingMVP NodeFeatureIndex = 1 +const ( + // This feature enables the extension of `BackedCandidate.ValidatorIndices` by 8 bits. + // The value stored there represents the assumed core index where the candidates + // are backed. This is needed for the elastic scaling MVP. + ElasticScalingMVP NodeFeatureIndex = 1 + + // AvailabilityChunkMapping tells if the chunk mapping feature is enabled. + // Enables the implementation of + // [RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md). + // Must not be enabled unless all validators and collators have stopped using `req_chunk` + // protocol version 1. If it is enabled, validators can start systematic chunk recovery. + AvailabilityChunkMapping NodeFeatureIndex = 2 + + // Enables node side support of `CoreIndex` committed candidate receipts. + // See [RFC-103](https://github.com/polkadot-fellows/RFCs/pull/103) for details. + // Only enable if at least 2/3 of nodes support the feature. 
+ CandidateReceiptV2Feature NodeFeatureIndex = 3 +) // TransposedClaimQueue represents a mapping between ParaID and the cores assigned per depth type TransposedClaimQueue map[ParaID]map[uint8]map[CoreIndex]struct{} From 19a3f6e691b04fb03ef598989b910d92296b5e2f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 26 May 2025 17:16:50 -0400 Subject: [PATCH 3/9] chore: adjust sd protocol message types --- .../active_leaves_update.go | 4 +- .../active_leaves_update_test.go | 1 + .../statement-distribution/candidates.go | 14 ++ .../statement-distribution/state_v2.go | 26 +- .../statement_distribution.go | 228 +++++++++++++++++- dot/parachain/types/overseer_message.go | 5 - dot/parachain/types/statement.go | 94 +++++++- dot/parachain/types/statement_test.go | 34 ++- .../statement_distribution_message.go | 57 +++++ 9 files changed, 420 insertions(+), 43 deletions(-) create mode 100644 dot/parachain/statement-distribution/active_leaves_update_test.go create mode 100644 dot/parachain/statement-distribution/candidates.go diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go index e8a6e0d078..e96f2869b1 100644 --- a/dot/parachain/statement-distribution/active_leaves_update.go +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -45,11 +45,11 @@ func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.Ac for pid, fresh := range updatePeers { for _, freshRp := range fresh { - s.sendPeerMessageForRelayParent(pid, freshRp) + s.sendPeerMessagesForRelayParent(pid, freshRp) } } - s.newLeafFragmentChainUpdates(leaf.Hash) + s.fragmentChainUpdateInner(&leaf.Hash, nil, nil, nil) return nil } diff --git a/dot/parachain/statement-distribution/active_leaves_update_test.go b/dot/parachain/statement-distribution/active_leaves_update_test.go new file mode 100644 index 0000000000..7a2af639d1 --- /dev/null +++ b/dot/parachain/statement-distribution/active_leaves_update_test.go @@ -0,0 +1 @@ +package statementdistribution diff --git a/dot/parachain/statement-distribution/candidates.go b/dot/parachain/statement-distribution/candidates.go new file mode 100644 index 0000000000..54720fb6b8 --- /dev/null +++ b/dot/parachain/statement-distribution/candidates.go @@ -0,0 +1,14 @@ +package statementdistribution + +import ( + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +type confirmedCandidate struct { + receipt parachaintypes.CommittedCandidateReceiptV2 + pvd parachaintypes.PersistedValidationData + assignedGroup parachaintypes.GroupIndex + parentHash common.Hash + importableUnder map[common.Hash]struct{} +} diff --git a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index 2d036844a7..f86a30f905 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -10,12 +10,32 @@ import ( "github.com/ChainSafe/gossamer/lib/keystore" ) +type compactType uint8 + +const ( + compactValid compactType = iota + compactSeconded +) + +type pendingStmt struct { + validadorIdx parachaintypes.ValidatorIndex + compact parachaintypes.CompactStatement +} + +type statementStore interface { + // Get the full statement of this kind issued by this validator, if it is known. 
+ // TODO: need to support a signed compact statement + validatorStatement(stmt pendingStmt) *parachaintypes.SignedStatement +} + // groupTracker interface exports methods // enabling the statement distribution // to track validator peers that belong // to the same validation group type groupTracker interface { warningIfTooManyPendingStatements(rp common.Hash) + pendingStatementsFor(target parachaintypes.ValidatorIndex) []pendingStmt + noteSend(target, originator parachaintypes.ValidatorIndex, stmt parachaintypes.CompactStatement) } // requestManager defines the interface that manages @@ -25,12 +45,16 @@ type requestManager interface { } type candidatesTracker interface { + frontierHypotheticals(*common.Hash, *parachaintypes.ParaID) []parachaintypes.HypotheticalCandidate onDeactivateLeaves(leaves []common.Hash, rpLiveFn func(common.Hash) bool) + noteImportableUnder(hypo parachaintypes.HypotheticalCandidate, leaf common.Hash) + getConfirmed(candidateHash parachaintypes.CandidateHash) *confirmedCandidate + isConfirmed(candidateHash parachaintypes.CandidateHash) bool } type perRelayParentState struct { localValidator *localValidatorStore - statementStore any // TODO #4719: Create statement store + statementStore statementStore // TODO #4719: Create statement store secondingLimit uint session parachaintypes.SessionIndex transposedClaimQueue parachaintypes.TransposedClaimQueue diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index c04d6179d3..848a4bf445 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -6,15 +6,25 @@ package statementdistribution import ( "context" "fmt" + "slices" "time" + parachainnetwork "github.com/ChainSafe/gossamer/dot/parachain/network" + networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" + prospectiveparachainsmessages "github.com/ChainSafe/gossamer/dot/parachain/prospective-parachains/messages" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" parachainutil "github.com/ChainSafe/gossamer/dot/parachain/util" + validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/lib/runtime" + "github.com/libp2p/go-libp2p/core/peer" +) + +const ( + HypotheticalMembershipTimeout = 2 * time.Second ) var logger = log.NewFromGlobal(log.AddContext("pkg", "parachain-statement-distribution")) @@ -88,13 +98,225 @@ func (s *StatementDistribution) handleSubsystemMessage(overseerMessage any) (boo return false, nil } -func (s *StatementDistribution) sendPeerMessageForRelayParent(pid string, rp common.Hash) { - panic("unimplemented") +// Send a peer, apparently just becoming aware of a relay-parent, all messages +// concerning that relay-parent. +// +// In particular, we send all statements pertaining to our common cluster, +// as well as all manifests, acknowledgements, or other grid statements. +// +// Note that due to the way we handle views, our knowledge of peers' relay parents +// may "oscillate" with relay parents repeatedly leaving and entering the +// view of a peer based on the implicit view of active leaves. +// +// This function is designed to be cheap and not to send duplicate messages in repeated +// cases. 
+func (s *StatementDistribution) sendPeerMessagesForRelayParent(pid string, rp common.Hash) { + peerData, ok := s.state.peers[pid] + if !ok { + return + } + + rpState, ok := s.state.perRelayParent[rp] + if !ok { + return + } + + perSessionState, ok := s.state.perSession[rpState.session] + if !ok { + return + } + + for _, vID := range peerData.iterKnownDiscoveryIDs() { + vIndex, ok := perSessionState.authLookup[vID] + if !ok { + continue + } + + active := rpState.activeValidatorState() + if active != nil { + s.sendPendingClusterStatements(rp, + peer.ID(pid), peerData.protocolVersion, + vIndex, + active.clusterTracker, + s.state.candidates, + rpState.statementStore, + ) + } + + s.sendPendingGridStatements(rp, + peer.ID(pid), peerData.protocolVersion, + vIndex, + perSessionState.groups, + rpState, + s.state.candidates, + ) + } } -func (s *StatementDistribution) newLeafFragmentChainUpdates(rp common.Hash) { +func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, + requiredParentHash *common.Hash, requiredParentParaID *parachaintypes.ParaID, + knowHypotheticals *[]parachaintypes.HypotheticalCandidate) { + + // 1. get hypothetical candidates + var hypotheticals []parachaintypes.HypotheticalCandidate + + if knowHypotheticals != nil { + hypotheticals = *knowHypotheticals + } else { + s.state.candidates.frontierHypotheticals(requiredParentHash, requiredParentParaID) + } + + // 2. find out which are in the frontier + response := make(chan []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem) + hypotheticalMembershipRequest := prospectiveparachainsmessages.GetHypotheticalMembership{ + Candidates: hypotheticals, + FragmentChainRelayParent: rp, + Response: response, + } + + s.SubSystemToOverseer <- hypotheticalMembershipRequest + + var candidateMemberships []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem + select { + case <-time.After(HypotheticalMembershipTimeout): + logger.Warnf("timed out waiting for hypothetical membership response for relay-parent %s", rp.String()) + return + case resp := <-response: + candidateMemberships = resp + + } + + // 3. note that they are importable under a given leaf hash. + for _, item := range candidateMemberships { + // skip parablocks which aren't potential candidates + if len(item.HypotheticalMembership) == 0 { + continue + } + + for _, leafHash := range item.HypotheticalMembership { + s.state.candidates.noteImportableUnder(item.HypotheticalCandidate, leafHash) + } + + // 4. for confirmed candidates, send all statements which are new to backing. + if complete, ok := item.HypotheticalCandidate.(*parachaintypes.HypotheticalCandidateComplete); ok { + confirmedCandidate := s.state.candidates.getConfirmed(complete.ClaimedCandidateHash) + perRelayParentState, ok := s.state.perRelayParent[complete.CommittedCandidateReceipt.Descriptor.RelayParent] + if confirmedCandidate == nil || !ok { + continue + } + + groupIndex := confirmedCandidate.assignedGroup + perSessionState, ok := s.state.perSession[perRelayParentState.session] + if !ok { + continue + } + + // Sanity check if group_index is valid for this para at relay parent. 
+ expectedGroups, ok := perRelayParentState.groupsPerPara[complete.CommittedCandidateReceipt.Descriptor.ParaID] + if !ok { + continue + } + + if !slices.Contains(expectedGroups, groupIndex) { + logger.Warnf("group index %d not found for para %d at relay parent %s", + groupIndex, complete.CommittedCandidateReceipt.Descriptor.ParaID, rp.String()) + continue + } + + s.sendBackingFreshStatements( + complete.ClaimedCandidateHash, + confirmedCandidate.assignedGroup, + complete.CommittedCandidateReceipt.Descriptor.RelayParent, + perRelayParentState, + confirmedCandidate, + perSessionState, + ) + } + } + panic("unimplemented") } +// Send a peer all pending cluster statements for a relay parent. +func (s *StatementDistribution) sendPendingClusterStatements(rp common.Hash, + peerID peer.ID, validationVersion parachainnetwork.ValidationVersion, + peerValidatorID parachaintypes.ValidatorIndex, + clusterTracker groupTracker, + candidates candidatesTracker, + statementStore statementStore, +) { + pendingStmts := clusterTracker.pendingStatementsFor(peerValidatorID) + for _, stmt := range pendingStmts { + if !candidates.isConfirmed(stmt.compact.CandidateHash()) { + continue + } + + msg := pendingStatementNetworkMessage(statementStore, rp, peerID, validationVersion, stmt) + if msg != nil { + clusterTracker.noteSend(peerValidatorID, stmt.validadorIdx, stmt.compact) + // TODO: create a SendValidationMessages to send a batch of messages + s.SubSystemToOverseer <- msg + } + } +} + +func pendingStatementNetworkMessage( + stmtStore statementStore, + rp common.Hash, + peerID peer.ID, validationVersion parachainnetwork.ValidationVersion, + pending pendingStmt, +) *networkbridgemessages.SendValidationMessage { + if validationVersion == parachainnetwork.ValidationVersionV3 { + signed := stmtStore.validatorStatement(pending) + if signed == nil { + return nil + } + + sdmV3 := validationprotocol.NewStatementDistributionMessageV3() + err := sdmV3.SetValue(validationprotocol.StatementV3{ + Hash: rp, + UncheckedSignedStatement: parachaintypes.UncheckedSignedCompactStatement(*signed), + }) + if err != nil { + panic(fmt.Sprintf("unexpected error setting value in StatementDistributionMessageV3: %s", err)) + } + + // TODO: this will panic as validation protocol does not support V3 yet + vp := validationprotocol.NewValidationProtocolVDT() + err = vp.SetValue(sdmV3) + if err != nil { + panic(fmt.Sprintf("unexpected error setting value in NewValidationProtocolVDT: %s", err)) + } + + return &networkbridgemessages.SendValidationMessage{ + To: []peer.ID{peerID}, + ValidationProtocolMessage: vp, + } + } + + return nil +} + +// Send a peer all pending grid messages / acknowledgements / follow up statements +// upon learning about a new relay parent. +func (s *StatementDistribution) sendPendingGridStatements(relayParent common.Hash, + peerID peer.ID, validationVersion parachainnetwork.ValidationVersion, + peerValidatorID parachaintypes.ValidatorIndex, + groups *groups, + rpState *perRelayParentState, + candidates candidatesTracker) { + panic("unimplemented issue #4730") +} + +// Send backing fresh statements. 
This should only be performed on importable & confirmed candidates +func (s *StatementDistribution) sendBackingFreshStatements(candidateHash parachaintypes.CandidateHash, + groupIndex parachaintypes.GroupIndex, + relayParent common.Hash, + rpState *perRelayParentState, + confirmed *confirmedCandidate, + perSessionState *perSessionState) { + panic("unimplemented issue #4419") +} + // TODO: https://github.com/ChainSafe/gossamer/issues/4285 func taskResponder(responderCh chan any) {} diff --git a/dot/parachain/types/overseer_message.go b/dot/parachain/types/overseer_message.go index 336d1ecab5..77fc861c4c 100644 --- a/dot/parachain/types/overseer_message.go +++ b/dot/parachain/types/overseer_message.go @@ -5,11 +5,6 @@ package parachaintypes import "github.com/ChainSafe/gossamer/lib/common" -var ( - _ HypotheticalCandidate = (*HypotheticalCandidateIncomplete)(nil) - _ HypotheticalCandidate = (*HypotheticalCandidateComplete)(nil) -) - // OverseerFuncRes is a result of an overseer function type OverseerFuncRes[T any] struct { Err error diff --git a/dot/parachain/types/statement.go b/dot/parachain/types/statement.go index d7eaadbdd5..1423a65520 100644 --- a/dot/parachain/types/statement.go +++ b/dot/parachain/types/statement.go @@ -87,13 +87,13 @@ type Valid CandidateHash func (s StatementVDT) CompactStatement() (any, error) { switch s := s.inner.(type) { case Valid: - return CompactStatement[Valid]{Value: s}, nil + return &CompactValid{inner: s}, nil case Seconded: hash, err := GetCandidateHash(CommittedCandidateReceiptV2(s)) if err != nil { return nil, fmt.Errorf("getting candidate hash: %w", err) } - return CompactStatement[SecondedCandidateHash]{Value: SecondedCandidateHash(hash)}, nil + return &CompactSeconded{inner: SecondedCandidateHash(hash)}, nil } return nil, fmt.Errorf("unsupported type") } @@ -179,6 +179,21 @@ type SignedFullStatementWithPVD struct { PersistedValidationData *PersistedValidationData } +type UncheckedSignedCompactStatement struct { + // The payload is part of the signed data. The rest is the signing context, + // which is known both at signing and at validation. + Payload CompactStatement `scale:"1"` + + // The index of the validator signing this statement. + ValidatorIndex ValidatorIndex `scale:"2"` + + // The signature by the validator of the signed payload. + Signature ValidatorSignature `scale:"3"` +} + +// SignedStatement represents a signed compact statement, suitable to be sent to the chain. +type SignedStatement UncheckedSignedCompactStatement + type SecondedCandidateHash CandidateHash type CompactStatementValues interface { @@ -234,13 +249,65 @@ func (mvdt compactStatementInner) ValueAt(index uint) (value any, err error) { // CompactStatement is a compact representation of a statement that can be made about parachain candidates. // this is the actual value that is signed. 
-type CompactStatement[T CompactStatementValues] struct { - Value T +type CompactStatement interface { + CandidateHash() CandidateHash + MarshalSCALE() ([]byte, error) + UnmarshalSCALE(reader io.Reader) error +} + +var ( + _ CompactStatement = (*CompactValid)(nil) + _ CompactStatement = (*CompactSeconded)(nil) +) + +type CompactValid struct { + inner Valid +} + +func (v *CompactValid) CandidateHash() CandidateHash { + return CandidateHash(v.inner) +} + +func (v *CompactValid) MarshalSCALE() ([]byte, error) { + return compactMarshalSCALE(v.inner) +} + +func (v *CompactValid) UnmarshalSCALE(reader io.Reader) error { + decoded, err := compactUnmarshalSCALE[Valid](reader) + if err != nil { + return err + } + + v.inner = decoded + return nil +} + +type CompactSeconded struct { + inner SecondedCandidateHash +} + +func (sch *CompactSeconded) CandidateHash() CandidateHash { + return CandidateHash(sch.inner) +} + +func (sch *CompactSeconded) MarshalSCALE() ([]byte, error) { + return compactMarshalSCALE(sch.inner) } -func (c CompactStatement[CompactStatementValues]) MarshalSCALE() ([]byte, error) { +func (sch *CompactSeconded) UnmarshalSCALE(reader io.Reader) error { + fmt.Printf("seconded unmarshal\n") + decoded, err := compactUnmarshalSCALE[SecondedCandidateHash](reader) + if err != nil { + return err + } + + sch.inner = decoded + return nil +} + +func compactMarshalSCALE[T CompactStatementValues](v T) ([]byte, error) { inner := compactStatementInner{} - err := inner.SetValue(c.Value) + err := inner.SetValue(v) if err != nil { return nil, fmt.Errorf("setting value: %w", err) } @@ -256,30 +323,31 @@ func (c CompactStatement[CompactStatementValues]) MarshalSCALE() ([]byte, error) return buffer.Bytes(), nil } -func (c *CompactStatement[CompactStatementValues]) UnmarshalSCALE(reader io.Reader) error { +func compactUnmarshalSCALE[T CompactStatementValues](reader io.Reader) (T, error) { decoder := scale.NewDecoder(reader) var magicBytes [4]byte err := decoder.Decode(&magicBytes) if err != nil { - return err + return *new(T), err } + fmt.Println("magic bytes:", magicBytes) + if !bytes.Equal(magicBytes[:], backingStatementMagic[:]) { - return fmt.Errorf("invalid magic bytes") + return *new(T), fmt.Errorf("invalid magic bytes") } var inner compactStatementInner err = decoder.Decode(&inner) if err != nil { - return fmt.Errorf("decoding compactStatementInner: %w", err) + return *new(T), fmt.Errorf("decoding compactStatementInner: %w", err) } value, err := inner.Value() if err != nil { - return fmt.Errorf("getting value: %w", err) + return *new(T), fmt.Errorf("getting value: %w", err) } - c.Value = value.(CompactStatementValues) - return nil + return value.(T), nil } diff --git a/dot/parachain/types/statement_test.go b/dot/parachain/types/statement_test.go index 24bedc044d..8029c3b1be 100644 --- a/dot/parachain/types/statement_test.go +++ b/dot/parachain/types/statement_test.go @@ -180,31 +180,27 @@ func TestCompactStatement(t *testing.T) { testCases := []struct { name string - compactStatement any + compactStatement CompactStatement encodingValue []byte - expectedErr error }{ { - name: "SecondedCandidateHash", - compactStatement: CompactStatement[SecondedCandidateHash]{ - Value: SecondedCandidateHash{Value: getDummyHash(6)}, - }, + name: "SecondedCandidateHash", + compactStatement: &CompactSeconded{SecondedCandidateHash{Value: getDummyHash(6)}}, encodingValue: []byte{66, 75, 78, 71, 1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}, }, { - name: "Valid", - 
compactStatement: CompactStatement[Valid]{ - Value: Valid{Value: getDummyHash(7)}, - }, - encodingValue: []byte{ - 66, 75, 78, 71, 2, + name: "Valid", + compactStatement: &CompactValid{Valid{Value: getDummyHash(7)}}, + encodingValue: []byte{66, 75, 78, 71, 2, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}, }, } for _, c := range testCases { c := c + fmt.Println("spawning test for", c.name) + t.Run(c.name, func(t *testing.T) { t.Parallel() @@ -220,19 +216,19 @@ func TestCompactStatement(t *testing.T) { t.Parallel() switch expectedSatetement := c.compactStatement.(type) { - case CompactStatement[Valid]: - var actualStatement CompactStatement[Valid] + case *CompactValid: + var actualStatement CompactValid err := scale.Unmarshal(c.encodingValue, &actualStatement) require.NoError(t, err) - require.EqualValues(t, expectedSatetement, actualStatement) - case CompactStatement[SecondedCandidateHash]: - var actualStatement CompactStatement[SecondedCandidateHash] + require.EqualValues(t, *expectedSatetement, actualStatement) + case *CompactSeconded: + fmt.Println("testing sch...") + var actualStatement CompactSeconded err := scale.Unmarshal(c.encodingValue, &actualStatement) require.NoError(t, err) - require.EqualValues(t, expectedSatetement, actualStatement) + require.EqualValues(t, *expectedSatetement, actualStatement) } }) - }) } } diff --git a/dot/parachain/validation-protocol/statement_distribution_message.go b/dot/parachain/validation-protocol/statement_distribution_message.go index 632b5fae37..0b3bd81c23 100644 --- a/dot/parachain/validation-protocol/statement_distribution_message.go +++ b/dot/parachain/validation-protocol/statement_distribution_message.go @@ -102,3 +102,60 @@ type StatementMetadata struct { // Signature of seconding validator. Signature parachaintypes.ValidatorSignature `scale:"4"` } + +// StatementV3 represents a signed compact statement under a given relay-parent. +// present in protocol V3 +type StatementV3 struct { + Hash common.Hash `scale:"1"` + UncheckedSignedStatement parachaintypes.UncheckedSignedCompactStatement `scale:"2"` +} + +// StatementDistributionMessageV3 defines the network messages used by +// the statement distribution subsystem. 
+type StatementDistributionMessageV3Values interface { + Statement +} + +type StatementDistributionMessageV3 struct { + inner any +} + +func NewStatementDistributionMessageV3() StatementDistributionMessageV3 { + return StatementDistributionMessageV3{} +} + +func setStatementDistributionMessageV3[Value StatementDistributionMessageV3Values]( + mvdt *StatementDistributionMessageV3, value Value) { + mvdt.inner = value +} + +func (mvdt *StatementDistributionMessageV3) SetValue(value any) (err error) { + switch value := value.(type) { + case Statement: + setStatementDistributionMessageV3(mvdt, value) + return + default: + return fmt.Errorf("unsupported type") + } +} + +func (mvdt StatementDistributionMessageV3) IndexValue() (index uint, value any, err error) { + switch mvdt.inner.(type) { + case Statement: + return 0, mvdt.inner, nil + } + return 0, nil, scale.ErrUnsupportedVaryingDataTypeValue +} + +func (mvdt StatementDistributionMessageV3) Value() (value any, err error) { + _, value, err = mvdt.IndexValue() + return +} + +func (mvdt StatementDistributionMessageV3) ValueAt(index uint) (value any, err error) { + switch index { + case 0: + return Statement{}, nil + } + return nil, scale.ErrUnknownVaryingDataTypeValue +} From d42aa3922c60ac9e655bb197cedd757e8d2a36e9 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 11 Jun 2025 11:50:25 -0400 Subject: [PATCH 4/9] chore: wip tests --- .../active_leaves_update.go | 58 +- .../active_leaves_update_test.go | 189 +++++ .../mocks_block_state_test.go | 73 ++ .../mocks_candidates_store_test.go | 56 -- .../mocks_candidates_tracker_test.go | 109 +++ .../mocks_cluster_tracker_test.go | 80 ++ .../mocks_generate_test.go | 6 +- .../mocks_instance_test.go | 756 ++++++++++++++++++ .../mocks_req_manager_test.go | 53 ++ .../statement-distribution/state_v2.go | 8 +- .../statement_distribution.go | 11 +- .../statement_distribution_test.go | 14 +- 12 files changed, 1335 insertions(+), 78 deletions(-) create mode 100644 dot/parachain/statement-distribution/mocks_block_state_test.go delete mode 100644 dot/parachain/statement-distribution/mocks_candidates_store_test.go create mode 100644 dot/parachain/statement-distribution/mocks_candidates_tracker_test.go create mode 100644 dot/parachain/statement-distribution/mocks_cluster_tracker_test.go create mode 100644 dot/parachain/statement-distribution/mocks_instance_test.go create mode 100644 dot/parachain/statement-distribution/mocks_req_manager_test.go diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go index 3972d719b0..597d0065a2 100644 --- a/dot/parachain/statement-distribution/active_leaves_update.go +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -49,8 +49,8 @@ func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.Ac } } + fmt.Println("calling fragmentChainUpdateInner", &leaf.Hash) s.fragmentChainUpdateInner(&leaf.Hash, nil, nil, nil) - return nil } @@ -143,11 +143,25 @@ func (s *StatementDistribution) handleActiveLeafUpdate(rp common.Hash) error { &claimQueue, ) + var localValidator *localValidatorState + if perSession.localValidator != nil { + localValidator = findActiveValidatorState( + *perSession.localValidator, + perSession.groups, + assignmentsPerGroup, + ) + } else { + localValidator = &localValidatorState{ + gridTracker: newGridTracker(), + active: nil, + } + } + transposedCq := claimQueue.ToTransposed() s.state.perRelayParent[rp] = &perRelayParentState{ - localValidator: 
nil, //todo - statementStore: nil, // todo + localValidator: localValidator, + statementStore: nil, // todo use statement store (#4719) session: sessionIdx, groupsPerPara: groupsPerPara, disabledValidators: disableValidatorsSet, @@ -185,10 +199,10 @@ func determineGroupAssignment(numCores int, groups, ok := groupsPerPara[para] if !ok { groups = make([]parachaintypes.GroupIndex, 0) - groupsPerPara[para] = groups } groups = append(groups, groupIdx) + groupsPerPara[para] = groups } } @@ -256,3 +270,39 @@ func (s *StatementDistribution) handleDeactivatedLeaves(leaves []common.Hash) { return !ok || (lastSessionIndex != nil && *lastSessionIndex != s) }) } + +func findActiveValidatorState( + validatorIdx parachaintypes.ValidatorIndex, + groups *groups, + assignmentsPerGroup map[parachaintypes.GroupIndex][]parachaintypes.ParaID, +) *localValidatorState { + if len(groups.all()) == 0 { + return nil + } + + ourGroup := groups.byValidatorIndex(validatorIdx) + if ourGroup == nil { + logger.Warnf("no group found for validator %d, assignmentsPerGroup=%v", validatorIdx, assignmentsPerGroup) + return nil + } + + groupValidators := groups.get(*ourGroup) + if len(groupValidators) == 0 { + logger.Warnf("no validators found in group %d, assignmentsPerGroup=%v", *ourGroup, assignmentsPerGroup) + return nil + } + + parasAssignedToCore := assignmentsPerGroup[*ourGroup] + // TODO: use cluster tracker implementation (#4713) + // secondingLimit := len(parasAssignedToCore) + + return &localValidatorState{ + gridTracker: newGridTracker(), + active: &activeValidatorState{ + index: validatorIdx, + groupIndex: *ourGroup, + assignments: slices.Clone(parasAssignedToCore), + clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) + }, + } +} diff --git a/dot/parachain/statement-distribution/active_leaves_update_test.go b/dot/parachain/statement-distribution/active_leaves_update_test.go index 7a2af639d1..bd5b131e5c 100644 --- a/dot/parachain/statement-distribution/active_leaves_update_test.go +++ b/dot/parachain/statement-distribution/active_leaves_update_test.go @@ -1 +1,190 @@ package statementdistribution + +import ( + "sync" + "testing" + + prospectiveparachainsmessages "github.com/ChainSafe/gossamer/dot/parachain/prospective-parachains/messages" + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto/sr25519" + keystore "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +// TestHandleActiveLeavesUpdate_HappyPath tests the happy path for handleActiveLeavesUpdate. +func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + leafHash := common.MustBlake2bHash([]byte("leaf1")) + activatedLeaf := ¶chaintypes.ActivatedLeaf{Hash: leafHash} + + implicitViewMock := NewMockImplicitView(ctrl) + implicitViewMock.EXPECT(). + ActivateLeaf(leafHash, gomock.Any()). + Return(nil) + implicitViewMock.EXPECT(). + AllAllowedRelayParents(). + Return([]common.Hash{leafHash}) + + rtInstanceMock := NewMockInstance(ctrl) + rtInstanceMock.EXPECT(). + ParachainHostDisabledValidators(). + Return([]parachaintypes.ValidatorIndex{}, nil) + + // as the returned session index does not exists + // in the perSession map, it will be created by handleActiveLeafUpdate + // the next mocks are needed to ensure the creation of the session state + rtInstanceMock.EXPECT(). + ParachainHostSessionIndexForChild(). 
+ Return(parachaintypes.SessionIndex(1), nil) + + dummyKeystore := keystore.NewGenericKeystore("generic_test_keystore") + kp, err := sr25519.GenerateKeypair() + require.NoError(t, err) + + err = dummyKeystore.Insert(kp) + require.NoError(t, err) + + dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) + + sessionInfoDummy := parachaintypes.SessionInfo{ + ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1}, + RandomSeed: [32]byte{}, + DisputePeriod: parachaintypes.SessionIndex(10), + Validators: []parachaintypes.ValidatorID{dummyPubKey, {2}}, + DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{3}, {4}}, + AssignmentKeys: []parachaintypes.AssignmentID{{5}, {6}}, + ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}}, + NCores: 2, + ZerothDelayTrancheWidth: 1, + RelayVRFModuloSamples: 1, + NDelayTranches: 1, + NoShowSlots: 1, + NeededApprovals: 1, + } + + rtInstanceMock.EXPECT(). + ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). + Return(&sessionInfoDummy, nil) + + rtInstanceMock.EXPECT(). + ParachainHostMinimumBackingVotes(). + Return(uint32(3), nil) + + featuresBitVec, err := parachaintypes.NewBitVec([]bool{true, true, false, true}) + require.NoError(t, err) + + rtInstanceMock.EXPECT(). + ParachainHostNodeFeatures(). + Return(featuresBitVec, nil) + + // the next runtime instances are needed to create the + // per relay parent state + dummyValidatorGroups := ¶chaintypes.ValidatorGroups{ + Validators: [][]parachaintypes.ValidatorIndex{{1}, {2}}, + GroupRotationInfo: parachaintypes.GroupRotationInfo{ + SessionStartBlock: parachaintypes.BlockNumber(100), + GroupRotationFrequency: parachaintypes.BlockNumber(10), + Now: parachaintypes.BlockNumber(105), + }, + } + rtInstanceMock.EXPECT(). + ParachainHostValidatorGroups(). + Return(dummyValidatorGroups, nil) + + dummyClaimQueue := parachaintypes.ClaimQueue{ + parachaintypes.CoreIndex{Index: 0}: {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.CoreIndex{Index: 1}: {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + } + transposedDummyClaimQueue := dummyClaimQueue.ToTransposed() + + rtInstanceMock.EXPECT(). + ParachainHostClaimQueue(). + Return(dummyClaimQueue, nil) + + blockStateMock := NewMockblockState(ctrl) + blockStateMock.EXPECT(). + GetRuntime(leafHash). + Return(rtInstanceMock, nil) + + candidatesMock := NewMockcandidatesTracker(ctrl) + candidatesMock.EXPECT(). + frontierHypotheticals(nil, nil). + Return([]parachaintypes.HypotheticalCandidate{}) + + state := &v2State{ + implicitView: implicitViewMock, + perRelayParent: make(map[common.Hash]*perRelayParentState), + perSession: make(map[parachaintypes.SessionIndex]*perSessionState), + peers: map[string]peerState{}, + keystore: dummyKeystore, + candidates: candidatesMock, // No candidates tracker needed for this test + } + + overseerCh := make(chan any, 1) + sd := &StatementDistribution{ + state: state, + blockState: blockStateMock, + SubSystemToOverseer: overseerCh, + } + + // start a goroutine to handle the overseer subsystem + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + hypotheticalMsg := <-overseerCh + msg, ok := hypotheticalMsg.(prospectiveparachainsmessages.GetHypotheticalMembership) + require.True(t, ok) + + // just return an empty slice + msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} + }() + + // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. 
+ // Instead, we just ensure no panic and no error. + err = sd.handleActiveLeavesUpdate(activatedLeaf) + require.NoError(t, err) + wg.Wait() + + // assertions + expectedPerRelayParentState := &perRelayParentState{ + localValidator: &localValidatorState{ + gridTracker: newGridTracker(), + active: &activeValidatorState{ + index: parachaintypes.ValidatorIndex(0), + groupIndex: parachaintypes.GroupIndex(0), + assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) + }, + }, + statementStore: nil, + session: parachaintypes.SessionIndex(1), + transposedClaimQueue: transposedDummyClaimQueue, + groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ + parachaintypes.ParaID(1): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(2): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(3): {parachaintypes.GroupIndex(1)}, + parachaintypes.ParaID(4): {parachaintypes.GroupIndex(1)}, + }, + disabledValidators: make(map[parachaintypes.ValidatorIndex]struct{}), + assignmentsPerGroup: map[parachaintypes.GroupIndex][]parachaintypes.ParaID{ + parachaintypes.GroupIndex(0): {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.GroupIndex(1): {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + }, + } + require.Len(t, state.perRelayParent, 1) + require.Equal(t, expectedPerRelayParentState, state.perRelayParent[leafHash]) + + expectedSessionState := newPerSessionState( + &sessionInfoDummy, + dummyKeystore, + 3, + true, + ) + require.Len(t, state.perSession, 1) + require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) +} diff --git a/dot/parachain/statement-distribution/mocks_block_state_test.go b/dot/parachain/statement-distribution/mocks_block_state_test.go new file mode 100644 index 0000000000..8cea19c6b7 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_block_state_test.go @@ -0,0 +1,73 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: blockState) +// +// Generated by this command: +// +// mockgen -destination=mocks_block_state_test.go -package=statementdistribution . blockState +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + gomock "go.uber.org/mock/gomock" +) + +// MockblockState is a mock of blockState interface. +type MockblockState struct { + ctrl *gomock.Controller + recorder *MockblockStateMockRecorder + isgomock struct{} +} + +// MockblockStateMockRecorder is the mock recorder for MockblockState. +type MockblockStateMockRecorder struct { + mock *MockblockState +} + +// NewMockblockState creates a new mock instance. +func NewMockblockState(ctrl *gomock.Controller) *MockblockState { + mock := &MockblockState{ctrl: ctrl} + mock.recorder = &MockblockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockblockState) EXPECT() *MockblockStateMockRecorder { + return m.recorder +} + +// GetHeader mocks base method. 
+func (m *MockblockState) GetHeader(hash common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", hash) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockblockStateMockRecorder) GetHeader(hash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockblockState)(nil).GetHeader), hash) +} + +// GetRuntime mocks base method. +func (m *MockblockState) GetRuntime(blockHash common.Hash) (runtime.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRuntime", blockHash) + ret0, _ := ret[0].(runtime.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRuntime indicates an expected call of GetRuntime. +func (mr *MockblockStateMockRecorder) GetRuntime(blockHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntime", reflect.TypeOf((*MockblockState)(nil).GetRuntime), blockHash) +} diff --git a/dot/parachain/statement-distribution/mocks_candidates_store_test.go b/dot/parachain/statement-distribution/mocks_candidates_store_test.go deleted file mode 100644 index b488193c54..0000000000 --- a/dot/parachain/statement-distribution/mocks_candidates_store_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: candidatesStore) -// -// Generated by this command: -// -// mockgen -destination=mocks_candidates_store_test.go -package=statementdistribution . candidatesStore -// - -// Package statementdistribution is a generated GoMock package. -package statementdistribution - -import ( - reflect "reflect" - - parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - gomock "go.uber.org/mock/gomock" -) - -// MockcandidatesStore is a mock of candidatesStore interface. -type MockcandidatesStore struct { - ctrl *gomock.Controller - recorder *MockcandidatesStoreMockRecorder - isgomock struct{} -} - -// MockcandidatesStoreMockRecorder is the mock recorder for MockcandidatesStore. -type MockcandidatesStoreMockRecorder struct { - mock *MockcandidatesStore -} - -// NewMockcandidatesStore creates a new mock instance. -func NewMockcandidatesStore(ctrl *gomock.Controller) *MockcandidatesStore { - mock := &MockcandidatesStore{ctrl: ctrl} - mock.recorder = &MockcandidatesStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockcandidatesStore) EXPECT() *MockcandidatesStoreMockRecorder { - return m.recorder -} - -// getConfirmed mocks base method. -func (m *MockcandidatesStore) getConfirmed(candidateHash parachaintypes.CandidateHash) (*confirmedCandidate, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getConfirmed", candidateHash) - ret0, _ := ret[0].(*confirmedCandidate) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// getConfirmed indicates an expected call of getConfirmed. 
-func (mr *MockcandidatesStoreMockRecorder) getConfirmed(candidateHash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getConfirmed", reflect.TypeOf((*MockcandidatesStore)(nil).getConfirmed), candidateHash) -} diff --git a/dot/parachain/statement-distribution/mocks_candidates_tracker_test.go b/dot/parachain/statement-distribution/mocks_candidates_tracker_test.go new file mode 100644 index 0000000000..32d1b87230 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_candidates_tracker_test.go @@ -0,0 +1,109 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: candidatesTracker) +// +// Generated by this command: +// +// mockgen -destination=mocks_candidates_tracker_test.go -package=statementdistribution . candidatesTracker +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + common "github.com/ChainSafe/gossamer/lib/common" + gomock "go.uber.org/mock/gomock" +) + +// MockcandidatesTracker is a mock of candidatesTracker interface. +type MockcandidatesTracker struct { + ctrl *gomock.Controller + recorder *MockcandidatesTrackerMockRecorder + isgomock struct{} +} + +// MockcandidatesTrackerMockRecorder is the mock recorder for MockcandidatesTracker. +type MockcandidatesTrackerMockRecorder struct { + mock *MockcandidatesTracker +} + +// NewMockcandidatesTracker creates a new mock instance. +func NewMockcandidatesTracker(ctrl *gomock.Controller) *MockcandidatesTracker { + mock := &MockcandidatesTracker{ctrl: ctrl} + mock.recorder = &MockcandidatesTrackerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockcandidatesTracker) EXPECT() *MockcandidatesTrackerMockRecorder { + return m.recorder +} + +// frontierHypotheticals mocks base method. +func (m *MockcandidatesTracker) frontierHypotheticals(arg0 *common.Hash, arg1 *parachaintypes.ParaID) []parachaintypes.HypotheticalCandidate { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "frontierHypotheticals", arg0, arg1) + ret0, _ := ret[0].([]parachaintypes.HypotheticalCandidate) + return ret0 +} + +// frontierHypotheticals indicates an expected call of frontierHypotheticals. +func (mr *MockcandidatesTrackerMockRecorder) frontierHypotheticals(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "frontierHypotheticals", reflect.TypeOf((*MockcandidatesTracker)(nil).frontierHypotheticals), arg0, arg1) +} + +// getConfirmed mocks base method. +func (m *MockcandidatesTracker) getConfirmed(candidateHash parachaintypes.CandidateHash) (*confirmedCandidate, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getConfirmed", candidateHash) + ret0, _ := ret[0].(*confirmedCandidate) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// getConfirmed indicates an expected call of getConfirmed. +func (mr *MockcandidatesTrackerMockRecorder) getConfirmed(candidateHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getConfirmed", reflect.TypeOf((*MockcandidatesTracker)(nil).getConfirmed), candidateHash) +} + +// isConfirmed mocks base method. 
+func (m *MockcandidatesTracker) isConfirmed(candidateHash parachaintypes.CandidateHash) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "isConfirmed", candidateHash) + ret0, _ := ret[0].(bool) + return ret0 +} + +// isConfirmed indicates an expected call of isConfirmed. +func (mr *MockcandidatesTrackerMockRecorder) isConfirmed(candidateHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isConfirmed", reflect.TypeOf((*MockcandidatesTracker)(nil).isConfirmed), candidateHash) +} + +// noteImportableUnder mocks base method. +func (m *MockcandidatesTracker) noteImportableUnder(hypo parachaintypes.HypotheticalCandidate, leaf common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "noteImportableUnder", hypo, leaf) +} + +// noteImportableUnder indicates an expected call of noteImportableUnder. +func (mr *MockcandidatesTrackerMockRecorder) noteImportableUnder(hypo, leaf any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "noteImportableUnder", reflect.TypeOf((*MockcandidatesTracker)(nil).noteImportableUnder), hypo, leaf) +} + +// onDeactivateLeaves mocks base method. +func (m *MockcandidatesTracker) onDeactivateLeaves(leaves []common.Hash, rpLiveFn func(common.Hash) bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "onDeactivateLeaves", leaves, rpLiveFn) +} + +// onDeactivateLeaves indicates an expected call of onDeactivateLeaves. +func (mr *MockcandidatesTrackerMockRecorder) onDeactivateLeaves(leaves, rpLiveFn any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onDeactivateLeaves", reflect.TypeOf((*MockcandidatesTracker)(nil).onDeactivateLeaves), leaves, rpLiveFn) +} diff --git a/dot/parachain/statement-distribution/mocks_cluster_tracker_test.go b/dot/parachain/statement-distribution/mocks_cluster_tracker_test.go new file mode 100644 index 0000000000..07f91e1959 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_cluster_tracker_test.go @@ -0,0 +1,80 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: clusterTracker) +// +// Generated by this command: +// +// mockgen -destination=mocks_cluster_tracker_test.go -package=statementdistribution . clusterTracker +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + common "github.com/ChainSafe/gossamer/lib/common" + gomock "go.uber.org/mock/gomock" +) + +// MockclusterTracker is a mock of clusterTracker interface. +type MockclusterTracker struct { + ctrl *gomock.Controller + recorder *MockclusterTrackerMockRecorder + isgomock struct{} +} + +// MockclusterTrackerMockRecorder is the mock recorder for MockclusterTracker. +type MockclusterTrackerMockRecorder struct { + mock *MockclusterTracker +} + +// NewMockclusterTracker creates a new mock instance. +func NewMockclusterTracker(ctrl *gomock.Controller) *MockclusterTracker { + mock := &MockclusterTracker{ctrl: ctrl} + mock.recorder = &MockclusterTrackerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockclusterTracker) EXPECT() *MockclusterTrackerMockRecorder { + return m.recorder +} + +// noteSend mocks base method. 
+func (m *MockclusterTracker) noteSend(target, originator parachaintypes.ValidatorIndex, stmt parachaintypes.CompactStatement) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "noteSend", target, originator, stmt) +} + +// noteSend indicates an expected call of noteSend. +func (mr *MockclusterTrackerMockRecorder) noteSend(target, originator, stmt any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "noteSend", reflect.TypeOf((*MockclusterTracker)(nil).noteSend), target, originator, stmt) +} + +// pendingStatementsFor mocks base method. +func (m *MockclusterTracker) pendingStatementsFor(target parachaintypes.ValidatorIndex) []originatorStatementPair { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "pendingStatementsFor", target) + ret0, _ := ret[0].([]originatorStatementPair) + return ret0 +} + +// pendingStatementsFor indicates an expected call of pendingStatementsFor. +func (mr *MockclusterTrackerMockRecorder) pendingStatementsFor(target any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "pendingStatementsFor", reflect.TypeOf((*MockclusterTracker)(nil).pendingStatementsFor), target) +} + +// warningIfTooManyPendingStatements mocks base method. +func (m *MockclusterTracker) warningIfTooManyPendingStatements(rp common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "warningIfTooManyPendingStatements", rp) +} + +// warningIfTooManyPendingStatements indicates an expected call of warningIfTooManyPendingStatements. +func (mr *MockclusterTrackerMockRecorder) warningIfTooManyPendingStatements(rp any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "warningIfTooManyPendingStatements", reflect.TypeOf((*MockclusterTracker)(nil).warningIfTooManyPendingStatements), rp) +} diff --git a/dot/parachain/statement-distribution/mocks_generate_test.go b/dot/parachain/statement-distribution/mocks_generate_test.go index 284b640ea1..0c54ee7685 100644 --- a/dot/parachain/statement-distribution/mocks_generate_test.go +++ b/dot/parachain/statement-distribution/mocks_generate_test.go @@ -5,4 +5,8 @@ package statementdistribution //go:generate mockgen -destination=mocks_implicitview_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/dot/parachain/util ImplicitView //go:generate mockgen -destination=mocks_statement_store_test.go -package=$GOPACKAGE . statementStore -//go:generate mockgen -destination=mocks_candidates_store_test.go -package=$GOPACKAGE . candidatesStore +//go:generate mockgen -destination=mocks_candidates_tracker_test.go -package=$GOPACKAGE . candidatesTracker +//go:generate mockgen -destination=mocks_cluster_tracker_test.go -package=$GOPACKAGE . clusterTracker +//go:generate mockgen -destination=mocks_req_manager_test.go -package=$GOPACKAGE . requestManager +//go:generate mockgen -destination=mocks_block_state_test.go -package=$GOPACKAGE . blockState +//go:generate mockgen -destination=mocks_instance_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance diff --git a/dot/parachain/statement-distribution/mocks_instance_test.go b/dot/parachain/statement-distribution/mocks_instance_test.go new file mode 100644 index 0000000000..ce94510bb6 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_instance_test.go @@ -0,0 +1,756 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ChainSafe/gossamer/lib/runtime (interfaces: Instance) +// +// Generated by this command: +// +// mockgen -destination=mocks_instance_test.go -package=statementdistribution github.com/ChainSafe/gossamer/lib/runtime Instance +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + ed25519 "github.com/ChainSafe/gossamer/lib/crypto/ed25519" + keystore "github.com/ChainSafe/gossamer/lib/keystore" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + transaction "github.com/ChainSafe/gossamer/lib/transaction" + gomock "go.uber.org/mock/gomock" +) + +// MockInstance is a mock of Instance interface. +type MockInstance struct { + ctrl *gomock.Controller + recorder *MockInstanceMockRecorder + isgomock struct{} +} + +// MockInstanceMockRecorder is the mock recorder for MockInstance. +type MockInstanceMockRecorder struct { + mock *MockInstance +} + +// NewMockInstance creates a new mock instance. +func NewMockInstance(ctrl *gomock.Controller) *MockInstance { + mock := &MockInstance{ctrl: ctrl} + mock.recorder = &MockInstanceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInstance) EXPECT() *MockInstanceMockRecorder { + return m.recorder +} + +// ApplyExtrinsic mocks base method. +func (m *MockInstance) ApplyExtrinsic(data types.Extrinsic) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyExtrinsic", data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyExtrinsic indicates an expected call of ApplyExtrinsic. +func (mr *MockInstanceMockRecorder) ApplyExtrinsic(data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyExtrinsic", reflect.TypeOf((*MockInstance)(nil).ApplyExtrinsic), data) +} + +// BabeConfiguration mocks base method. +func (m *MockInstance) BabeConfiguration() (*types.BabeConfiguration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeConfiguration") + ret0, _ := ret[0].(*types.BabeConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BabeConfiguration indicates an expected call of BabeConfiguration. +func (mr *MockInstanceMockRecorder) BabeConfiguration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeConfiguration", reflect.TypeOf((*MockInstance)(nil).BabeConfiguration)) +} + +// BabeGenerateKeyOwnershipProof mocks base method. +func (m *MockInstance) BabeGenerateKeyOwnershipProof(slot uint64, authorityID [32]byte) (types.OpaqueKeyOwnershipProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeGenerateKeyOwnershipProof", slot, authorityID) + ret0, _ := ret[0].(types.OpaqueKeyOwnershipProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BabeGenerateKeyOwnershipProof indicates an expected call of BabeGenerateKeyOwnershipProof. +func (mr *MockInstanceMockRecorder) BabeGenerateKeyOwnershipProof(slot, authorityID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeGenerateKeyOwnershipProof", reflect.TypeOf((*MockInstance)(nil).BabeGenerateKeyOwnershipProof), slot, authorityID) +} + +// BabeSubmitReportEquivocationUnsignedExtrinsic mocks base method. 
+func (m *MockInstance) BabeSubmitReportEquivocationUnsignedExtrinsic(equivocationProof types.BabeEquivocationProof, keyOwnershipProof types.OpaqueKeyOwnershipProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeSubmitReportEquivocationUnsignedExtrinsic", equivocationProof, keyOwnershipProof) + ret0, _ := ret[0].(error) + return ret0 +} + +// BabeSubmitReportEquivocationUnsignedExtrinsic indicates an expected call of BabeSubmitReportEquivocationUnsignedExtrinsic. +func (mr *MockInstanceMockRecorder) BabeSubmitReportEquivocationUnsignedExtrinsic(equivocationProof, keyOwnershipProof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeSubmitReportEquivocationUnsignedExtrinsic", reflect.TypeOf((*MockInstance)(nil).BabeSubmitReportEquivocationUnsignedExtrinsic), equivocationProof, keyOwnershipProof) +} + +// CheckInherents mocks base method. +func (m *MockInstance) CheckInherents() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CheckInherents") +} + +// CheckInherents indicates an expected call of CheckInherents. +func (mr *MockInstanceMockRecorder) CheckInherents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckInherents", reflect.TypeOf((*MockInstance)(nil).CheckInherents)) +} + +// DecodeSessionKeys mocks base method. +func (m *MockInstance) DecodeSessionKeys(enc []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecodeSessionKeys", enc) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DecodeSessionKeys indicates an expected call of DecodeSessionKeys. +func (mr *MockInstanceMockRecorder) DecodeSessionKeys(enc any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeSessionKeys", reflect.TypeOf((*MockInstance)(nil).DecodeSessionKeys), enc) +} + +// Exec mocks base method. +func (m *MockInstance) Exec(function string, data []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exec", function, data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exec indicates an expected call of Exec. +func (mr *MockInstanceMockRecorder) Exec(function, data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockInstance)(nil).Exec), function, data) +} + +// ExecuteBlock mocks base method. +func (m *MockInstance) ExecuteBlock(block *types.Block) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteBlock", block) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteBlock indicates an expected call of ExecuteBlock. +func (mr *MockInstanceMockRecorder) ExecuteBlock(block any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteBlock", reflect.TypeOf((*MockInstance)(nil).ExecuteBlock), block) +} + +// FinalizeBlock mocks base method. +func (m *MockInstance) FinalizeBlock() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FinalizeBlock") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FinalizeBlock indicates an expected call of FinalizeBlock. 
+func (mr *MockInstanceMockRecorder) FinalizeBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeBlock", reflect.TypeOf((*MockInstance)(nil).FinalizeBlock)) +} + +// GenerateSessionKeys mocks base method. +func (m *MockInstance) GenerateSessionKeys() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GenerateSessionKeys") +} + +// GenerateSessionKeys indicates an expected call of GenerateSessionKeys. +func (mr *MockInstanceMockRecorder) GenerateSessionKeys() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSessionKeys", reflect.TypeOf((*MockInstance)(nil).GenerateSessionKeys)) +} + +// GetCodeHash mocks base method. +func (m *MockInstance) GetCodeHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCodeHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// GetCodeHash indicates an expected call of GetCodeHash. +func (mr *MockInstanceMockRecorder) GetCodeHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCodeHash", reflect.TypeOf((*MockInstance)(nil).GetCodeHash)) +} + +// GrandpaAuthorities mocks base method. +func (m *MockInstance) GrandpaAuthorities() ([]types.Authority, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaAuthorities") + ret0, _ := ret[0].([]types.Authority) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GrandpaAuthorities indicates an expected call of GrandpaAuthorities. +func (mr *MockInstanceMockRecorder) GrandpaAuthorities() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaAuthorities", reflect.TypeOf((*MockInstance)(nil).GrandpaAuthorities)) +} + +// GrandpaGenerateKeyOwnershipProof mocks base method. +func (m *MockInstance) GrandpaGenerateKeyOwnershipProof(authSetID uint64, authorityID ed25519.PublicKeyBytes) (types.GrandpaOpaqueKeyOwnershipProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaGenerateKeyOwnershipProof", authSetID, authorityID) + ret0, _ := ret[0].(types.GrandpaOpaqueKeyOwnershipProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GrandpaGenerateKeyOwnershipProof indicates an expected call of GrandpaGenerateKeyOwnershipProof. +func (mr *MockInstanceMockRecorder) GrandpaGenerateKeyOwnershipProof(authSetID, authorityID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaGenerateKeyOwnershipProof", reflect.TypeOf((*MockInstance)(nil).GrandpaGenerateKeyOwnershipProof), authSetID, authorityID) +} + +// GrandpaSubmitReportEquivocationUnsignedExtrinsic mocks base method. +func (m *MockInstance) GrandpaSubmitReportEquivocationUnsignedExtrinsic(equivocationProof types.GrandpaEquivocationProof, keyOwnershipProof types.GrandpaOpaqueKeyOwnershipProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaSubmitReportEquivocationUnsignedExtrinsic", equivocationProof, keyOwnershipProof) + ret0, _ := ret[0].(error) + return ret0 +} + +// GrandpaSubmitReportEquivocationUnsignedExtrinsic indicates an expected call of GrandpaSubmitReportEquivocationUnsignedExtrinsic. 
+func (mr *MockInstanceMockRecorder) GrandpaSubmitReportEquivocationUnsignedExtrinsic(equivocationProof, keyOwnershipProof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaSubmitReportEquivocationUnsignedExtrinsic", reflect.TypeOf((*MockInstance)(nil).GrandpaSubmitReportEquivocationUnsignedExtrinsic), equivocationProof, keyOwnershipProof) +} + +// InherentExtrinsics mocks base method. +func (m *MockInstance) InherentExtrinsics(data []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InherentExtrinsics", data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InherentExtrinsics indicates an expected call of InherentExtrinsics. +func (mr *MockInstanceMockRecorder) InherentExtrinsics(data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InherentExtrinsics", reflect.TypeOf((*MockInstance)(nil).InherentExtrinsics), data) +} + +// InitializeBlock mocks base method. +func (m *MockInstance) InitializeBlock(header *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitializeBlock", header) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitializeBlock indicates an expected call of InitializeBlock. +func (mr *MockInstanceMockRecorder) InitializeBlock(header any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeBlock", reflect.TypeOf((*MockInstance)(nil).InitializeBlock), header) +} + +// Keystore mocks base method. +func (m *MockInstance) Keystore() *keystore.GlobalKeystore { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Keystore") + ret0, _ := ret[0].(*keystore.GlobalKeystore) + return ret0 +} + +// Keystore indicates an expected call of Keystore. +func (mr *MockInstanceMockRecorder) Keystore() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keystore", reflect.TypeOf((*MockInstance)(nil).Keystore)) +} + +// Metadata mocks base method. +func (m *MockInstance) Metadata() ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Metadata") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Metadata indicates an expected call of Metadata. +func (mr *MockInstanceMockRecorder) Metadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockInstance)(nil).Metadata)) +} + +// NetworkService mocks base method. +func (m *MockInstance) NetworkService() runtime.BasicNetwork { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetworkService") + ret0, _ := ret[0].(runtime.BasicNetwork) + return ret0 +} + +// NetworkService indicates an expected call of NetworkService. +func (mr *MockInstanceMockRecorder) NetworkService() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkService", reflect.TypeOf((*MockInstance)(nil).NetworkService)) +} + +// NodeStorage mocks base method. +func (m *MockInstance) NodeStorage() runtime.NodeStorage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeStorage") + ret0, _ := ret[0].(runtime.NodeStorage) + return ret0 +} + +// NodeStorage indicates an expected call of NodeStorage. 
+func (mr *MockInstanceMockRecorder) NodeStorage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStorage", reflect.TypeOf((*MockInstance)(nil).NodeStorage)) +} + +// OffchainWorker mocks base method. +func (m *MockInstance) OffchainWorker() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OffchainWorker") +} + +// OffchainWorker indicates an expected call of OffchainWorker. +func (mr *MockInstanceMockRecorder) OffchainWorker() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OffchainWorker", reflect.TypeOf((*MockInstance)(nil).OffchainWorker)) +} + +// ParachainHostAsyncBackingParams mocks base method. +func (m *MockInstance) ParachainHostAsyncBackingParams() (*parachaintypes.AsyncBackingParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostAsyncBackingParams") + ret0, _ := ret[0].(*parachaintypes.AsyncBackingParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostAsyncBackingParams indicates an expected call of ParachainHostAsyncBackingParams. +func (mr *MockInstanceMockRecorder) ParachainHostAsyncBackingParams() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostAsyncBackingParams", reflect.TypeOf((*MockInstance)(nil).ParachainHostAsyncBackingParams)) +} + +// ParachainHostAvailabilityCores mocks base method. +func (m *MockInstance) ParachainHostAvailabilityCores() ([]parachaintypes.CoreState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostAvailabilityCores") + ret0, _ := ret[0].([]parachaintypes.CoreState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostAvailabilityCores indicates an expected call of ParachainHostAvailabilityCores. +func (mr *MockInstanceMockRecorder) ParachainHostAvailabilityCores() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostAvailabilityCores", reflect.TypeOf((*MockInstance)(nil).ParachainHostAvailabilityCores)) +} + +// ParachainHostBackingConstraints mocks base method. +func (m *MockInstance) ParachainHostBackingConstraints(paraID parachaintypes.ParaID) (*parachaintypes.VStagingConstraints, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostBackingConstraints", paraID) + ret0, _ := ret[0].(*parachaintypes.VStagingConstraints) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostBackingConstraints indicates an expected call of ParachainHostBackingConstraints. +func (mr *MockInstanceMockRecorder) ParachainHostBackingConstraints(paraID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostBackingConstraints", reflect.TypeOf((*MockInstance)(nil).ParachainHostBackingConstraints), paraID) +} + +// ParachainHostCandidateEvents mocks base method. +func (m *MockInstance) ParachainHostCandidateEvents() ([]parachaintypes.CandidateEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCandidateEvents") + ret0, _ := ret[0].([]parachaintypes.CandidateEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCandidateEvents indicates an expected call of ParachainHostCandidateEvents. 
+func (mr *MockInstanceMockRecorder) ParachainHostCandidateEvents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCandidateEvents", reflect.TypeOf((*MockInstance)(nil).ParachainHostCandidateEvents)) +} + +// ParachainHostCandidatePendingAvailability mocks base method. +func (m *MockInstance) ParachainHostCandidatePendingAvailability(parachainID parachaintypes.ParaID) (*parachaintypes.CommittedCandidateReceiptV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCandidatePendingAvailability", parachainID) + ret0, _ := ret[0].(*parachaintypes.CommittedCandidateReceiptV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCandidatePendingAvailability indicates an expected call of ParachainHostCandidatePendingAvailability. +func (mr *MockInstanceMockRecorder) ParachainHostCandidatePendingAvailability(parachainID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCandidatePendingAvailability", reflect.TypeOf((*MockInstance)(nil).ParachainHostCandidatePendingAvailability), parachainID) +} + +// ParachainHostCandidatesPendingAvailability mocks base method. +func (m *MockInstance) ParachainHostCandidatesPendingAvailability(paraID parachaintypes.ParaID) ([]parachaintypes.CommittedCandidateReceiptV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCandidatesPendingAvailability", paraID) + ret0, _ := ret[0].([]parachaintypes.CommittedCandidateReceiptV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCandidatesPendingAvailability indicates an expected call of ParachainHostCandidatesPendingAvailability. +func (mr *MockInstanceMockRecorder) ParachainHostCandidatesPendingAvailability(paraID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCandidatesPendingAvailability", reflect.TypeOf((*MockInstance)(nil).ParachainHostCandidatesPendingAvailability), paraID) +} + +// ParachainHostCheckValidationOutputs mocks base method. +func (m *MockInstance) ParachainHostCheckValidationOutputs(parachainID parachaintypes.ParaID, outputs parachaintypes.CandidateCommitments) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCheckValidationOutputs", parachainID, outputs) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCheckValidationOutputs indicates an expected call of ParachainHostCheckValidationOutputs. +func (mr *MockInstanceMockRecorder) ParachainHostCheckValidationOutputs(parachainID, outputs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCheckValidationOutputs", reflect.TypeOf((*MockInstance)(nil).ParachainHostCheckValidationOutputs), parachainID, outputs) +} + +// ParachainHostClaimQueue mocks base method. +func (m *MockInstance) ParachainHostClaimQueue() (parachaintypes.ClaimQueue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostClaimQueue") + ret0, _ := ret[0].(parachaintypes.ClaimQueue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostClaimQueue indicates an expected call of ParachainHostClaimQueue. 
+func (mr *MockInstanceMockRecorder) ParachainHostClaimQueue() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostClaimQueue", reflect.TypeOf((*MockInstance)(nil).ParachainHostClaimQueue)) +} + +// ParachainHostDisabledValidators mocks base method. +func (m *MockInstance) ParachainHostDisabledValidators() ([]parachaintypes.ValidatorIndex, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostDisabledValidators") + ret0, _ := ret[0].([]parachaintypes.ValidatorIndex) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostDisabledValidators indicates an expected call of ParachainHostDisabledValidators. +func (mr *MockInstanceMockRecorder) ParachainHostDisabledValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostDisabledValidators", reflect.TypeOf((*MockInstance)(nil).ParachainHostDisabledValidators)) +} + +// ParachainHostDisputes mocks base method. +func (m *MockInstance) ParachainHostDisputes() (map[parachaintypes.DisputeKey]parachaintypes.DisputeState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostDisputes") + ret0, _ := ret[0].(map[parachaintypes.DisputeKey]parachaintypes.DisputeState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostDisputes indicates an expected call of ParachainHostDisputes. +func (mr *MockInstanceMockRecorder) ParachainHostDisputes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostDisputes", reflect.TypeOf((*MockInstance)(nil).ParachainHostDisputes)) +} + +// ParachainHostMinimumBackingVotes mocks base method. +func (m *MockInstance) ParachainHostMinimumBackingVotes() (uint32, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostMinimumBackingVotes") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostMinimumBackingVotes indicates an expected call of ParachainHostMinimumBackingVotes. +func (mr *MockInstanceMockRecorder) ParachainHostMinimumBackingVotes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostMinimumBackingVotes", reflect.TypeOf((*MockInstance)(nil).ParachainHostMinimumBackingVotes)) +} + +// ParachainHostNodeFeatures mocks base method. +func (m *MockInstance) ParachainHostNodeFeatures() (parachaintypes.BitVec, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostNodeFeatures") + ret0, _ := ret[0].(parachaintypes.BitVec) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostNodeFeatures indicates an expected call of ParachainHostNodeFeatures. +func (mr *MockInstanceMockRecorder) ParachainHostNodeFeatures() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostNodeFeatures", reflect.TypeOf((*MockInstance)(nil).ParachainHostNodeFeatures)) +} + +// ParachainHostPersistedValidationData mocks base method. 
+func (m *MockInstance) ParachainHostPersistedValidationData(parachaidID parachaintypes.ParaID, assumption parachaintypes.OccupiedCoreAssumption) (*parachaintypes.PersistedValidationData, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostPersistedValidationData", parachaidID, assumption) + ret0, _ := ret[0].(*parachaintypes.PersistedValidationData) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostPersistedValidationData indicates an expected call of ParachainHostPersistedValidationData. +func (mr *MockInstanceMockRecorder) ParachainHostPersistedValidationData(parachaidID, assumption any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostPersistedValidationData", reflect.TypeOf((*MockInstance)(nil).ParachainHostPersistedValidationData), parachaidID, assumption) +} + +// ParachainHostSchedulingLookAhead mocks base method. +func (m *MockInstance) ParachainHostSchedulingLookAhead() (uint32, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSchedulingLookAhead") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSchedulingLookAhead indicates an expected call of ParachainHostSchedulingLookAhead. +func (mr *MockInstanceMockRecorder) ParachainHostSchedulingLookAhead() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSchedulingLookAhead", reflect.TypeOf((*MockInstance)(nil).ParachainHostSchedulingLookAhead)) +} + +// ParachainHostSessionExecutorParams mocks base method. +func (m *MockInstance) ParachainHostSessionExecutorParams(index parachaintypes.SessionIndex) (*parachaintypes.ExecutorParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSessionExecutorParams", index) + ret0, _ := ret[0].(*parachaintypes.ExecutorParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSessionExecutorParams indicates an expected call of ParachainHostSessionExecutorParams. +func (mr *MockInstanceMockRecorder) ParachainHostSessionExecutorParams(index any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSessionExecutorParams", reflect.TypeOf((*MockInstance)(nil).ParachainHostSessionExecutorParams), index) +} + +// ParachainHostSessionIndexForChild mocks base method. +func (m *MockInstance) ParachainHostSessionIndexForChild() (parachaintypes.SessionIndex, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSessionIndexForChild") + ret0, _ := ret[0].(parachaintypes.SessionIndex) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSessionIndexForChild indicates an expected call of ParachainHostSessionIndexForChild. +func (mr *MockInstanceMockRecorder) ParachainHostSessionIndexForChild() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSessionIndexForChild", reflect.TypeOf((*MockInstance)(nil).ParachainHostSessionIndexForChild)) +} + +// ParachainHostSessionInfo mocks base method. +func (m *MockInstance) ParachainHostSessionInfo(sessionIndex parachaintypes.SessionIndex) (*parachaintypes.SessionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSessionInfo", sessionIndex) + ret0, _ := ret[0].(*parachaintypes.SessionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSessionInfo indicates an expected call of ParachainHostSessionInfo. 
+func (mr *MockInstanceMockRecorder) ParachainHostSessionInfo(sessionIndex any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSessionInfo", reflect.TypeOf((*MockInstance)(nil).ParachainHostSessionInfo), sessionIndex) +} + +// ParachainHostValidationCode mocks base method. +func (m *MockInstance) ParachainHostValidationCode(parachaidID parachaintypes.ParaID, assumption parachaintypes.OccupiedCoreAssumption) (*parachaintypes.ValidationCode, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidationCode", parachaidID, assumption) + ret0, _ := ret[0].(*parachaintypes.ValidationCode) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidationCode indicates an expected call of ParachainHostValidationCode. +func (mr *MockInstanceMockRecorder) ParachainHostValidationCode(parachaidID, assumption any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidationCode", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidationCode), parachaidID, assumption) +} + +// ParachainHostValidationCodeByHash mocks base method. +func (m *MockInstance) ParachainHostValidationCodeByHash(validationCodeHash common.Hash) (*parachaintypes.ValidationCode, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidationCodeByHash", validationCodeHash) + ret0, _ := ret[0].(*parachaintypes.ValidationCode) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidationCodeByHash indicates an expected call of ParachainHostValidationCodeByHash. +func (mr *MockInstanceMockRecorder) ParachainHostValidationCodeByHash(validationCodeHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidationCodeByHash", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidationCodeByHash), validationCodeHash) +} + +// ParachainHostValidatorGroups mocks base method. +func (m *MockInstance) ParachainHostValidatorGroups() (*parachaintypes.ValidatorGroups, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidatorGroups") + ret0, _ := ret[0].(*parachaintypes.ValidatorGroups) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidatorGroups indicates an expected call of ParachainHostValidatorGroups. +func (mr *MockInstanceMockRecorder) ParachainHostValidatorGroups() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidatorGroups", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidatorGroups)) +} + +// ParachainHostValidators mocks base method. +func (m *MockInstance) ParachainHostValidators() ([]parachaintypes.ValidatorID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidators") + ret0, _ := ret[0].([]parachaintypes.ValidatorID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidators indicates an expected call of ParachainHostValidators. +func (mr *MockInstanceMockRecorder) ParachainHostValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidators", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidators)) +} + +// PaymentQueryInfo mocks base method. 
+func (m *MockInstance) PaymentQueryInfo(ext []byte) (*types.RuntimeDispatchInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaymentQueryInfo", ext) + ret0, _ := ret[0].(*types.RuntimeDispatchInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaymentQueryInfo indicates an expected call of PaymentQueryInfo. +func (mr *MockInstanceMockRecorder) PaymentQueryInfo(ext any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaymentQueryInfo", reflect.TypeOf((*MockInstance)(nil).PaymentQueryInfo), ext) +} + +// RandomSeed mocks base method. +func (m *MockInstance) RandomSeed() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RandomSeed") +} + +// RandomSeed indicates an expected call of RandomSeed. +func (mr *MockInstanceMockRecorder) RandomSeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RandomSeed", reflect.TypeOf((*MockInstance)(nil).RandomSeed)) +} + +// SetContextStorage mocks base method. +func (m *MockInstance) SetContextStorage(s runtime.Storage) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetContextStorage", s) +} + +// SetContextStorage indicates an expected call of SetContextStorage. +func (mr *MockInstanceMockRecorder) SetContextStorage(s any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContextStorage", reflect.TypeOf((*MockInstance)(nil).SetContextStorage), s) +} + +// Stop mocks base method. +func (m *MockInstance) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockInstanceMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockInstance)(nil).Stop)) +} + +// ValidateTransaction mocks base method. +func (m *MockInstance) ValidateTransaction(e types.Extrinsic) (*transaction.Validity, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateTransaction", e) + ret0, _ := ret[0].(*transaction.Validity) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateTransaction indicates an expected call of ValidateTransaction. +func (mr *MockInstanceMockRecorder) ValidateTransaction(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTransaction", reflect.TypeOf((*MockInstance)(nil).ValidateTransaction), e) +} + +// Validator mocks base method. +func (m *MockInstance) Validator() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validator") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Validator indicates an expected call of Validator. +func (mr *MockInstanceMockRecorder) Validator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validator", reflect.TypeOf((*MockInstance)(nil).Validator)) +} + +// Version mocks base method. +func (m *MockInstance) Version() (runtime.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(runtime.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. 
+func (mr *MockInstanceMockRecorder) Version() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockInstance)(nil).Version)) +} diff --git a/dot/parachain/statement-distribution/mocks_req_manager_test.go b/dot/parachain/statement-distribution/mocks_req_manager_test.go new file mode 100644 index 0000000000..d602054035 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_req_manager_test.go @@ -0,0 +1,53 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: requestManager) +// +// Generated by this command: +// +// mockgen -destination=mocks_req_manager_test.go -package=statementdistribution . requestManager +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + common "github.com/ChainSafe/gossamer/lib/common" + gomock "go.uber.org/mock/gomock" +) + +// MockrequestManager is a mock of requestManager interface. +type MockrequestManager struct { + ctrl *gomock.Controller + recorder *MockrequestManagerMockRecorder + isgomock struct{} +} + +// MockrequestManagerMockRecorder is the mock recorder for MockrequestManager. +type MockrequestManagerMockRecorder struct { + mock *MockrequestManager +} + +// NewMockrequestManager creates a new mock instance. +func NewMockrequestManager(ctrl *gomock.Controller) *MockrequestManager { + mock := &MockrequestManager{ctrl: ctrl} + mock.recorder = &MockrequestManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockrequestManager) EXPECT() *MockrequestManagerMockRecorder { + return m.recorder +} + +// removeByRelayParent mocks base method. +func (m *MockrequestManager) removeByRelayParent(rp common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "removeByRelayParent", rp) +} + +// removeByRelayParent indicates an expected call of removeByRelayParent. 
+func (mr *MockrequestManagerMockRecorder) removeByRelayParent(rp any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "removeByRelayParent", reflect.TypeOf((*MockrequestManager)(nil).removeByRelayParent), rp) +} diff --git a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index c91e965db2..12e3bb07f1 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -52,9 +52,8 @@ type statementStore interface { // skipcq:SCC-U1000 type perRelayParentState struct { - localValidator *localValidatorStore + localValidator *localValidatorState statementStore statementStore // TODO #4719: Create statement store - secondingLimit uint session parachaintypes.SessionIndex transposedClaimQueue parachaintypes.TransposedClaimQueue groupsPerPara map[parachaintypes.ParaID][]parachaintypes.GroupIndex @@ -90,7 +89,7 @@ func (p *perRelayParentState) disableBitmask(group []parachaintypes.ValidatorInd return bm, err } -type localValidatorStore struct { +type localValidatorState struct { gridTracker *gridTracker active *activeValidatorState // skipcq:SCC-U1000 } @@ -115,7 +114,8 @@ type perSessionState struct { allowV2Descriptors bool } -func newPerSessionState(sessionInfo *parachaintypes.SessionInfo, +func newPerSessionState( + sessionInfo *parachaintypes.SessionInfo, keystore keystore.Keystore, backingThreshold uint32, allowV2Descriptor bool, diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index f71966237c..c4dafe9328 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -37,13 +37,13 @@ var ( var logger = log.NewFromGlobal(log.AddContext("pkg", "parachain-statement-distribution")) -type BlockState interface { +type blockState interface { GetHeader(hash common.Hash) (header *types.Header, err error) GetRuntime(blockHash common.Hash) (instance runtime.Instance, err error) } type StatementDistribution struct { - blockState BlockState + blockState blockState SubSystemToOverseer chan<- any state *v2State } @@ -165,13 +165,15 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, requiredParentHash *common.Hash, requiredParentParaID *parachaintypes.ParaID, knowHypotheticals *[]parachaintypes.HypotheticalCandidate) { + fmt.Println("fragmentChainUpdateInner", rp) + // 1. get hypothetical candidates var hypotheticals []parachaintypes.HypotheticalCandidate if knowHypotheticals != nil { hypotheticals = *knowHypotheticals } else { - s.state.candidates.frontierHypotheticals(requiredParentHash, requiredParentParaID) + hypotheticals = s.state.candidates.frontierHypotheticals(requiredParentHash, requiredParentParaID) } // 2. find out which are in the frontier @@ -191,7 +193,6 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, return case resp := <-response: candidateMemberships = resp - } // 3. note that they are importable under a given leaf hash. @@ -245,8 +246,6 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, ) } } - - panic("unimplemented") } // Send a peer all pending cluster statements for a relay parent. 
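As a reading aid for the happy-path test and the generated Instance mock above: the per-leaf work reduces to one GetRuntime lookup plus a handful of ParachainHost calls, split between per-session and per-relay-parent state. The sketch below is not the real handleActiveLeafUpdate/newPerSessionState split; it only condenses the calls the mocks expect, and it assumes it sits in the statementdistribution package so blockState, common and the runtime types are in scope.

// Sketch only: a condensed view of the runtime calls that
// TestHandleActiveLeavesUpdate_HappyPath and MockInstance wire up for a single
// new relay parent. Treat it as documentation, not the implementation.
func sketchRuntimeCallsForLeaf(bs blockState, rp common.Hash) error {
	rt, err := bs.GetRuntime(rp)
	if err != nil {
		return err
	}

	// per-session state, built the first time a session index is seen
	sessionIdx, err := rt.ParachainHostSessionIndexForChild()
	if err != nil {
		return err
	}
	if _, err := rt.ParachainHostSessionInfo(sessionIdx); err != nil {
		return err
	}
	if _, err := rt.ParachainHostMinimumBackingVotes(); err != nil { // backing threshold
		return err
	}
	if _, err := rt.ParachainHostNodeFeatures(); err != nil { // e.g. v2 descriptor support
		return err
	}

	// per-relay-parent state: validator groups, claim queue, disabled validators
	if _, err := rt.ParachainHostValidatorGroups(); err != nil {
		return err
	}
	if _, err := rt.ParachainHostClaimQueue(); err != nil {
		return err
	}
	if _, err := rt.ParachainHostDisabledValidators(); err != nil {
		return err
	}
	return nil
}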
diff --git a/dot/parachain/statement-distribution/statement_distribution_test.go b/dot/parachain/statement-distribution/statement_distribution_test.go index 271b041e8a..abc7ce051a 100644 --- a/dot/parachain/statement-distribution/statement_distribution_test.go +++ b/dot/parachain/statement-distribution/statement_distribution_test.go @@ -227,7 +227,7 @@ func TestSendPendingGridMessages(t *testing.T) { validationVersion := validationprotocol.ValidationVersionV3 peerValidatorID := parachaintypes.ValidatorIndex(0) rpState := &perRelayParentState{ - localValidator: &localValidatorStore{ + localValidator: &localValidatorState{ gridTracker: gt, }, } @@ -257,12 +257,12 @@ func TestSendPendingGridMessages(t *testing.T) { peerID := peer.ID("peer-ex") validationVersion := validationprotocol.ValidationVersionV3 rpState := &perRelayParentState{ - localValidator: &localValidatorStore{ + localValidator: &localValidatorState{ gridTracker: gt, }, } - candidatesMock := NewMockcandidatesStore(ctrl) + candidatesMock := NewMockcandidatesTracker(ctrl) candidatesMock.EXPECT(). getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). Return(nil, false) @@ -291,7 +291,7 @@ func TestSendPendingGridMessages(t *testing.T) { }, ) - candidatesMock := NewMockcandidatesStore(ctrl) + candidatesMock := NewMockcandidatesTracker(ctrl) candidatesMock.EXPECT(). getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). Return(&confirmedCandidate{ @@ -330,7 +330,7 @@ func TestSendPendingGridMessages(t *testing.T) { }) rpState := &perRelayParentState{ - localValidator: &localValidatorStore{ + localValidator: &localValidatorState{ gridTracker: gt, }, statementStore: stmtStoreMock, @@ -389,7 +389,7 @@ func TestSendPendingGridMessages(t *testing.T) { }, ) - candidatesMock := NewMockcandidatesStore(ctrl) + candidatesMock := NewMockcandidatesTracker(ctrl) candidatesMock.EXPECT(). getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). Return(&confirmedCandidate{ @@ -449,7 +449,7 @@ func TestSendPendingGridMessages(t *testing.T) { }) rpState := &perRelayParentState{ - localValidator: &localValidatorStore{ + localValidator: &localValidatorState{ gridTracker: gt, }, statementStore: stmtStoreMock, From d389227f9e8e2f6d0e38ea83ea8de5f3c73a3822 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 11 Jun 2025 15:49:55 -0400 Subject: [PATCH 5/9] wip: more complex test for active leaves --- .../active_leaves_update.go | 14 +- .../active_leaves_update_test.go | 306 +++++++++++++++++- .../statement-distribution/state_v2.go | 2 +- .../statement_distribution.go | 9 +- 4 files changed, 312 insertions(+), 19 deletions(-) diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go index 597d0065a2..30f756c6de 100644 --- a/dot/parachain/statement-distribution/active_leaves_update.go +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -49,7 +49,6 @@ func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.Ac } } - fmt.Println("calling fragmentChainUpdateInner", &leaf.Hash) s.fragmentChainUpdateInner(&leaf.Hash, nil, nil, nil) return nil } @@ -175,7 +174,8 @@ func (s *StatementDistribution) handleActiveLeafUpdate(rp common.Hash) error { // Utility function to populate: // - per relay parent `ParaId` to `GroupIndex` mappings. 
// - per `GroupIndex` claim queue assignments -func determineGroupAssignment(numCores int, +func determineGroupAssignment( + numCores int, groupRotationInfo *parachaintypes.GroupRotationInfo, claimQueue *parachaintypes.ClaimQueue, ) (map[parachaintypes.ParaID][]parachaintypes.GroupIndex, map[parachaintypes.GroupIndex][]parachaintypes.ParaID) { @@ -193,6 +193,7 @@ func determineGroupAssignment(numCores int, for coreIdx, paras := range schedule { groupIdx := groupRotationInfo.GroupForCore(coreIdx, uint(numCores)) + assignmentsPerGroup[groupIdx] = slices.Clone(paras) for _, para := range paras { @@ -264,10 +265,11 @@ func (s *StatementDistribution) handleDeactivatedLeaves(leaves []common.Hash) { // prospective_parachains gets enabled maps.DeleteFunc(s.state.unusedTopologies, func(s parachaintypes.SessionIndex, _v events.NewGossipTopology) bool { _, ok := sessions[s] - // delete if: - // The session index does not exists in the sessions map - // Or the session index exists BUT is not the lastSessionIndex - return !ok || (lastSessionIndex != nil && *lastSessionIndex != s) + if ok || lastSessionIndex != nil && *lastSessionIndex == s { + return false + } + + return true }) } diff --git a/dot/parachain/statement-distribution/active_leaves_update_test.go b/dot/parachain/statement-distribution/active_leaves_update_test.go index bd5b131e5c..d472bccde4 100644 --- a/dot/parachain/statement-distribution/active_leaves_update_test.go +++ b/dot/parachain/statement-distribution/active_leaves_update_test.go @@ -13,7 +13,6 @@ import ( "go.uber.org/mock/gomock" ) -// TestHandleActiveLeavesUpdate_HappyPath tests the happy path for handleActiveLeavesUpdate. func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -51,13 +50,13 @@ func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) sessionInfoDummy := parachaintypes.SessionInfo{ - ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1}, + ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1, 2, 3}, RandomSeed: [32]byte{}, DisputePeriod: parachaintypes.SessionIndex(10), - Validators: []parachaintypes.ValidatorID{dummyPubKey, {2}}, - DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{3}, {4}}, - AssignmentKeys: []parachaintypes.AssignmentID{{5}, {6}}, - ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}}, + Validators: []parachaintypes.ValidatorID{dummyPubKey, {1}, {2}, {3}}, + DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{0}, {1}, {2}, {3}}, + AssignmentKeys: []parachaintypes.AssignmentID{parachaintypes.AssignmentID(dummyPubKey), {1}, {2}, {3}}, + ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}, {2, 3}}, NCores: 2, ZerothDelayTrancheWidth: 1, RelayVRFModuloSamples: 1, @@ -188,3 +187,298 @@ func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { require.Len(t, state.perSession, 1) require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) } + +// func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent (t *testing.T) { +// ctrl := gomock.NewController(t) +// defer ctrl.Finish() + +// leafHash := common.MustBlake2bHash([]byte("leaf1")) +// activatedLeaf := ¶chaintypes.ActivatedLeaf{Hash: leafHash} + +// implicitViewMock := NewMockImplicitView(ctrl) +// implicitViewMock.EXPECT(). +// ActivateLeaf(leafHash, gomock.Any()). +// Return(nil) +// implicitViewMock.EXPECT(). +// AllAllowedRelayParents(). 
+// Return([]common.Hash{leafHash}) + +// rtInstanceMock := NewMockInstance(ctrl) +// rtInstanceMock.EXPECT(). +// ParachainHostDisabledValidators(). +// Return([]parachaintypes.ValidatorIndex{}, nil) + +// // as the returned session index does not exists +// // in the perSession map, it will be created by handleActiveLeafUpdate +// // the next mocks are needed to ensure the creation of the session state +// rtInstanceMock.EXPECT(). +// ParachainHostSessionIndexForChild(). +// Return(parachaintypes.SessionIndex(1), nil) + +// dummyKeystore := keystore.NewGenericKeystore("generic_test_keystore") +// kp, err := sr25519.GenerateKeypair() +// require.NoError(t, err) + +// err = dummyKeystore.Insert(kp) +// require.NoError(t, err) + +// dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) + +// sessionInfoDummy := parachaintypes.SessionInfo{ +// ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1, 2, 3}, +// RandomSeed: [32]byte{}, +// DisputePeriod: parachaintypes.SessionIndex(10), +// Validators: []parachaintypes.ValidatorID{dummyPubKey, {1}, {2}, {3}}, +// DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{0}, {1}, {2}, {3}}, +// AssignmentKeys: []parachaintypes.AssignmentID{parachaintypes.AssignmentID(dummyPubKey), {1}, {2}, {3}}, +// ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}, {2, 3}}, +// NCores: 2, +// ZerothDelayTrancheWidth: 1, +// RelayVRFModuloSamples: 1, +// NDelayTranches: 1, +// NoShowSlots: 1, +// NeededApprovals: 1, +// } + +// rtInstanceMock.EXPECT(). +// ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). +// Return(&sessionInfoDummy, nil) + +// rtInstanceMock.EXPECT(). +// ParachainHostMinimumBackingVotes(). +// Return(uint32(3), nil) + +// featuresBitVec, err := parachaintypes.NewBitVec([]bool{true, true, false, true}) +// require.NoError(t, err) + +// rtInstanceMock.EXPECT(). +// ParachainHostNodeFeatures(). +// Return(featuresBitVec, nil) + +// // the next runtime instances are needed to create the +// // per relay parent state +// dummyValidatorGroups := ¶chaintypes.ValidatorGroups{ +// Validators: [][]parachaintypes.ValidatorIndex{{1}, {2}}, +// GroupRotationInfo: parachaintypes.GroupRotationInfo{ +// SessionStartBlock: parachaintypes.BlockNumber(100), +// GroupRotationFrequency: parachaintypes.BlockNumber(10), +// Now: parachaintypes.BlockNumber(105), +// }, +// } +// rtInstanceMock.EXPECT(). +// ParachainHostValidatorGroups(). +// Return(dummyValidatorGroups, nil) + +// dummyClaimQueue := parachaintypes.ClaimQueue{ +// parachaintypes.CoreIndex{Index: 0}: {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, +// parachaintypes.CoreIndex{Index: 1}: {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, +// } +// transposedDummyClaimQueue := dummyClaimQueue.ToTransposed() + +// rtInstanceMock.EXPECT(). +// ParachainHostClaimQueue(). +// Return(dummyClaimQueue, nil) + +// blockStateMock := NewMockblockState(ctrl) +// blockStateMock.EXPECT(). +// GetRuntime(leafHash). +// Return(rtInstanceMock, nil) + +// candidatesMock := NewMockcandidatesTracker(ctrl) +// candidatesMock.EXPECT(). +// frontierHypotheticals(nil, nil). +// Return([]parachaintypes.HypotheticalCandidate{}) + +// candidatesMock.EXPECT(). +// isConfirmed(parachaintypes.CandidateHash{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))}). 
+// Return(true) + +// compactStmtToSend := ¶chaintypes.CompactValid{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))} +// validatorStmtPair := []originatorStatementPair{ +// { +// validatorIndex: parachaintypes.ValidatorIndex(1), +// compactStmt: compactStmtToSend, +// }, +// } + +// var signature [64]byte +// copy(signature[:], bytes.Repeat([]byte{0x01}, 64)) +// signedStmt := ¶chaintypes.SignedStatement{ +// Payload: *compactStmtToSend.ToEncodable(), +// ValidatorIndex: parachaintypes.ValidatorIndex(2), +// Signature: parachaintypes.ValidatorSignature(parachaintypes.Signature(signature)), +// } + +// stmtStoreMock := NewMockstatementStore(ctrl) +// stmtStoreMock.EXPECT(). +// validatorStatement(validatorStmtPair). +// Return(signedStmt) + +// // new mocks related to sendPendingClusterStatements function +// clusterTrackerMock := NewMockclusterTracker(ctrl) +// clusterTrackerMock.EXPECT(). +// pendingStatementsFor(parachaintypes.ValidatorIndex(2)). +// Return(validatorStmtPair) + +// peer1AuthDiscoveryID := parachaintypes.AuthorityDiscoveryID{2} + +// state := &v2State{ +// implicitView: implicitViewMock, +// perRelayParent: make(map[common.Hash]*perRelayParentState), +// perSession: make(map[parachaintypes.SessionIndex]*perSessionState), +// peers: map[string]peerState{ +// "peer1": { +// view: parachaintypes.View{ +// Heads: []common.Hash{leafHash}, +// }, +// protocolVersion: validationprotocol.ValidationVersionV3, +// implicitView: map[common.Hash]struct{}{}, +// discoveryIds: &map[parachaintypes.AuthorityDiscoveryID]struct{}{ +// peer1AuthDiscoveryID: {}, +// }, +// }, +// }, +// keystore: dummyKeystore, +// candidates: candidatesMock, +// } + +// overseerCh := make(chan any, 1) +// sd := &StatementDistribution{ +// state: state, +// blockState: blockStateMock, +// SubSystemToOverseer: overseerCh, +// } + +// // start a goroutine to handle the overseer subsystem +// wg := sync.WaitGroup{} +// wg.Add(1) +// go func() { +// defer wg.Done() + +// // first message is a hypothetical membership request +// hypotheticalMsg := <-overseerCh +// msg, ok := hypotheticalMsg.(prospectiveparachainsmessages.GetHypotheticalMembership) +// require.True(t, ok) + +// // just return an empty slice +// msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} + +// // second message is a statement distribution message +// // we don't expect a third message about pending grid statements +// // because the grid tracker is empty and no pending statements are available +// // on a new active leaf update +// sendValidationMsg := <-overseerCh +// validationMsg, ok := sendValidationMsg.(*networkbridgemessages.SendValidationMessage) +// require.True(t, ok) + +// require.Equal(t, []peer.ID{peer.ID("peer1")}, validationMsg.To) + +// expectedSDMV3 := validationprotocol.NewStatementDistributionMessage() +// err := expectedSDMV3.SetValue(validationprotocol.Statement{ +// RelayParent: leafHash, +// Compact: parachaintypes.UncheckedSignedCompactStatement(*signedStmt), +// }) +// require.NoError(t, err) + +// expectedMsg := validationprotocol.NewValidationProtocolVDT() +// err = expectedMsg.SetValue(validationprotocol.StatementDistribution{ +// StatementDistributionMessage: expectedSDMV3}) +// require.NoError(t, err) + +// require.Equal(t, expectedMsg, validationMsg.ValidationProtocolMessage) +// }() + +// // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. +// // Instead, we just ensure no panic and no error. 
+// err = sd.handleActiveLeavesUpdate(activatedLeaf) +// require.NoError(t, err) +// wg.Wait() + +// // assertions +// expectedPerRelayParentState := &perRelayParentState{ +// localValidator: &localValidatorState{ +// gridTracker: newGridTracker(), +// active: &activeValidatorState{ +// index: parachaintypes.ValidatorIndex(0), +// groupIndex: parachaintypes.GroupIndex(0), +// assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, +// clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) +// }, +// }, +// statementStore: nil, +// session: parachaintypes.SessionIndex(1), +// transposedClaimQueue: transposedDummyClaimQueue, +// groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ +// parachaintypes.ParaID(1): {parachaintypes.GroupIndex(0)}, +// parachaintypes.ParaID(2): {parachaintypes.GroupIndex(0)}, +// parachaintypes.ParaID(3): {parachaintypes.GroupIndex(1)}, +// parachaintypes.ParaID(4): {parachaintypes.GroupIndex(1)}, +// }, +// disabledValidators: make(map[parachaintypes.ValidatorIndex]struct{}), +// assignmentsPerGroup: map[parachaintypes.GroupIndex][]parachaintypes.ParaID{ +// parachaintypes.GroupIndex(0): {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, +// parachaintypes.GroupIndex(1): {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, +// }, +// } +// require.Len(t, state.perRelayParent, 1) +// require.Equal(t, expectedPerRelayParentState, state.perRelayParent[leafHash]) + +// expectedSessionState := newPerSessionState( +// &sessionInfoDummy, +// dummyKeystore, +// 3, +// true, +// ) +// require.Len(t, state.perSession, 1) +// require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) +// } + +func TestHandleDeactivatedLeaves(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + implicitViewMock := NewMockImplicitView(ctrl) + implicitViewMock.EXPECT(). + DeactivateLeaf(common.Hash{0xcd}). + Return([]common.Hash{{0xab}, {0xcd}}) + + reqManagerMock := NewMockrequestManager(ctrl) + reqManagerMock.EXPECT(). + removeByRelayParent(common.Hash{0xcd}) + + reqManagerMock.EXPECT(). + removeByRelayParent(common.Hash{0xab}) + + candidatesMock := NewMockcandidatesTracker(ctrl) + candidatesMock.EXPECT(). 
+ onDeactivateLeaves([]common.Hash{{0xcd}}, gomock.Any()) + + state := &v2State{ + implicitView: implicitViewMock, + perRelayParent: map[common.Hash]*perRelayParentState{ + {0xab}: {}, + {0xcd}: {}, + {0xef}: {session: parachaintypes.SessionIndex(2)}, + }, + perSession: map[parachaintypes.SessionIndex]*perSessionState{ + parachaintypes.SessionIndex(1): nil, + parachaintypes.SessionIndex(2): nil, + }, + requestManager: reqManagerMock, + candidates: candidatesMock, + } + + sd := &StatementDistribution{ + state: state, + } + + sd.handleDeactivatedLeaves([]common.Hash{{0xcd}}) + + require.Len(t, sd.state.perRelayParent, 1) + require.Equal(t, &perRelayParentState{session: parachaintypes.SessionIndex(2)}, + sd.state.perRelayParent[common.Hash{0xef}]) + + require.Len(t, sd.state.perSession, 1) + _, ok := sd.state.perSession[parachaintypes.SessionIndex(2)] + require.True(t, ok) +} diff --git a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index 12e3bb07f1..5bca3b3798 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -107,7 +107,7 @@ type perSessionState struct { sessionInfo *parachaintypes.SessionInfo groups *groups authLookup map[parachaintypes.AuthorityDiscoveryID]parachaintypes.ValidatorIndex - gridView any // TODO: use SessionTopologyView from statement-distribution grid (#4576) + gridView *sessionTopologyView // when localValidator is nil means it is inactive localValidator *parachaintypes.ValidatorIndex diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index c4dafe9328..5fd2a4e455 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -165,8 +165,6 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, requiredParentHash *common.Hash, requiredParentParaID *parachaintypes.ParaID, knowHypotheticals *[]parachaintypes.HypotheticalCandidate) { - fmt.Println("fragmentChainUpdateInner", rp) - // 1. get hypothetical candidates var hypotheticals []parachaintypes.HypotheticalCandidate @@ -251,12 +249,12 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, // Send a peer all pending cluster statements for a relay parent. 
func (s *StatementDistribution) sendPendingClusterStatements(rp common.Hash, peerID peer.ID, validationVersion validationprotocol.ValidationVersion, - peerValidatorID parachaintypes.ValidatorIndex, + peerValidatorIdx parachaintypes.ValidatorIndex, clusterTracker clusterTracker, candidates candidatesTracker, statementStore statementStore, ) { - pendingStmts := clusterTracker.pendingStatementsFor(peerValidatorID) + pendingStmts := clusterTracker.pendingStatementsFor(peerValidatorIdx) for _, stmt := range pendingStmts { if !candidates.isConfirmed(stmt.compactStmt.CandidateHash()) { continue @@ -264,7 +262,7 @@ func (s *StatementDistribution) sendPendingClusterStatements(rp common.Hash, msg := pendingStatementNetworkMessage(statementStore, rp, peerID, validationVersion, stmt) if msg != nil { - clusterTracker.noteSend(peerValidatorID, stmt.validatorIndex, stmt.compactStmt) + clusterTracker.noteSend(peerValidatorIdx, stmt.validatorIndex, stmt.compactStmt) // TODO: create a SendValidationMessages to send a batch of messages s.SubSystemToOverseer <- msg } @@ -659,7 +657,6 @@ func pendingStatementNetworkMessage( panic(fmt.Sprintf("unexpected error setting value in StatementDistributionMessageV3: %s", err)) } - // TODO: this will panic as validation protocol does not support V3 yet vp := validationprotocol.NewValidationProtocolVDT() err = vp.SetValue(validationprotocol.StatementDistribution{StatementDistributionMessage: sdmV3}) if err != nil { From 51fa1fb99bc7715201e8668c6b68eef4784b6a7a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Jun 2025 11:00:16 -0400 Subject: [PATCH 6/9] chore: include missing methods to cluster tracker impl --- .../active_leaves_update.go | 2 +- .../active_leaves_update_test.go | 486 +++++++++--------- .../statement-distribution/cluster_tracker.go | 78 +++ .../cluster_tracker_test.go | 98 ++++ .../statement-distribution/grid_tracker.go | 2 +- .../mocks_cluster_tracker_test.go | 80 --- .../mocks_generate_test.go | 1 - .../mocks_statement_store_test.go | 13 +- .../statement-distribution/state_v2.go | 14 +- .../statement_distribution.go | 12 +- .../statement-distribution/statement_store.go | 9 +- 11 files changed, 441 insertions(+), 354 deletions(-) delete mode 100644 dot/parachain/statement-distribution/mocks_cluster_tracker_test.go diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go index 30f756c6de..e32acb501c 100644 --- a/dot/parachain/statement-distribution/active_leaves_update.go +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -160,7 +160,7 @@ func (s *StatementDistribution) handleActiveLeafUpdate(rp common.Hash) error { s.state.perRelayParent[rp] = &perRelayParentState{ localValidator: localValidator, - statementStore: nil, // todo use statement store (#4719) + statementStore: newStatementStore(perSession.groups), session: sessionIdx, groupsPerPara: groupsPerPara, disabledValidators: disableValidatorsSet, diff --git a/dot/parachain/statement-distribution/active_leaves_update_test.go b/dot/parachain/statement-distribution/active_leaves_update_test.go index d472bccde4..8143bfbf87 100644 --- a/dot/parachain/statement-distribution/active_leaves_update_test.go +++ b/dot/parachain/statement-distribution/active_leaves_update_test.go @@ -1,14 +1,18 @@ package statementdistribution import ( + "bytes" "sync" "testing" + networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" prospectiveparachainsmessages 
"github.com/ChainSafe/gossamer/dot/parachain/prospective-parachains/messages" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" keystore "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" ) @@ -188,250 +192,244 @@ func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) } -// func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent (t *testing.T) { -// ctrl := gomock.NewController(t) -// defer ctrl.Finish() - -// leafHash := common.MustBlake2bHash([]byte("leaf1")) -// activatedLeaf := ¶chaintypes.ActivatedLeaf{Hash: leafHash} - -// implicitViewMock := NewMockImplicitView(ctrl) -// implicitViewMock.EXPECT(). -// ActivateLeaf(leafHash, gomock.Any()). -// Return(nil) -// implicitViewMock.EXPECT(). -// AllAllowedRelayParents(). -// Return([]common.Hash{leafHash}) - -// rtInstanceMock := NewMockInstance(ctrl) -// rtInstanceMock.EXPECT(). -// ParachainHostDisabledValidators(). -// Return([]parachaintypes.ValidatorIndex{}, nil) - -// // as the returned session index does not exists -// // in the perSession map, it will be created by handleActiveLeafUpdate -// // the next mocks are needed to ensure the creation of the session state -// rtInstanceMock.EXPECT(). -// ParachainHostSessionIndexForChild(). -// Return(parachaintypes.SessionIndex(1), nil) - -// dummyKeystore := keystore.NewGenericKeystore("generic_test_keystore") -// kp, err := sr25519.GenerateKeypair() -// require.NoError(t, err) - -// err = dummyKeystore.Insert(kp) -// require.NoError(t, err) - -// dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) - -// sessionInfoDummy := parachaintypes.SessionInfo{ -// ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1, 2, 3}, -// RandomSeed: [32]byte{}, -// DisputePeriod: parachaintypes.SessionIndex(10), -// Validators: []parachaintypes.ValidatorID{dummyPubKey, {1}, {2}, {3}}, -// DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{0}, {1}, {2}, {3}}, -// AssignmentKeys: []parachaintypes.AssignmentID{parachaintypes.AssignmentID(dummyPubKey), {1}, {2}, {3}}, -// ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}, {2, 3}}, -// NCores: 2, -// ZerothDelayTrancheWidth: 1, -// RelayVRFModuloSamples: 1, -// NDelayTranches: 1, -// NoShowSlots: 1, -// NeededApprovals: 1, -// } - -// rtInstanceMock.EXPECT(). -// ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). -// Return(&sessionInfoDummy, nil) - -// rtInstanceMock.EXPECT(). -// ParachainHostMinimumBackingVotes(). -// Return(uint32(3), nil) - -// featuresBitVec, err := parachaintypes.NewBitVec([]bool{true, true, false, true}) -// require.NoError(t, err) - -// rtInstanceMock.EXPECT(). -// ParachainHostNodeFeatures(). -// Return(featuresBitVec, nil) - -// // the next runtime instances are needed to create the -// // per relay parent state -// dummyValidatorGroups := ¶chaintypes.ValidatorGroups{ -// Validators: [][]parachaintypes.ValidatorIndex{{1}, {2}}, -// GroupRotationInfo: parachaintypes.GroupRotationInfo{ -// SessionStartBlock: parachaintypes.BlockNumber(100), -// GroupRotationFrequency: parachaintypes.BlockNumber(10), -// Now: parachaintypes.BlockNumber(105), -// }, -// } -// rtInstanceMock.EXPECT(). 
-// ParachainHostValidatorGroups(). -// Return(dummyValidatorGroups, nil) - -// dummyClaimQueue := parachaintypes.ClaimQueue{ -// parachaintypes.CoreIndex{Index: 0}: {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, -// parachaintypes.CoreIndex{Index: 1}: {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, -// } -// transposedDummyClaimQueue := dummyClaimQueue.ToTransposed() - -// rtInstanceMock.EXPECT(). -// ParachainHostClaimQueue(). -// Return(dummyClaimQueue, nil) - -// blockStateMock := NewMockblockState(ctrl) -// blockStateMock.EXPECT(). -// GetRuntime(leafHash). -// Return(rtInstanceMock, nil) - -// candidatesMock := NewMockcandidatesTracker(ctrl) -// candidatesMock.EXPECT(). -// frontierHypotheticals(nil, nil). -// Return([]parachaintypes.HypotheticalCandidate{}) - -// candidatesMock.EXPECT(). -// isConfirmed(parachaintypes.CandidateHash{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))}). -// Return(true) - -// compactStmtToSend := ¶chaintypes.CompactValid{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))} -// validatorStmtPair := []originatorStatementPair{ -// { -// validatorIndex: parachaintypes.ValidatorIndex(1), -// compactStmt: compactStmtToSend, -// }, -// } - -// var signature [64]byte -// copy(signature[:], bytes.Repeat([]byte{0x01}, 64)) -// signedStmt := ¶chaintypes.SignedStatement{ -// Payload: *compactStmtToSend.ToEncodable(), -// ValidatorIndex: parachaintypes.ValidatorIndex(2), -// Signature: parachaintypes.ValidatorSignature(parachaintypes.Signature(signature)), -// } - -// stmtStoreMock := NewMockstatementStore(ctrl) -// stmtStoreMock.EXPECT(). -// validatorStatement(validatorStmtPair). -// Return(signedStmt) - -// // new mocks related to sendPendingClusterStatements function -// clusterTrackerMock := NewMockclusterTracker(ctrl) -// clusterTrackerMock.EXPECT(). -// pendingStatementsFor(parachaintypes.ValidatorIndex(2)). 
-// Return(validatorStmtPair) - -// peer1AuthDiscoveryID := parachaintypes.AuthorityDiscoveryID{2} - -// state := &v2State{ -// implicitView: implicitViewMock, -// perRelayParent: make(map[common.Hash]*perRelayParentState), -// perSession: make(map[parachaintypes.SessionIndex]*perSessionState), -// peers: map[string]peerState{ -// "peer1": { -// view: parachaintypes.View{ -// Heads: []common.Hash{leafHash}, -// }, -// protocolVersion: validationprotocol.ValidationVersionV3, -// implicitView: map[common.Hash]struct{}{}, -// discoveryIds: &map[parachaintypes.AuthorityDiscoveryID]struct{}{ -// peer1AuthDiscoveryID: {}, -// }, -// }, -// }, -// keystore: dummyKeystore, -// candidates: candidatesMock, -// } - -// overseerCh := make(chan any, 1) -// sd := &StatementDistribution{ -// state: state, -// blockState: blockStateMock, -// SubSystemToOverseer: overseerCh, -// } - -// // start a goroutine to handle the overseer subsystem -// wg := sync.WaitGroup{} -// wg.Add(1) -// go func() { -// defer wg.Done() - -// // first message is a hypothetical membership request -// hypotheticalMsg := <-overseerCh -// msg, ok := hypotheticalMsg.(prospectiveparachainsmessages.GetHypotheticalMembership) -// require.True(t, ok) - -// // just return an empty slice -// msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} - -// // second message is a statement distribution message -// // we don't expect a third message about pending grid statements -// // because the grid tracker is empty and no pending statements are available -// // on a new active leaf update -// sendValidationMsg := <-overseerCh -// validationMsg, ok := sendValidationMsg.(*networkbridgemessages.SendValidationMessage) -// require.True(t, ok) - -// require.Equal(t, []peer.ID{peer.ID("peer1")}, validationMsg.To) - -// expectedSDMV3 := validationprotocol.NewStatementDistributionMessage() -// err := expectedSDMV3.SetValue(validationprotocol.Statement{ -// RelayParent: leafHash, -// Compact: parachaintypes.UncheckedSignedCompactStatement(*signedStmt), -// }) -// require.NoError(t, err) - -// expectedMsg := validationprotocol.NewValidationProtocolVDT() -// err = expectedMsg.SetValue(validationprotocol.StatementDistribution{ -// StatementDistributionMessage: expectedSDMV3}) -// require.NoError(t, err) - -// require.Equal(t, expectedMsg, validationMsg.ValidationProtocolMessage) -// }() - -// // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. -// // Instead, we just ensure no panic and no error. 
-// err = sd.handleActiveLeavesUpdate(activatedLeaf) -// require.NoError(t, err) -// wg.Wait() - -// // assertions -// expectedPerRelayParentState := &perRelayParentState{ -// localValidator: &localValidatorState{ -// gridTracker: newGridTracker(), -// active: &activeValidatorState{ -// index: parachaintypes.ValidatorIndex(0), -// groupIndex: parachaintypes.GroupIndex(0), -// assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, -// clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) -// }, -// }, -// statementStore: nil, -// session: parachaintypes.SessionIndex(1), -// transposedClaimQueue: transposedDummyClaimQueue, -// groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ -// parachaintypes.ParaID(1): {parachaintypes.GroupIndex(0)}, -// parachaintypes.ParaID(2): {parachaintypes.GroupIndex(0)}, -// parachaintypes.ParaID(3): {parachaintypes.GroupIndex(1)}, -// parachaintypes.ParaID(4): {parachaintypes.GroupIndex(1)}, -// }, -// disabledValidators: make(map[parachaintypes.ValidatorIndex]struct{}), -// assignmentsPerGroup: map[parachaintypes.GroupIndex][]parachaintypes.ParaID{ -// parachaintypes.GroupIndex(0): {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, -// parachaintypes.GroupIndex(1): {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, -// }, -// } -// require.Len(t, state.perRelayParent, 1) -// require.Equal(t, expectedPerRelayParentState, state.perRelayParent[leafHash]) - -// expectedSessionState := newPerSessionState( -// &sessionInfoDummy, -// dummyKeystore, -// 3, -// true, -// ) -// require.Len(t, state.perSession, 1) -// require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) -// } +func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + leafHash := common.MustBlake2bHash([]byte("leaf1")) + activatedLeaf := ¶chaintypes.ActivatedLeaf{Hash: leafHash} + + implicitViewMock := NewMockImplicitView(ctrl) + implicitViewMock.EXPECT(). + ActivateLeaf(leafHash, gomock.Any()). + Return(nil) + implicitViewMock.EXPECT(). + AllAllowedRelayParents(). + Return([]common.Hash{leafHash}) + + rtInstanceMock := NewMockInstance(ctrl) + rtInstanceMock.EXPECT(). + ParachainHostDisabledValidators(). + Return([]parachaintypes.ValidatorIndex{}, nil) + + // as the returned session index does not exists + // in the perSession map, it will be created by handleActiveLeafUpdate + // the next mocks are needed to ensure the creation of the session state + rtInstanceMock.EXPECT(). + ParachainHostSessionIndexForChild(). 
+ Return(parachaintypes.SessionIndex(1), nil) + + dummyKeystore := keystore.NewGenericKeystore("generic_test_keystore") + kp, err := sr25519.GenerateKeypair() + require.NoError(t, err) + + err = dummyKeystore.Insert(kp) + require.NoError(t, err) + + dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) + + sessionInfoDummy := parachaintypes.SessionInfo{ + ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1, 2, 3}, + RandomSeed: [32]byte{}, + DisputePeriod: parachaintypes.SessionIndex(10), + Validators: []parachaintypes.ValidatorID{dummyPubKey, {1}, {2}, {3}}, + DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{0}, {1}, {2}, {3}}, + AssignmentKeys: []parachaintypes.AssignmentID{parachaintypes.AssignmentID(dummyPubKey), {1}, {2}, {3}}, + ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}, {2, 3}}, + NCores: 2, + ZerothDelayTrancheWidth: 1, + RelayVRFModuloSamples: 1, + NDelayTranches: 1, + NoShowSlots: 1, + NeededApprovals: 1, + } + + rtInstanceMock.EXPECT(). + ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). + Return(&sessionInfoDummy, nil) + + rtInstanceMock.EXPECT(). + ParachainHostMinimumBackingVotes(). + Return(uint32(3), nil) + + featuresBitVec, err := parachaintypes.NewBitVec([]bool{true, true, false, true}) + require.NoError(t, err) + + rtInstanceMock.EXPECT(). + ParachainHostNodeFeatures(). + Return(featuresBitVec, nil) + + // the next runtime instances are needed to create the + // per relay parent state + dummyValidatorGroups := ¶chaintypes.ValidatorGroups{ + Validators: [][]parachaintypes.ValidatorIndex{{1}, {2}}, + GroupRotationInfo: parachaintypes.GroupRotationInfo{ + SessionStartBlock: parachaintypes.BlockNumber(100), + GroupRotationFrequency: parachaintypes.BlockNumber(10), + Now: parachaintypes.BlockNumber(105), + }, + } + rtInstanceMock.EXPECT(). + ParachainHostValidatorGroups(). + Return(dummyValidatorGroups, nil) + + dummyClaimQueue := parachaintypes.ClaimQueue{ + parachaintypes.CoreIndex{Index: 0}: {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.CoreIndex{Index: 1}: {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + } + transposedDummyClaimQueue := dummyClaimQueue.ToTransposed() + + rtInstanceMock.EXPECT(). + ParachainHostClaimQueue(). + Return(dummyClaimQueue, nil) + + blockStateMock := NewMockblockState(ctrl) + blockStateMock.EXPECT(). + GetRuntime(leafHash). + Return(rtInstanceMock, nil) + + candidatesMock := NewMockcandidatesTracker(ctrl) + candidatesMock.EXPECT(). + frontierHypotheticals(nil, nil). + Return([]parachaintypes.HypotheticalCandidate{}) + + candidatesMock.EXPECT(). + isConfirmed(parachaintypes.CandidateHash{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))}). + Return(true) + + compactStmtToSend := ¶chaintypes.CompactValid{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))} + validatorStmtPair := []originatorStatementPair{ + { + validatorIndex: parachaintypes.ValidatorIndex(1), + compactStmt: compactStmtToSend, + }, + } + + var signature [64]byte + copy(signature[:], bytes.Repeat([]byte{0x01}, 64)) + signedStmt := ¶chaintypes.SignedStatement{ + Payload: *compactStmtToSend.ToEncodable(), + ValidatorIndex: parachaintypes.ValidatorIndex(2), + Signature: parachaintypes.ValidatorSignature(parachaintypes.Signature(signature)), + } + + stmtStoreMock := NewMockstatementStore(ctrl) + stmtStoreMock.EXPECT(). + validatorStatement(validatorStmtPair). 
+ Return(signedStmt) + + peer1AuthDiscoveryID := parachaintypes.AuthorityDiscoveryID{2} + + state := &v2State{ + implicitView: implicitViewMock, + perRelayParent: make(map[common.Hash]*perRelayParentState), + perSession: make(map[parachaintypes.SessionIndex]*perSessionState), + peers: map[string]peerState{ + "peer1": { + view: parachaintypes.View{ + Heads: []common.Hash{leafHash}, + }, + protocolVersion: validationprotocol.ValidationVersionV3, + implicitView: map[common.Hash]struct{}{}, + discoveryIds: &map[parachaintypes.AuthorityDiscoveryID]struct{}{ + peer1AuthDiscoveryID: {}, + }, + }, + }, + keystore: dummyKeystore, + candidates: candidatesMock, + } + + overseerCh := make(chan any, 1) + sd := &StatementDistribution{ + state: state, + blockState: blockStateMock, + SubSystemToOverseer: overseerCh, + } + + // start a goroutine to handle the overseer subsystem + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + + // first message is a hypothetical membership request + hypotheticalMsg := <-overseerCh + msg, ok := hypotheticalMsg.(prospectiveparachainsmessages.GetHypotheticalMembership) + require.True(t, ok) + + // just return an empty slice + msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} + + // second message is a statement distribution message + // we don't expect a third message about pending grid statements + // because the grid tracker is empty and no pending statements are available + // on a new active leaf update + sendValidationMsg := <-overseerCh + validationMsg, ok := sendValidationMsg.(*networkbridgemessages.SendValidationMessage) + require.True(t, ok) + + require.Equal(t, []peer.ID{peer.ID("peer1")}, validationMsg.To) + + expectedSDMV3 := validationprotocol.NewStatementDistributionMessage() + err := expectedSDMV3.SetValue(validationprotocol.Statement{ + RelayParent: leafHash, + Compact: parachaintypes.UncheckedSignedCompactStatement(*signedStmt), + }) + require.NoError(t, err) + + expectedMsg := validationprotocol.NewValidationProtocolVDT() + err = expectedMsg.SetValue(validationprotocol.StatementDistribution{ + StatementDistributionMessage: expectedSDMV3}) + require.NoError(t, err) + + require.Equal(t, expectedMsg, validationMsg.ValidationProtocolMessage) + }() + + // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. + // Instead, we just ensure no panic and no error. 
+ err = sd.handleActiveLeavesUpdate(activatedLeaf) + require.NoError(t, err) + wg.Wait() + + // assertions + expectedPerRelayParentState := &perRelayParentState{ + localValidator: &localValidatorState{ + gridTracker: newGridTracker(), + active: &activeValidatorState{ + index: parachaintypes.ValidatorIndex(0), + groupIndex: parachaintypes.GroupIndex(0), + assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) + }, + }, + statementStore: nil, + session: parachaintypes.SessionIndex(1), + transposedClaimQueue: transposedDummyClaimQueue, + groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ + parachaintypes.ParaID(1): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(2): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(3): {parachaintypes.GroupIndex(1)}, + parachaintypes.ParaID(4): {parachaintypes.GroupIndex(1)}, + }, + disabledValidators: make(map[parachaintypes.ValidatorIndex]struct{}), + assignmentsPerGroup: map[parachaintypes.GroupIndex][]parachaintypes.ParaID{ + parachaintypes.GroupIndex(0): {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.GroupIndex(1): {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + }, + } + require.Len(t, state.perRelayParent, 1) + require.Equal(t, expectedPerRelayParentState, state.perRelayParent[leafHash]) + + expectedSessionState := newPerSessionState( + &sessionInfoDummy, + dummyKeystore, + 3, + true, + ) + require.Len(t, state.perSession, 1) + require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) +} func TestHandleDeactivatedLeaves(t *testing.T) { ctrl := gomock.NewController(t) diff --git a/dot/parachain/statement-distribution/cluster_tracker.go b/dot/parachain/statement-distribution/cluster_tracker.go index 7f984aaf19..32eb7ac989 100644 --- a/dot/parachain/statement-distribution/cluster_tracker.go +++ b/dot/parachain/statement-distribution/cluster_tracker.go @@ -7,6 +7,7 @@ import ( "slices" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" ) // accept signifies that an incoming statement was accepted. @@ -408,3 +409,80 @@ func (c *clusterTracker) validatorSeconded( } return false } + +// Note that we sent an outgoing statement to a peer in the group. +// This must be preceded by a successful `canSend` call. +func (c *clusterTracker) noteSent( + target parachaintypes.ValidatorIndex, + originator parachaintypes.ValidatorIndex, + stmt parachaintypes.CompactStatement, +) { + targetKnowledge, ok := c.knowledge[target] + if !ok { + targetKnowledge = make(map[taggedKnowledge]struct{}) + } + + targetKnowledge[outgoingP2P{specific{stmt, originator}}] = struct{}{} + + if _, ok := stmt.(*parachaintypes.CompactSeconded); ok { + targetKnowledge[outgoingP2P{general{stmt.CandidateHash()}}] = struct{}{} + + originatorKnowledge, ok := c.knowledge[originator] + if !ok { + originatorKnowledge = make(map[taggedKnowledge]struct{}) + } + + originatorKnowledge[seconded{stmt.CandidateHash()}] = struct{}{} + c.knowledge[originator] = originatorKnowledge + } + + c.knowledge[target] = targetKnowledge + + if pending, ok := c.pending[target]; ok { + delete(pending, originatorStatementPair{ + validatorIndex: originator, + compactStmt: stmt, + }) + } +} + +// pendingStatementsFor a slice of pending statements to be sent to a particular validator +// index. `Seconded` statements are sorted to the front of the vector. 
+// Pending statements have the form (originator, compact statement). +func (c *clusterTracker) pendingStatementsFor(target parachaintypes.ValidatorIndex) []originatorStatementPair { + set := c.pending[target] + pairs := make([]originatorStatementPair, len(set)) + + sIdx := 0 + vIdx := len(set) - 1 + + for k := range set { + if _, ok := k.compactStmt.(*parachaintypes.CompactSeconded); ok { + pairs[sIdx] = k + sIdx++ + } else { + pairs[vIdx] = k + vIdx-- + } + } + + return pairs +} + +func (c *clusterTracker) warningIfTooManyPendingStatements(parentHash common.Hash) { + setsCount := 0 + for _, set := range c.pending { + if len(set) > 0 { + setsCount++ + } + } + + // No reason to warn if we are the only node in the cluster. + if setsCount > len(c.validators) && len(c.validators) > 1 { + logger.Warnf("Cluster has too many pending statements, "+ + "something wrong with our connection to our group peers. "+ + "Restart might be needed if validator gets 0 backing rewards "+ + "for more than 3-4 consecutive sessions. pending statements: %d, parent hash: %s", + len(c.pending), parentHash.String()) + } +} diff --git a/dot/parachain/statement-distribution/cluster_tracker_test.go b/dot/parachain/statement-distribution/cluster_tracker_test.go index 941e1f49c0..ce68d88fff 100644 --- a/dot/parachain/statement-distribution/cluster_tracker_test.go +++ b/dot/parachain/statement-distribution/cluster_tracker_test.go @@ -210,3 +210,101 @@ func TestClusterTracker_receive_statements(t *testing.T) { ) }) } + +func TestClusterTracker_pendingStatementsFor(t *testing.T) { + group := []parachaintypes.ValidatorIndex{5, 200, 24, 146} + secondingLimit := uint(2) + tracker := newClusterTracker(group, secondingLimit) + + tracker.pending[parachaintypes.ValidatorIndex(5)] = map[originatorStatementPair]struct{}{ + { + validatorIndex: parachaintypes.ValidatorIndex(24), + compactStmt: parachaintypes.NewCompactSeconded(parachaintypes.CandidateHash{Value: common.Hash{0xab}}), + }: {}, + { + validatorIndex: parachaintypes.ValidatorIndex(200), + compactStmt: parachaintypes.NewCompactValid(parachaintypes.CandidateHash{Value: common.Hash{0xab}}), + }: {}, + { + validatorIndex: parachaintypes.ValidatorIndex(146), + compactStmt: parachaintypes.NewCompactValid(parachaintypes.CandidateHash{Value: common.Hash{0xab}}), + }: {}, + { + validatorIndex: parachaintypes.ValidatorIndex(200), + compactStmt: parachaintypes.NewCompactSeconded(parachaintypes.CandidateHash{Value: common.Hash{0x1}}), + }: {}, + { + validatorIndex: parachaintypes.ValidatorIndex(24), + compactStmt: parachaintypes.NewCompactValid(parachaintypes.CandidateHash{Value: common.Hash{0x1}}), + }: {}, + { + validatorIndex: parachaintypes.ValidatorIndex(146), + compactStmt: parachaintypes.NewCompactValid(parachaintypes.CandidateHash{Value: common.Hash{0x1}}), + }: {}, + } + + pairs := tracker.pendingStatementsFor(parachaintypes.ValidatorIndex(5)) + require.Len(t, pairs, 6) + + for i := 0; i < 2; i++ { + _, ok := pairs[i].compactStmt.(*parachaintypes.CompactSeconded) + require.True(t, ok) + } + + for i := 2; i < 6; i++ { + _, ok := pairs[i].compactStmt.(*parachaintypes.CompactValid) + require.True(t, ok) + } +} + +func TestClusterTracker_noteSent(t *testing.T) { + group := []parachaintypes.ValidatorIndex{5, 200, 24, 146} + secondingLimit := uint(2) + tracker := newClusterTracker(group, secondingLimit) + + secondedStmt := parachaintypes.NewCompactSeconded( + parachaintypes.CandidateHash{Value: common.Hash{0xab}}) + + // noteSent should not panic if the validator is not in the group 
+ tracker.noteSent( + parachaintypes.ValidatorIndex(100), + parachaintypes.ValidatorIndex(5), + secondedStmt, + ) + + expectedSpecific := outgoingP2P{specific{secondedStmt, parachaintypes.ValidatorIndex(5)}} + _, ok := tracker.knowledge[parachaintypes.ValidatorIndex(100)][expectedSpecific] + require.True(t, ok) + + expectedGeneral := outgoingP2P{general{secondedStmt.CandidateHash()}} + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(100)][expectedGeneral] + require.True(t, ok) + + // since the compact statement is seconded, the originator will also be part of the knowledge + expectedSecondedOriginator := seconded{secondedStmt.CandidateHash()} + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(5)][expectedSecondedOriginator] + require.True(t, ok) + + // add a pending statement that should be deleted after noteSent. + validStmt := parachaintypes.NewCompactValid( + parachaintypes.CandidateHash{Value: common.Hash{0xab}}) + tracker.pending[parachaintypes.ValidatorIndex(5)] = map[originatorStatementPair]struct{}{ + { + validatorIndex: parachaintypes.ValidatorIndex(24), + compactStmt: validStmt, + }: {}, + } + + tracker.noteSent( + parachaintypes.ValidatorIndex(5), + parachaintypes.ValidatorIndex(24), + validStmt, + ) + + // since the compact statement is valid, we dont add the originator to knowledge + expectedSpecific = outgoingP2P{specific{validStmt, parachaintypes.ValidatorIndex(24)}} + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(5)][expectedSpecific] + require.True(t, ok) + + require.Len(t, tracker.pending[parachaintypes.ValidatorIndex(5)], 0) +} diff --git a/dot/parachain/statement-distribution/grid_tracker.go b/dot/parachain/statement-distribution/grid_tracker.go index a0a46b54ca..9e230efe5a 100644 --- a/dot/parachain/statement-distribution/grid_tracker.go +++ b/dot/parachain/statement-distribution/grid_tracker.go @@ -49,7 +49,7 @@ func (o originatorStatementPairSet) remove( statement parachaintypes.CompactStatement, ) bool { for pair := range o { - if pair.validatorIndex == validatorIndex && pair.statement.Equals(statement) { + if pair.validatorIndex == validatorIndex && pair.compactStmt.Equals(statement) { delete(o, pair) return true } diff --git a/dot/parachain/statement-distribution/mocks_cluster_tracker_test.go b/dot/parachain/statement-distribution/mocks_cluster_tracker_test.go deleted file mode 100644 index 07f91e1959..0000000000 --- a/dot/parachain/statement-distribution/mocks_cluster_tracker_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: clusterTracker) -// -// Generated by this command: -// -// mockgen -destination=mocks_cluster_tracker_test.go -package=statementdistribution . clusterTracker -// - -// Package statementdistribution is a generated GoMock package. -package statementdistribution - -import ( - reflect "reflect" - - parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - common "github.com/ChainSafe/gossamer/lib/common" - gomock "go.uber.org/mock/gomock" -) - -// MockclusterTracker is a mock of clusterTracker interface. -type MockclusterTracker struct { - ctrl *gomock.Controller - recorder *MockclusterTrackerMockRecorder - isgomock struct{} -} - -// MockclusterTrackerMockRecorder is the mock recorder for MockclusterTracker. -type MockclusterTrackerMockRecorder struct { - mock *MockclusterTracker -} - -// NewMockclusterTracker creates a new mock instance. 
-func NewMockclusterTracker(ctrl *gomock.Controller) *MockclusterTracker { - mock := &MockclusterTracker{ctrl: ctrl} - mock.recorder = &MockclusterTrackerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockclusterTracker) EXPECT() *MockclusterTrackerMockRecorder { - return m.recorder -} - -// noteSend mocks base method. -func (m *MockclusterTracker) noteSend(target, originator parachaintypes.ValidatorIndex, stmt parachaintypes.CompactStatement) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "noteSend", target, originator, stmt) -} - -// noteSend indicates an expected call of noteSend. -func (mr *MockclusterTrackerMockRecorder) noteSend(target, originator, stmt any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "noteSend", reflect.TypeOf((*MockclusterTracker)(nil).noteSend), target, originator, stmt) -} - -// pendingStatementsFor mocks base method. -func (m *MockclusterTracker) pendingStatementsFor(target parachaintypes.ValidatorIndex) []originatorStatementPair { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "pendingStatementsFor", target) - ret0, _ := ret[0].([]originatorStatementPair) - return ret0 -} - -// pendingStatementsFor indicates an expected call of pendingStatementsFor. -func (mr *MockclusterTrackerMockRecorder) pendingStatementsFor(target any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "pendingStatementsFor", reflect.TypeOf((*MockclusterTracker)(nil).pendingStatementsFor), target) -} - -// warningIfTooManyPendingStatements mocks base method. -func (m *MockclusterTracker) warningIfTooManyPendingStatements(rp common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "warningIfTooManyPendingStatements", rp) -} - -// warningIfTooManyPendingStatements indicates an expected call of warningIfTooManyPendingStatements. -func (mr *MockclusterTrackerMockRecorder) warningIfTooManyPendingStatements(rp any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "warningIfTooManyPendingStatements", reflect.TypeOf((*MockclusterTracker)(nil).warningIfTooManyPendingStatements), rp) -} diff --git a/dot/parachain/statement-distribution/mocks_generate_test.go b/dot/parachain/statement-distribution/mocks_generate_test.go index 0c54ee7685..bedd8460b7 100644 --- a/dot/parachain/statement-distribution/mocks_generate_test.go +++ b/dot/parachain/statement-distribution/mocks_generate_test.go @@ -6,7 +6,6 @@ package statementdistribution //go:generate mockgen -destination=mocks_implicitview_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/dot/parachain/util ImplicitView //go:generate mockgen -destination=mocks_statement_store_test.go -package=$GOPACKAGE . statementStore //go:generate mockgen -destination=mocks_candidates_tracker_test.go -package=$GOPACKAGE . candidatesTracker -//go:generate mockgen -destination=mocks_cluster_tracker_test.go -package=$GOPACKAGE . clusterTracker //go:generate mockgen -destination=mocks_req_manager_test.go -package=$GOPACKAGE . requestManager //go:generate mockgen -destination=mocks_block_state_test.go -package=$GOPACKAGE . 
blockState //go:generate mockgen -destination=mocks_instance_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance diff --git a/dot/parachain/statement-distribution/mocks_statement_store_test.go b/dot/parachain/statement-distribution/mocks_statement_store_test.go index 9574ae1a78..22d124bca3 100644 --- a/dot/parachain/statement-distribution/mocks_statement_store_test.go +++ b/dot/parachain/statement-distribution/mocks_statement_store_test.go @@ -53,10 +53,10 @@ func (mr *MockstatementStoreMockRecorder) fillStatementFilter(arg0, arg1, arg2 a } // freshStatementsForBacking mocks base method. -func (m *MockstatementStore) freshStatementsForBacking(validators []parachaintypes.ValidatorIndex, candidateHash parachaintypes.CandidateHash) []parachaintypes.SignedStatement { +func (m *MockstatementStore) freshStatementsForBacking(validators []parachaintypes.ValidatorIndex, candidateHash parachaintypes.CandidateHash) []*parachaintypes.SignedStatement { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "freshStatementsForBacking", validators, candidateHash) - ret0, _ := ret[0].([]parachaintypes.SignedStatement) + ret0, _ := ret[0].([]*parachaintypes.SignedStatement) return ret0 } @@ -67,10 +67,10 @@ func (mr *MockstatementStoreMockRecorder) freshStatementsForBacking(validators, } // groupStatements mocks base method. -func (m *MockstatementStore) groupStatements(arg0 *groups, arg1 parachaintypes.GroupIndex, arg2 parachaintypes.CandidateHash, arg3 *parachaintypes.StatementFilter) []parachaintypes.SignedStatement { +func (m *MockstatementStore) groupStatements(arg0 *groups, arg1 parachaintypes.GroupIndex, arg2 parachaintypes.CandidateHash, arg3 *parachaintypes.StatementFilter) []*parachaintypes.SignedStatement { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "groupStatements", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].([]parachaintypes.SignedStatement) + ret0, _ := ret[0].([]*parachaintypes.SignedStatement) return ret0 } @@ -93,11 +93,12 @@ func (mr *MockstatementStoreMockRecorder) noteKnownByBacking(arg0, arg1 any) *go } // validatorStatement mocks base method. -func (m *MockstatementStore) validatorStatement(stmt originatorStatementPair) *parachaintypes.SignedStatement { +func (m *MockstatementStore) validatorStatement(stmt originatorStatementPair) (*parachaintypes.SignedStatement, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "validatorStatement", stmt) ret0, _ := ret[0].(*parachaintypes.SignedStatement) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // validatorStatement indicates an expected call of validatorStatement. 
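With the statement-store mock regenerated above to match the two-value return, a test can stub validatorStatement directly. The snippet below is a usage sketch, not code from the patch; it assumes a *testing.T named t is in scope and uses placeholder return values.

ctrl := gomock.NewController(t)
stmtStoreMock := NewMockstatementStore(ctrl)
// accept any originatorStatementPair and report the statement as known
stmtStoreMock.EXPECT().
	validatorStatement(gomock.Any()).
	Return(&parachaintypes.SignedStatement{}, true)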
diff --git a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index 5bca3b3798..e42f0c0f77 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -14,12 +14,6 @@ import ( "github.com/ChainSafe/gossamer/lib/keystore" ) -type clusterTracker interface { - warningIfTooManyPendingStatements(rp common.Hash) - pendingStatementsFor(target parachaintypes.ValidatorIndex) []originatorStatementPair - noteSend(target, originator parachaintypes.ValidatorIndex, stmt parachaintypes.CompactStatement) -} - type candidatesTracker interface { frontierHypotheticals(*common.Hash, *parachaintypes.ParaID) []parachaintypes.HypotheticalCandidate onDeactivateLeaves(leaves []common.Hash, rpLiveFn func(common.Hash) bool) @@ -35,19 +29,19 @@ type requestManager interface { } type statementStore interface { - validatorStatement(stmt originatorStatementPair) *parachaintypes.SignedStatement + validatorStatement(stmt originatorStatementPair) (*parachaintypes.SignedStatement, bool) // freshStatementsForBacking provides a list of all statements marked as being // unknown by the backing subsystem. This provides `Seconded` statements prior to `Valid` statements. freshStatementsForBacking(validators []parachaintypes.ValidatorIndex, - candidateHash parachaintypes.CandidateHash) []parachaintypes.SignedStatement + candidateHash parachaintypes.CandidateHash) []*parachaintypes.SignedStatement noteKnownByBacking(parachaintypes.ValidatorIndex, parachaintypes.CompactStatement) fillStatementFilter(parachaintypes.GroupIndex, parachaintypes.CandidateHash, *parachaintypes.StatementFilter) // Get an iterator over stored signed statements by the group conforming to the // given filter. // Seconded statements are provided first. 
groupStatements(*groups, parachaintypes.GroupIndex, parachaintypes.CandidateHash, - *parachaintypes.StatementFilter) []parachaintypes.SignedStatement + *parachaintypes.StatementFilter) []*parachaintypes.SignedStatement } // skipcq:SCC-U1000 @@ -99,7 +93,7 @@ type activeValidatorState struct { index parachaintypes.ValidatorIndex groupIndex parachaintypes.GroupIndex assignments []parachaintypes.ParaID - clusterTracker clusterTracker // TODO: use cluster tracker implementation (#4713) + clusterTracker *clusterTracker // TODO: use cluster tracker implementation (#4713) } // skipcq:SCC-U1000 diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index 5fd2a4e455..4ed52ea064 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -250,7 +250,7 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, func (s *StatementDistribution) sendPendingClusterStatements(rp common.Hash, peerID peer.ID, validationVersion validationprotocol.ValidationVersion, peerValidatorIdx parachaintypes.ValidatorIndex, - clusterTracker clusterTracker, + clusterTracker *clusterTracker, candidates candidatesTracker, statementStore statementStore, ) { @@ -262,7 +262,7 @@ func (s *StatementDistribution) sendPendingClusterStatements(rp common.Hash, msg := pendingStatementNetworkMessage(statementStore, rp, peerID, validationVersion, stmt) if msg != nil { - clusterTracker.noteSend(peerValidatorIdx, stmt.validatorIndex, stmt.compactStmt) + clusterTracker.noteSent(peerValidatorIdx, stmt.validatorIndex, stmt.compactStmt) // TODO: create a SendValidationMessages to send a batch of messages s.SubSystemToOverseer <- msg } @@ -445,7 +445,7 @@ func (s *StatementDistribution) sendBackingFreshStatements( panic(fmt.Sprintf("unexpected error setting Statement VDT: %s", err.Error())) } - signed, err := compareAndConvert(freshStmt, convertedStmt, withPVD) + signed, err := compareAndConvert(*freshStmt, convertedStmt, withPVD) if err != nil { return fmt.Errorf("comparing and converting stmt: %w", err) } @@ -614,7 +614,7 @@ func postAcknowledgementStatementMessages( stmtMessage := validationprotocol.NewStatementDistributionMessage() err := stmtMessage.SetValue(validationprotocol.Statement{ RelayParent: rp, - Compact: parachaintypes.UncheckedSignedCompactStatement(stmt), + Compact: parachaintypes.UncheckedSignedCompactStatement(*stmt), }) if err != nil { panic(fmt.Sprintf("failed while defining enum variant: %s", err.Error())) @@ -643,8 +643,8 @@ func pendingStatementNetworkMessage( pending originatorStatementPair, ) *networkbridgemessages.SendValidationMessage { if validationVersion == validationprotocol.ValidationVersionV3 { - signed := stmtStore.validatorStatement(pending) - if signed == nil { + signed, ok := stmtStore.validatorStatement(pending) + if !ok { return nil } diff --git a/dot/parachain/statement-distribution/statement_store.go b/dot/parachain/statement-distribution/statement_store.go index 4fda57da21..e3d1200967 100644 --- a/dot/parachain/statement-distribution/statement_store.go +++ b/dot/parachain/statement-distribution/statement_store.go @@ -257,18 +257,17 @@ func (s *statements) groupStatements( //nolint:unused // validatorStatement returns the full statement of this kind issued by this validator, if it is known. 
func (s *statements) validatorStatement( //nolint:unused - validatorIndex parachaintypes.ValidatorIndex, - statement parachaintypes.CompactStatement, + pair originatorStatementPair, ) (*parachaintypes.SignedStatement, bool) { kind := fingerprintKindCompactSeconded // default to Seconded - if _, ok := statement.(*parachaintypes.CompactValid); ok { + if _, ok := pair.compactStmt.(*parachaintypes.CompactValid); ok { kind = fingerprintKindCompactValid } fp := fingerprint{ - validator: validatorIndex, + validator: pair.validatorIndex, kind: kind, - candidateHash: statement.CandidateHash(), + candidateHash: pair.compactStmt.CandidateHash(), } sst, ok := s.knownStmts[fp] return sst.stmt, ok From b5f61707fe9932c419f3326cc5fe2dcbe8f60e78 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 1 Jul 2025 09:39:14 -0400 Subject: [PATCH 7/9] chore: resolving candidates tracker mocks --- .../statement_distribution_test.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/dot/parachain/statement-distribution/statement_distribution_test.go b/dot/parachain/statement-distribution/statement_distribution_test.go index c63ec981e1..4ad7b931cf 100644 --- a/dot/parachain/statement-distribution/statement_distribution_test.go +++ b/dot/parachain/statement-distribution/statement_distribution_test.go @@ -261,7 +261,6 @@ func TestSendPendingGridMessages(t *testing.T) { t.Run("pending_stmts_but_none_confirmed", func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) peerValidatorID := parachaintypes.ValidatorIndex(0) gt := newGridTracker() @@ -281,19 +280,11 @@ func TestSendPendingGridMessages(t *testing.T) { }, } - candidatesMock := NewMockcandidatesTracker(ctrl) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). - Return(nil, false) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0xab}}). 
- Return(nil, false) - sd := StatementDistribution{} err := sd.sendPendingGridMessages(rpHash, peerID, validationVersion, peerValidatorID, nil, - rpState, candidatesMock, + rpState, &candidates{}, ) require.Nil(t, err) }) From c0f35b58dcd1e1479df23f43015bf09dec6bf4d4 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 2 Jul 2025 19:25:02 -0400 Subject: [PATCH 8/9] wip --- .../active_leaves_update.go | 7 +- .../active_leaves_update_test.go | 107 ++++++------------ .../cluster_tracker_test.go | 11 ++ .../statement_distribution.go | 10 +- .../statement_distribution_test.go | 68 +++++------ 5 files changed, 81 insertions(+), 122 deletions(-) diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go index e32acb501c..31f965bea7 100644 --- a/dot/parachain/statement-distribution/active_leaves_update.go +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -49,7 +49,7 @@ func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.Ac } } - s.fragmentChainUpdateInner(&leaf.Hash, nil, nil, nil) + s.fragmentChainUpdateInner(&leaf.Hash, nil, nil) return nil } @@ -295,8 +295,7 @@ func findActiveValidatorState( } parasAssignedToCore := assignmentsPerGroup[*ourGroup] - // TODO: use cluster tracker implementation (#4713) - // secondingLimit := len(parasAssignedToCore) + secondingLimit := len(parasAssignedToCore) return &localValidatorState{ gridTracker: newGridTracker(), @@ -304,7 +303,7 @@ func findActiveValidatorState( index: validatorIdx, groupIndex: *ourGroup, assignments: slices.Clone(parasAssignedToCore), - clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) + clusterTracker: newClusterTracker(groupValidators, uint(secondingLimit)), }, } } diff --git a/dot/parachain/statement-distribution/active_leaves_update_test.go b/dot/parachain/statement-distribution/active_leaves_update_test.go index 8143bfbf87..86c6199bf7 100644 --- a/dot/parachain/statement-distribution/active_leaves_update_test.go +++ b/dot/parachain/statement-distribution/active_leaves_update_test.go @@ -5,14 +5,12 @@ import ( "sync" "testing" - networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" prospectiveparachainsmessages "github.com/ChainSafe/gossamer/dot/parachain/prospective-parachains/messages" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" keystore "github.com/ChainSafe/gossamer/lib/keystore" - "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" ) @@ -69,6 +67,8 @@ func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { NeededApprovals: 1, } + groups := newGroups(sessionInfoDummy.ValidatorGroups, 3) + rtInstanceMock.EXPECT(). ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). Return(&sessionInfoDummy, nil) @@ -113,18 +113,13 @@ func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { GetRuntime(leafHash). Return(rtInstanceMock, nil) - candidatesMock := NewMockcandidatesTracker(ctrl) - candidatesMock.EXPECT(). - frontierHypotheticals(nil, nil). 
- Return([]parachaintypes.HypotheticalCandidate{}) - state := &v2State{ implicitView: implicitViewMock, perRelayParent: make(map[common.Hash]*perRelayParentState), perSession: make(map[parachaintypes.SessionIndex]*perSessionState), peers: map[string]peerState{}, keystore: dummyKeystore, - candidates: candidatesMock, // No candidates tracker needed for this test + candidates: &candidates{}, } overseerCh := make(chan any, 1) @@ -158,13 +153,18 @@ func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { localValidator: &localValidatorState{ gridTracker: newGridTracker(), active: &activeValidatorState{ - index: parachaintypes.ValidatorIndex(0), - groupIndex: parachaintypes.GroupIndex(0), - assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, - clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) + index: parachaintypes.ValidatorIndex(0), + groupIndex: parachaintypes.GroupIndex(0), + assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + clusterTracker: &clusterTracker{ + validators: []parachaintypes.ValidatorIndex{0, 1}, + secondingLimit: 2, + knowledge: map[parachaintypes.ValidatorIndex]map[taggedKnowledge]struct{}{}, + pending: map[parachaintypes.ValidatorIndex]originatorStatementPairSet{}, + }, }, }, - statementStore: nil, + statementStore: newStatementStore(groups), session: parachaintypes.SessionIndex(1), transposedClaimQueue: transposedDummyClaimQueue, groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ @@ -288,36 +288,14 @@ func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent(t *testing.T) { GetRuntime(leafHash). Return(rtInstanceMock, nil) - candidatesMock := NewMockcandidatesTracker(ctrl) - candidatesMock.EXPECT(). - frontierHypotheticals(nil, nil). - Return([]parachaintypes.HypotheticalCandidate{}) - - candidatesMock.EXPECT(). - isConfirmed(parachaintypes.CandidateHash{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))}). - Return(true) - - compactStmtToSend := ¶chaintypes.CompactValid{Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))} - validatorStmtPair := []originatorStatementPair{ - { - validatorIndex: parachaintypes.ValidatorIndex(1), - compactStmt: compactStmtToSend, + candidatesTracker := &candidates{ + candidates: map[parachaintypes.CandidateHash]candidateState{ + {Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))}: &confirmedCandidate{ + pvd: ¶chaintypes.PersistedValidationData{}, + }, }, } - var signature [64]byte - copy(signature[:], bytes.Repeat([]byte{0x01}, 64)) - signedStmt := ¶chaintypes.SignedStatement{ - Payload: *compactStmtToSend.ToEncodable(), - ValidatorIndex: parachaintypes.ValidatorIndex(2), - Signature: parachaintypes.ValidatorSignature(parachaintypes.Signature(signature)), - } - - stmtStoreMock := NewMockstatementStore(ctrl) - stmtStoreMock.EXPECT(). - validatorStatement(validatorStmtPair). 
- Return(signedStmt) - peer1AuthDiscoveryID := parachaintypes.AuthorityDiscoveryID{2} state := &v2State{ @@ -337,7 +315,7 @@ func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent(t *testing.T) { }, }, keystore: dummyKeystore, - candidates: candidatesMock, + candidates: candidatesTracker, } overseerCh := make(chan any, 1) @@ -360,30 +338,6 @@ func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent(t *testing.T) { // just return an empty slice msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} - - // second message is a statement distribution message - // we don't expect a third message about pending grid statements - // because the grid tracker is empty and no pending statements are available - // on a new active leaf update - sendValidationMsg := <-overseerCh - validationMsg, ok := sendValidationMsg.(*networkbridgemessages.SendValidationMessage) - require.True(t, ok) - - require.Equal(t, []peer.ID{peer.ID("peer1")}, validationMsg.To) - - expectedSDMV3 := validationprotocol.NewStatementDistributionMessage() - err := expectedSDMV3.SetValue(validationprotocol.Statement{ - RelayParent: leafHash, - Compact: parachaintypes.UncheckedSignedCompactStatement(*signedStmt), - }) - require.NoError(t, err) - - expectedMsg := validationprotocol.NewValidationProtocolVDT() - err = expectedMsg.SetValue(validationprotocol.StatementDistribution{ - StatementDistributionMessage: expectedSDMV3}) - require.NoError(t, err) - - require.Equal(t, expectedMsg, validationMsg.ValidationProtocolMessage) }() // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. @@ -393,17 +347,24 @@ func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent(t *testing.T) { wg.Wait() // assertions + groups := newGroups(sessionInfoDummy.ValidatorGroups, 3) + expectedPerRelayParentState := &perRelayParentState{ localValidator: &localValidatorState{ gridTracker: newGridTracker(), active: &activeValidatorState{ - index: parachaintypes.ValidatorIndex(0), - groupIndex: parachaintypes.GroupIndex(0), - assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, - clusterTracker: nil, // TODO: use cluster tracker implementation (#4713) + index: parachaintypes.ValidatorIndex(0), + groupIndex: parachaintypes.GroupIndex(0), + assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + clusterTracker: &clusterTracker{ + validators: []parachaintypes.ValidatorIndex{0, 1}, + secondingLimit: 2, + knowledge: map[parachaintypes.ValidatorIndex]map[taggedKnowledge]struct{}{}, + pending: map[parachaintypes.ValidatorIndex]originatorStatementPairSet{}, + }, }, }, - statementStore: nil, + statementStore: newStatementStore(groups), session: parachaintypes.SessionIndex(1), transposedClaimQueue: transposedDummyClaimQueue, groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ @@ -447,10 +408,6 @@ func TestHandleDeactivatedLeaves(t *testing.T) { reqManagerMock.EXPECT(). removeByRelayParent(common.Hash{0xab}) - candidatesMock := NewMockcandidatesTracker(ctrl) - candidatesMock.EXPECT(). 
- onDeactivateLeaves([]common.Hash{{0xcd}}, gomock.Any()) - state := &v2State{ implicitView: implicitViewMock, perRelayParent: map[common.Hash]*perRelayParentState{ @@ -463,7 +420,7 @@ func TestHandleDeactivatedLeaves(t *testing.T) { parachaintypes.SessionIndex(2): nil, }, requestManager: reqManagerMock, - candidates: candidatesMock, + candidates: &candidates{}, } sd := &StatementDistribution{ diff --git a/dot/parachain/statement-distribution/cluster_tracker_test.go b/dot/parachain/statement-distribution/cluster_tracker_test.go index cb60f19b7b..a56828ae56 100644 --- a/dot/parachain/statement-distribution/cluster_tracker_test.go +++ b/dot/parachain/statement-distribution/cluster_tracker_test.go @@ -5,6 +5,7 @@ package statementdistribution import ( "cmp" + "fmt" "slices" "testing" @@ -276,6 +277,16 @@ func TestClusterTracker_noteSent(t *testing.T) { // since the compact statement is valid, we dont add the originator to knowledge expectedSpecific = outgoingP2P{specific{validStmt, parachaintypes.ValidatorIndex(24)}} _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(5)][expectedSpecific] + + for k := range tracker.knowledge[parachaintypes.ValidatorIndex(5)] { + fmt.Printf("%+v\n", k) + fmt.Printf("%+v\n", expectedSpecific) + + if k == expectedSpecific { + fmt.Println("eql") + } + } + require.True(t, ok) require.Len(t, tracker.pending[parachaintypes.ValidatorIndex(5)], 0) diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index 7cb1d5ffff..aa4069f79b 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -204,8 +204,7 @@ func (s *StatementDistribution) sendPeerMessagesForRelayParent(pid string, rp co } func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, - requiredParentHash *common.Hash, requiredParentParaID *parachaintypes.ParaID, - knowHypotheticals *[]parachaintypes.HypotheticalCandidate) { + parent *hashAndParaID, knowHypotheticals *[]parachaintypes.HypotheticalCandidate) { // 1. get hypothetical candidates var hypotheticals []parachaintypes.HypotheticalCandidate @@ -213,10 +212,7 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, if knowHypotheticals != nil { hypotheticals = *knowHypotheticals } else { - hypotheticals = s.state.candidates.frontierHypotheticals(&hashAndParaID{ - Hash: *requiredParentHash, - ParaID: *requiredParentParaID, - }) + hypotheticals = s.state.candidates.frontierHypotheticals(parent) } // 2. find out which are in the frontier @@ -238,6 +234,8 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, candidateMemberships = resp } + fmt.Println(len(candidateMemberships)) + // 3. note that they are importable under a given leaf hash. 
for _, item := range candidateMemberships { // skip parablocks which aren't potential candidates diff --git a/dot/parachain/statement-distribution/statement_distribution_test.go b/dot/parachain/statement-distribution/statement_distribution_test.go index 4ad7b931cf..27d00ab2a0 100644 --- a/dot/parachain/statement-distribution/statement_distribution_test.go +++ b/dot/parachain/statement-distribution/statement_distribution_test.go @@ -16,7 +16,6 @@ import ( "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" ) func TestSendBackingFreshStatements(t *testing.T) { @@ -292,7 +291,6 @@ func TestSendPendingGridMessages(t *testing.T) { t.Run("pending_full_manifest_confirmed", func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) peerValidatorID := parachaintypes.ValidatorIndex(4) gt := newGridTracker() @@ -303,21 +301,19 @@ func TestSendPendingGridMessages(t *testing.T) { }, ) - candidatesMock := NewMockcandidatesTracker(ctrl) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). - Return(&confirmedCandidate{ - assignedGroup: parachaintypes.GroupIndex(1), - receipt: parachaintypes.CommittedCandidateReceiptV2{ - Descriptor: parachaintypes.CandidateDescriptorV2{ - ParaID: parachaintypes.ParaID(10), + candidatesTracker := &candidates{ + candidates: map[parachaintypes.CandidateHash]candidateState{ + parachaintypes.CandidateHash{Value: common.Hash{0x12}}: &confirmedCandidate{ + assignedGroup: parachaintypes.GroupIndex(1), + receipt: parachaintypes.CommittedCandidateReceiptV2{ + Descriptor: parachaintypes.CandidateDescriptorV2{ + ParaID: parachaintypes.ParaID(10), + }, }, + parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)), }, - parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)), - }, true) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0xab}}). - Return(nil, false) + }, + } gps := newGroups([][]parachaintypes.ValidatorIndex{ {0, 1, 2}, @@ -358,7 +354,7 @@ func TestSendPendingGridMessages(t *testing.T) { err = sd.sendPendingGridMessages(rpHash, peerID, v3, peerValidatorID, gps, - rpState, candidatesMock, + rpState, candidatesTracker, ) require.Nil(t, err) @@ -391,7 +387,6 @@ func TestSendPendingGridMessages(t *testing.T) { t.Run("pending_full_and_ack_manifest_confirmed", func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) peerValidatorID := parachaintypes.ValidatorIndex(4) gt := newGridTracker() @@ -402,29 +397,28 @@ func TestSendPendingGridMessages(t *testing.T) { }, ) - candidatesMock := NewMockcandidatesTracker(ctrl) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). - Return(&confirmedCandidate{ - assignedGroup: parachaintypes.GroupIndex(1), - receipt: parachaintypes.CommittedCandidateReceiptV2{ - Descriptor: parachaintypes.CandidateDescriptorV2{ - ParaID: parachaintypes.ParaID(10), + candidatesTracker := &candidates{ + candidates: map[parachaintypes.CandidateHash]candidateState{ + parachaintypes.CandidateHash{Value: common.Hash{0x12}}: &confirmedCandidate{ + assignedGroup: parachaintypes.GroupIndex(1), + receipt: parachaintypes.CommittedCandidateReceiptV2{ + Descriptor: parachaintypes.CandidateDescriptorV2{ + ParaID: parachaintypes.ParaID(10), + }, }, + parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)), }, - parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)), - }, true) - candidatesMock.EXPECT(). 
- getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0xab}}). - Return(&confirmedCandidate{ - assignedGroup: parachaintypes.GroupIndex(0), - receipt: parachaintypes.CommittedCandidateReceiptV2{ - Descriptor: parachaintypes.CandidateDescriptorV2{ - ParaID: parachaintypes.ParaID(11), + parachaintypes.CandidateHash{Value: common.Hash{0xab}}: &confirmedCandidate{ + assignedGroup: parachaintypes.GroupIndex(0), + receipt: parachaintypes.CommittedCandidateReceiptV2{ + Descriptor: parachaintypes.CandidateDescriptorV2{ + ParaID: parachaintypes.ParaID(11), + }, }, + parentHash: common.Hash(bytes.Repeat([]byte{0xee}, 32)), }, - parentHash: common.Hash(bytes.Repeat([]byte{0xee}, 32)), - }, true) + }, + } gps := newGroups([][]parachaintypes.ValidatorIndex{ {0, 1, 2}, @@ -476,7 +470,7 @@ func TestSendPendingGridMessages(t *testing.T) { err = sd.sendPendingGridMessages(rpHash, peerID, v3, peerValidatorID, gps, - rpState, candidatesMock, + rpState, candidatesTracker, ) require.Nil(t, err) From 9e41a579d49b57b76bc3687a3cfc1d30e4fc6f96 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 3 Jul 2025 12:06:25 -0400 Subject: [PATCH 9/9] chore: fix cluster tracker note send --- .../statement-distribution/cluster_tracker.go | 7 ++++--- .../statement-distribution/cluster_tracker_test.go | 14 +++----------- .../statement_distribution.go | 2 -- 3 files changed, 7 insertions(+), 16 deletions(-) diff --git a/dot/parachain/statement-distribution/cluster_tracker.go b/dot/parachain/statement-distribution/cluster_tracker.go index cb6082222e..1988e98c66 100644 --- a/dot/parachain/statement-distribution/cluster_tracker.go +++ b/dot/parachain/statement-distribution/cluster_tracker.go @@ -497,12 +497,13 @@ func (c *clusterTracker) noteSent( ) { targetKnowledge, ok := c.knowledge[target] if !ok { - targetKnowledge = map[taggedKnowledge]struct{}{ - outgoingP2P{specific{statement, originator}}: {}, - } + targetKnowledge = map[taggedKnowledge]struct{}{} c.knowledge[target] = targetKnowledge } + targetKnowledge[outgoingP2P{specific{statement, originator}}] = struct{}{} + c.knowledge[target] = targetKnowledge + if _, ok := statement.(*parachaintypes.CompactSeconded); ok { targetKnowledge[outgoingP2P{general{statement.CandidateHash()}}] = struct{}{} diff --git a/dot/parachain/statement-distribution/cluster_tracker_test.go b/dot/parachain/statement-distribution/cluster_tracker_test.go index a56828ae56..b9cf347add 100644 --- a/dot/parachain/statement-distribution/cluster_tracker_test.go +++ b/dot/parachain/statement-distribution/cluster_tracker_test.go @@ -5,7 +5,6 @@ package statementdistribution import ( "cmp" - "fmt" "slices" "testing" @@ -277,18 +276,11 @@ func TestClusterTracker_noteSent(t *testing.T) { // since the compact statement is valid, we dont add the originator to knowledge expectedSpecific = outgoingP2P{specific{validStmt, parachaintypes.ValidatorIndex(24)}} _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(5)][expectedSpecific] - - for k := range tracker.knowledge[parachaintypes.ValidatorIndex(5)] { - fmt.Printf("%+v\n", k) - fmt.Printf("%+v\n", expectedSpecific) - - if k == expectedSpecific { - fmt.Println("eql") - } - } - require.True(t, ok) + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(24)] + require.False(t, ok) + require.Len(t, tracker.pending[parachaintypes.ValidatorIndex(5)], 0) } diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index aa4069f79b..a478fa8d57 100644 --- 
a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -234,8 +234,6 @@ func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, candidateMemberships = resp } - fmt.Println(len(candidateMemberships)) - // 3. note that they are importable under a given leaf hash. for _, item := range candidateMemberships { // skip parablocks which aren't potential candidates
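
Note on the validatorStatement refactor earlier in this series: the lookup is now keyed by a single comparable fingerprint value built from the originator's validator index, the statement kind (Seconded vs Valid), and the candidate hash, instead of passing the index and statement separately. The standalone sketch below illustrates that map-lookup pattern only; every type here (validatorIndex, candidateHash, signedStatement, statementStore) is a simplified stand-in for illustration, not the repository's actual definition.

package main

import "fmt"

// Simplified stand-ins for the subsystem's types (illustration only).
type validatorIndex uint32
type candidateHash [32]byte

type fingerprintKind int

const (
	kindSeconded fingerprintKind = iota
	kindValid
)

// fingerprint is a comparable composite key, so it can index a map directly.
type fingerprint struct {
	validator validatorIndex
	kind      fingerprintKind
	candidate candidateHash
}

type signedStatement struct {
	payload string
}

type statementStore struct {
	known map[fingerprint]*signedStatement
}

// validatorStatement returns the statement of the given kind issued by the
// originator for the candidate, if it is known.
func (s *statementStore) validatorStatement(origin validatorIndex, kind fingerprintKind, candidate candidateHash) (*signedStatement, bool) {
	stmt, ok := s.known[fingerprint{validator: origin, kind: kind, candidate: candidate}]
	return stmt, ok
}

func main() {
	var ch candidateHash
	ch[0] = 0xab

	store := &statementStore{known: map[fingerprint]*signedStatement{
		{validator: 2, kind: kindValid, candidate: ch}: {payload: "valid-by-2"},
	}}

	if stmt, ok := store.validatorStatement(2, kindValid, ch); ok {
		fmt.Println("found:", stmt.payload)
	}
	if _, ok := store.validatorStatement(2, kindSeconded, ch); !ok {
		fmt.Println("no seconded statement recorded for validator 2")
	}
}

Keying by a value struct keeps the lookup allocation-free and relies only on Go's built-in comparability rules, so the (found, ok) pair can be returned straight from the map access.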
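
Note on the noteSent fix in the last patch: the specific (originator, statement) entry is now recorded under the target's knowledge whether or not a knowledge map already existed for that target, and only Seconded statements additionally record general knowledge of the candidate; the updated test asserts that sending a Valid statement leaves the originator with no knowledge entry of its own. The sketch below mirrors that bookkeeping under simplified stand-in types; it drops the repository's outgoingP2P wrapper and is not the actual clusterTracker implementation.

package main

import "fmt"

// Simplified stand-ins for the cluster tracker's types (illustration only).
type validatorIndex uint32
type candidateHash [32]byte

type statementKind int

const (
	seconded statementKind = iota
	valid
)

type compactStatement struct {
	kind      statementKind
	candidate candidateHash
}

// Knowledge entries: either a specific (statement, originator) pair we sent,
// or general awareness of a candidate hash.
type specific struct {
	stmt       compactStatement
	originator validatorIndex
}

type general struct {
	candidate candidateHash
}

type taggedKnowledge interface{ isKnowledge() }

func (specific) isKnowledge() {}
func (general) isKnowledge()  {}

type tracker struct {
	knowledge map[validatorIndex]map[taggedKnowledge]struct{}
}

// noteSent records that we sent stmt, originating from originator, to target.
// Seconded statements also establish general knowledge of the candidate.
func (t *tracker) noteSent(target, originator validatorIndex, stmt compactStatement) {
	entries, ok := t.knowledge[target]
	if !ok {
		entries = map[taggedKnowledge]struct{}{}
		t.knowledge[target] = entries
	}

	entries[specific{stmt: stmt, originator: originator}] = struct{}{}

	if stmt.kind == seconded {
		entries[general{candidate: stmt.candidate}] = struct{}{}
	}
}

func main() {
	tr := &tracker{knowledge: map[validatorIndex]map[taggedKnowledge]struct{}{}}

	var ch candidateHash
	ch[0] = 0xff

	tr.noteSent(5, 24, compactStatement{kind: valid, candidate: ch})

	_, hasSpecific := tr.knowledge[5][specific{stmt: compactStatement{kind: valid, candidate: ch}, originator: 24}]
	_, originatorTracked := tr.knowledge[24]
	fmt.Println("target knows specific entry:", hasSpecific)       // true
	fmt.Println("originator gained knowledge:", originatorTracked) // false
}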