diff --git a/dot/parachain/bitfield-distribution/bitfield_distribution.go b/dot/parachain/bitfield-distribution/bitfield_distribution.go index b2a856e90a..9cf68ad110 100644 --- a/dot/parachain/bitfield-distribution/bitfield_distribution.go +++ b/dot/parachain/bitfield-distribution/bitfield_distribution.go @@ -8,7 +8,6 @@ import ( "fmt" "sync" - "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/runtime" networkbridge "github.com/ChainSafe/gossamer/dot/parachain/network-bridge" @@ -265,7 +264,7 @@ func (b *BitfieldDistribution) processNewGossipTopologyEvent(event networkbridge prevNeighbors := b.topologies.CurrentTopology.LocalNeighbours peers := make(map[peer.ID]struct{}) - for _, val := range newTopology.PeerIDs { + for val := range newTopology.Peers { peers[val] = struct{}{} } @@ -277,9 +276,9 @@ func (b *BitfieldDistribution) processNewGossipTopologyEvent(event networkbridge canonicalShuffling := make([]grid.TopologyPeerInfo, len(event.Topology.CanonicalShuffling)) for i, info := range event.Topology.CanonicalShuffling { t := grid.TopologyPeerInfo{ - Peers: info.PeerID, + Peers: info.Peers, ValidatorIndex: info.ValidatorIndex, - DiscoveryID: types.AuthorityID(info.DiscoveryID), + DiscoveryID: parachaintypes.AuthorityDiscoveryID(info.DiscoveryID), } canonicalShuffling[i] = t } @@ -493,9 +492,9 @@ func (b *BitfieldDistribution) processIncomingPeerMessageEvent(event networkbrid func (b *BitfieldDistribution) processUpdatedAuthorityIDsEvent(event networkbridgeevents.UpdatedAuthorityIDs) error { logger.Tracef("process UpdatedAuthorityIDs event: %v", event) - ids := make(map[types.AuthorityID]struct{}) + ids := make(map[parachaintypes.AuthorityDiscoveryID]struct{}) for _, id := range event.AuthorityDiscoveryIDs { - ids[types.AuthorityID(id)] = struct{}{} + ids[parachaintypes.AuthorityDiscoveryID(id)] = struct{}{} } ok, err := b.topologies.CurrentTopology.UpdateAuthoritiesIDs(event.PeerID, ids) if err != nil { diff --git a/dot/parachain/bitfield-distribution/bitfield_distribution_test.go b/dot/parachain/bitfield-distribution/bitfield_distribution_test.go index 2a7a487694..c7b2ee147f 100644 --- a/dot/parachain/bitfield-distribution/bitfield_distribution_test.go +++ b/dot/parachain/bitfield-distribution/bitfield_distribution_test.go @@ -17,7 +17,6 @@ import ( parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/dot/parachain/util" validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" - "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/lib/keystore" @@ -627,7 +626,7 @@ func TestBitfieldDistribution_ProcessBitfieldDistributionMessage_CheckSignedAvai gt := grid.NewSessionGridTopology([]uint{1, 2, 3}, []grid.TopologyPeerInfo{{ Peers: []peer.ID{"peer1", "peer2"}, ValidatorIndex: parachaintypes.ValidatorIndex(1), - DiscoveryID: types.AuthorityID{1}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{1}, }}) sgte := &grid.SessionGridTopologyEntry{ Topology: gt, @@ -1204,7 +1203,7 @@ func TestBitfieldDistribution_ProcessIncomingPeerMessageEvent_Success(t *testing gt := grid.NewSessionGridTopology([]uint{1, 2, 3}, []grid.TopologyPeerInfo{{ Peers: []peer.ID{"peer1", "peer2"}, ValidatorIndex: parachaintypes.ValidatorIndex(1), - DiscoveryID: types.AuthorityID{1}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{1}, }}) sgte := &grid.SessionGridTopologyEntry{ Topology: gt, @@ -1607,7 +1606,7 
@@ func TestBitfieldDistribution_processUpdatedAuthorityIDsEvent(t *testing.T) { gt := grid.NewSessionGridTopology([]uint{1}, []grid.TopologyPeerInfo{{ Peers: []peer.ID{"peer1", "peer2"}, ValidatorIndex: parachaintypes.ValidatorIndex(1), - DiscoveryID: types.AuthorityID{2}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{2}, }, }) assert.Len(t, gt.Peers, 2) diff --git a/dot/parachain/gossip-support/gossip_support.go b/dot/parachain/gossip-support/gossip_support.go index e9b6acdf50..db3e1533ff 100644 --- a/dot/parachain/gossip-support/gossip_support.go +++ b/dot/parachain/gossip-support/gossip_support.go @@ -450,7 +450,7 @@ func (gs *GossipSupport) updateGossipTopology( ) error { authLen := len(authorities) canonicalShuffling := make([]networkbridgeevents.CanonicalShuffling, authLen) - shuffledIndices := make([]uint8, authLen) + shuffledIndices := make([]uint, authLen) for i, a := range authorities { canonicalShuffling[i] = networkbridgeevents.CanonicalShuffling{AuthorityDiscoveryID: a, @@ -468,7 +468,7 @@ func (gs *GossipSupport) updateGossipTopology( } for i, pair := range canonicalShuffling { - shuffledIndices[int(pair.ValidatorIndex)] = uint8(i) + shuffledIndices[int(pair.ValidatorIndex)] = uint(i) } localIndex := parachaintypes.ValidatorIndex(ourIndex) diff --git a/dot/parachain/grid/grid_topology.go b/dot/parachain/grid/grid_topology.go index 735817e9b8..be84fe6a19 100644 --- a/dot/parachain/grid/grid_topology.go +++ b/dot/parachain/grid/grid_topology.go @@ -5,7 +5,6 @@ import ( "math" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - "github.com/ChainSafe/gossamer/dot/types" "github.com/libp2p/go-libp2p/core/peer" ) @@ -18,7 +17,7 @@ type TopologyPeerInfo struct { // This can extend _beyond_ the set of active parachain validators. ValidatorIndex parachaintypes.ValidatorIndex // DiscoveryID is the authority discovery public key of the validator in the corresponding `SessionInfo`. - DiscoveryID types.AuthorityID + DiscoveryID parachaintypes.AuthorityDiscoveryID } // SessionGridTopology is topology representation for session @@ -54,7 +53,7 @@ func NewSessionGridTopology(shuffledIndices []uint, canonicalShuffling []Topolog // hence there could be multiple AuthorityID associated with a peerID. // Updates Peers hashset with new peer id if the peer is in the grid topology. // Returns true if the peer is in the grid topology. -func (gt *SessionGridTopology) UpdateAuthoritiesIDs(peer peer.ID, discoveryIDs map[types.AuthorityID]struct{}) bool { +func (gt *SessionGridTopology) UpdateAuthoritiesIDs(peer peer.ID, discoveryIDs map[parachaintypes.AuthorityDiscoveryID]struct{}) bool { updated := false if _, ok := gt.Peers[peer]; !ok { for i := range gt.CanonicalShuffling { @@ -295,7 +294,7 @@ func (s *SessionGridTopologyEntry) PeersToRoute(routing RequiredRouting) []peer. 
func (s *SessionGridTopologyEntry) UpdateAuthoritiesIDs( peer peer.ID, - discoveryIDs map[types.AuthorityID]struct{}) (bool, error) { + discoveryIDs map[parachaintypes.AuthorityDiscoveryID]struct{}) (bool, error) { if s.Topology.UpdateAuthoritiesIDs(peer, discoveryIDs) { // If authorities update, recompile neighbours new_neighbours, err := s.Topology.ComputeGridNeighboursFor(s.LocalIndex) diff --git a/dot/parachain/grid/grid_topology_test.go b/dot/parachain/grid/grid_topology_test.go index e9ac958b7c..2222d625e5 100644 --- a/dot/parachain/grid/grid_topology_test.go +++ b/dot/parachain/grid/grid_topology_test.go @@ -4,7 +4,6 @@ import ( "testing" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - "github.com/ChainSafe/gossamer/dot/types" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" @@ -16,57 +15,57 @@ func FixtureTopologyPeerInfo() []TopologyPeerInfo { { Peers: []peer.ID{"peer1", "peer2"}, ValidatorIndex: parachaintypes.ValidatorIndex(0), - DiscoveryID: types.AuthorityID{1}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{1}, }, { Peers: []peer.ID{"peer3", "peer4"}, ValidatorIndex: parachaintypes.ValidatorIndex(1), - DiscoveryID: types.AuthorityID{2}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{2}, }, { Peers: []peer.ID{"peer5", "peer6"}, ValidatorIndex: parachaintypes.ValidatorIndex(2), - DiscoveryID: types.AuthorityID{3}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{3}, }, { Peers: []peer.ID{"peer7", "peer8"}, ValidatorIndex: parachaintypes.ValidatorIndex(3), - DiscoveryID: types.AuthorityID{4}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{4}, }, { Peers: []peer.ID{"peer9", "peer10"}, ValidatorIndex: parachaintypes.ValidatorIndex(4), - DiscoveryID: types.AuthorityID{5}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{5}, }, { Peers: []peer.ID{"peer11", "peer12"}, ValidatorIndex: parachaintypes.ValidatorIndex(5), - DiscoveryID: types.AuthorityID{6}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{6}, }, { Peers: []peer.ID{"peer13", "peer14"}, ValidatorIndex: parachaintypes.ValidatorIndex(6), - DiscoveryID: types.AuthorityID{7}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{7}, }, { Peers: []peer.ID{"peer15", "peer16"}, ValidatorIndex: parachaintypes.ValidatorIndex(7), - DiscoveryID: types.AuthorityID{8}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{8}, }, { Peers: []peer.ID{"peer17", "peer18"}, ValidatorIndex: parachaintypes.ValidatorIndex(8), - DiscoveryID: types.AuthorityID{9}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{9}, }, { Peers: []peer.ID{"peer19", "peer20"}, ValidatorIndex: parachaintypes.ValidatorIndex(9), - DiscoveryID: types.AuthorityID{10}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{10}, }, { Peers: []peer.ID{"peer21", "peer22"}, ValidatorIndex: parachaintypes.ValidatorIndex(10), - DiscoveryID: types.AuthorityID{11}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{11}, }, } } @@ -75,15 +74,15 @@ func Test_SessionGridTopology(t *testing.T) { gt := NewSessionGridTopology([]uint{1, 2, 3}, []TopologyPeerInfo{{ Peers: []peer.ID{"peer1", "peer2"}, ValidatorIndex: parachaintypes.ValidatorIndex(1), - DiscoveryID: types.AuthorityID{1}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{1}, }, }) assert.Len(t, gt.Peers, 2) - updated := gt.UpdateAuthoritiesIDs(peer.ID("peer2"), map[types.AuthorityID]struct{}{{1}: {}}) + updated := gt.UpdateAuthoritiesIDs(peer.ID("peer2"), map[parachaintypes.AuthorityDiscoveryID]struct{}{{1}: {}}) assert.False(t, updated) - updated = 
gt.UpdateAuthoritiesIDs(peer.ID("peer3"), map[types.AuthorityID]struct{}{{1}: {}}) + updated = gt.UpdateAuthoritiesIDs(peer.ID("peer3"), map[parachaintypes.AuthorityDiscoveryID]struct{}{{1}: {}}) assert.True(t, updated) assert.Len(t, gt.Peers, 3) assert.Equal(t, @@ -96,7 +95,7 @@ func Test_SessionGridTopologyNeighbours(t *testing.T) { gt := NewSessionGridTopology([]uint{1, 2, 3}, []TopologyPeerInfo{{ Peers: []peer.ID{"peer1", "peer2"}, ValidatorIndex: parachaintypes.ValidatorIndex(1), - DiscoveryID: types.AuthorityID{1}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{1}, }, }) _, err := gt.ComputeGridNeighboursFor(1) @@ -167,7 +166,7 @@ func Test_SessionGridTopologyEntry(t *testing.T) { assert.Len(t, sgte.PeersToRoute(RequiredRoutingGridY), 6) assert.Len(t, sgte.PeersToRoute(RequiredRoutingAll), 22) - updated, err := sgte.UpdateAuthoritiesIDs(peer.ID("peer99"), map[types.AuthorityID]struct{}{{10}: {}}) + updated, err := sgte.UpdateAuthoritiesIDs(peer.ID("peer99"), map[parachaintypes.AuthorityDiscoveryID]struct{}{{10}: {}}) assert.True(t, updated) assert.Nil(t, err) // Now we added one more peer to validator with AuthorityID 10, index 9. Sine we are actins as validator with index 10 diff --git a/dot/parachain/network-bridge/events/events.go b/dot/parachain/network-bridge/events/events.go index 8c325163ba..66cc9595d1 100644 --- a/dot/parachain/network-bridge/events/events.go +++ b/dot/parachain/network-bridge/events/events.go @@ -5,6 +5,7 @@ package events import ( collationprotocol "github.com/ChainSafe/gossamer/dot/parachain/collator-protocol/messages" + "github.com/ChainSafe/gossamer/dot/parachain/grid" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" @@ -43,7 +44,7 @@ type CanonicalShuffling struct { type NewGossipTopology struct { Session parachaintypes.SessionIndex - Topology SessionGridTopology + Topology grid.SessionGridTopology LocalIndex *parachaintypes.ValidatorIndex } diff --git a/dot/parachain/network-bridge/events/grid_topology.go b/dot/parachain/network-bridge/events/grid_topology.go deleted file mode 100644 index 0f53336b99..0000000000 --- a/dot/parachain/network-bridge/events/grid_topology.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2025 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package events - -import ( - parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - "github.com/libp2p/go-libp2p/core/peer" -) - -// SessionGridTopology is topology representation for a session. -type SessionGridTopology struct { - // An array mapping validator indices to their indices in the - // shuffling itself. This has the same size as the number of validators - // in the session. - ShuffledIndices []uint8 - // The canonical shuffling of validators for the session. - CanonicalShuffling []TopologyPeerInfo - // The list of peer-ids in an efficient way to search. - PeerIDs []peer.ID -} - -// TopologyPeerInfo contains information about a peer in the gossip topology for a session. -type TopologyPeerInfo struct { - // The validator's known peer IDs. - PeerID []peer.ID - // The index of the validator in the discovery keys of the corresponding - // `SessionInfo`. This can extend _beyond_ the set of active parachain validators. - ValidatorIndex parachaintypes.ValidatorIndex - // The authority discovery public key of the validator in the corresponding - // `SessionInfo`. 
- DiscoveryID parachaintypes.AuthorityDiscoveryID -} diff --git a/dot/parachain/network-bridge/messages/rx_overseer_messages.go b/dot/parachain/network-bridge/messages/rx_overseer_messages.go index c838811412..33896aee6c 100644 --- a/dot/parachain/network-bridge/messages/rx_overseer_messages.go +++ b/dot/parachain/network-bridge/messages/rx_overseer_messages.go @@ -24,7 +24,7 @@ type NewGossipTopology struct { CanonicalShuffling []events.CanonicalShuffling // The reverse mapping of `canonical_shuffling`: from validator index // to the index in `canonical_shuffling` - ShuffledIndices []uint8 + ShuffledIndices []uint } // UpdateAuthorityIDs is used to inform the distribution subsystems about `AuthorityDiscoveryId` key rotations. diff --git a/dot/parachain/network-bridge/receiver.go b/dot/parachain/network-bridge/receiver.go index dbbc1d013e..f9202d73bd 100644 --- a/dot/parachain/network-bridge/receiver.go +++ b/dot/parachain/network-bridge/receiver.go @@ -15,6 +15,7 @@ import ( "github.com/ChainSafe/gossamer/dot/network" collatorprotocolmessages "github.com/ChainSafe/gossamer/dot/parachain/collator-protocol/messages" + "github.com/ChainSafe/gossamer/dot/parachain/grid" "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/events" networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" @@ -373,7 +374,7 @@ func (nbr *NetworkBridgeReceiver) processMessage(msg any) error { //nolint newGossipTopology := events.NewGossipTopology{ Session: msg.Session, - Topology: events.SessionGridTopology{ + Topology: grid.SessionGridTopology{ ShuffledIndices: msg.ShuffledIndices, CanonicalShuffling: peerTopologies, }, @@ -395,14 +396,14 @@ func (nbr *NetworkBridgeReceiver) processMessage(msg any) error { //nolint } func getTopologyPeers(authorityDiscoveryService AuthorityDiscoveryService, - neighbours []events.CanonicalShuffling) []events.TopologyPeerInfo { + neighbours []events.CanonicalShuffling) []grid.TopologyPeerInfo { - peers := make([]events.TopologyPeerInfo, len(neighbours)) + peers := make([]grid.TopologyPeerInfo, len(neighbours)) for _, neighbour := range neighbours { peerID := authorityDiscoveryService.GetPeerIDByAuthorityID(neighbour.AuthorityDiscoveryID) - peers = append(peers, events.TopologyPeerInfo{ - PeerID: []peer.ID{peerID}, + peers = append(peers, grid.TopologyPeerInfo{ + Peers: peer.IDSlice{peerID}, ValidatorIndex: neighbour.ValidatorIndex, DiscoveryID: neighbour.AuthorityDiscoveryID, }) diff --git a/dot/parachain/statement-distribution/active_leaves_update.go b/dot/parachain/statement-distribution/active_leaves_update.go new file mode 100644 index 0000000000..9bb13cde17 --- /dev/null +++ b/dot/parachain/statement-distribution/active_leaves_update.go @@ -0,0 +1,310 @@ +package statementdistribution + +import ( + "fmt" + "maps" + "slices" + + "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/events" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/libp2p/go-libp2p/core/peer" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" +) + +func (s *StatementDistribution) handleActiveLeavesUpdate(leaf *parachaintypes.ActivatedLeaf) error { + err := s.state.implicitView.ActivateLeaf(leaf.Hash, s.SubSystemToOverseer) + if err != nil { + return fmt.Errorf("implicit view activating leaf: %w", err) + } + + newRelayParents := s.state.implicitView.AllAllowedRelayParents() + + for _, nrp := range newRelayParents { + if _, ok := 
s.state.perRelayParent[nrp]; ok {
+			continue
+		}
+
+		if err := s.handleActiveLeafUpdate(nrp); err != nil {
+			logger.Warnf("failed to handle active leaf %s: %s", nrp.String(), err.Error())
+		}
+	}
+
+	logger.Debugf("activated leaves. Now tracking %d relay-parents across %d sessions",
+		len(s.state.perRelayParent), len(s.state.perSession))
+
+	// Reconcile all peers' views with the active leaf and any relay parents
+	// it implies. If they learned about the block before we did, this reconciliation will give
+	// non-empty results and we should send them messages concerning all activated relay-parents.
+	updatePeers := make(map[peer.ID][]common.Hash)
+	for pid, pState := range s.state.peers {
+		fresh := pState.reconcileActiveLeaf(leaf.Hash, newRelayParents)
+		if len(fresh) > 0 {
+			updatePeers[pid] = fresh
+		}
+	}
+
+	for pid, fresh := range updatePeers {
+		for _, freshRp := range fresh {
+			s.sendPeerMessagesForRelayParent(pid, freshRp)
+		}
+	}
+
+	s.fragmentChainUpdateInner(&leaf.Hash, nil, nil)
+	return nil
+}
+
+func (s *StatementDistribution) handleActiveLeafUpdate(rp common.Hash) error {
+	rt, err := s.blockState.GetRuntime(rp)
+	if err != nil {
+		return fmt.Errorf("getting runtime: %w", err)
+	}
+
+	disabledValidators, err := rt.ParachainHostDisabledValidators()
+	if err != nil {
+		return fmt.Errorf("querying disabled validators: %w", err)
+	}
+
+	disableValidatorsSet := make(map[parachaintypes.ValidatorIndex]struct{}, len(disabledValidators))
+	for _, dv := range disabledValidators {
+		disableValidatorsSet[dv] = struct{}{}
+	}
+
+	sessionIdx, err := rt.ParachainHostSessionIndexForChild()
+	if err != nil {
+		return fmt.Errorf("querying session index for child: %w", err)
+	}
+
+	if _, ok := s.state.perSession[sessionIdx]; !ok {
+		sessionInfo, err := rt.ParachainHostSessionInfo(sessionIdx)
+		if err != nil {
+			return fmt.Errorf("querying session info: %w", err)
+		}
+
+		if sessionInfo == nil {
+			logger.Warnf("no session info provided for session %d, relay parent=%s", sessionIdx, rp)
+			return nil
+		}
+
+		minBackingVotes, err := rt.ParachainHostMinimumBackingVotes()
+		if err != nil {
+			return fmt.Errorf("querying minimum backing votes: %w", err)
+		}
+
+		nodeFeatures, err := rt.ParachainHostNodeFeatures()
+		if err != nil {
+			return fmt.Errorf("querying node features: %w", err)
+		}
+
+		allowV2Descriptor, err := nodeFeatures.Get(uint(parachaintypes.CandidateReceiptV2Feature))
+		if err != nil {
+			return fmt.Errorf("getting candidate receipt v2 feature: %w", err)
+		}
+
+		perSessionState := newPerSessionState(
+			sessionInfo, s.state.keystore, minBackingVotes, allowV2Descriptor)
+
+		if top, ok := s.state.unusedTopologies[sessionIdx]; ok {
+			delete(s.state.unusedTopologies, sessionIdx)
+			perSessionState.supplyTopology(&top.Topology, top.LocalIndex)
+		}
+
+		s.state.perSession[sessionIdx] = perSessionState
+	}
+
+	perSession := s.state.perSession[sessionIdx]
+	if perSession == nil {
+		panic("either existed or just inserted; qed.")
+	}
+
+	if len(disableValidatorsSet) > 0 {
+		disabled := make([]parachaintypes.ValidatorIndex, 0, len(disableValidatorsSet))
+		for d := range disableValidatorsSet {
+			disabled = append(disabled, d)
+		}
+
+		logger.Debugf("disabled validators detected: %v, "+
+			"session index=%v, relay parent=%s", disabled, sessionIdx, rp.String())
+	}
+
+	validatorGroups, err := rt.ParachainHostValidatorGroups()
+	if err != nil {
+		return fmt.Errorf("querying validator groups: %w", err)
+	}
+
+	claimQueue, err := rt.ParachainHostClaimQueue()
+	if err != nil {
+		return fmt.Errorf("querying host claim queue: %w", err)
+	}
+
+	groupsPerPara, assignmentsPerGroup := determineGroupAssignment(
+		len(perSession.groups.all()),
+		&validatorGroups.GroupRotationInfo,
+		&claimQueue,
+	)
+
+	var localValidator *localValidatorState
+	if perSession.localValidator != nil {
+		localValidator = findActiveValidatorState(
+			*perSession.localValidator,
+			perSession.groups,
+			assignmentsPerGroup,
+		)
+	} else {
+		localValidator = &localValidatorState{
+			gridTracker: newGridTracker(),
+			active:      nil,
+		}
+	}
+
+	transposedCq := claimQueue.ToTransposed()
+
+	s.state.perRelayParent[rp] = &perRelayParentState{
+		localValidator:       localValidator,
+		statementStore:       newStatementStore(perSession.groups),
+		session:              sessionIdx,
+		groupsPerPara:        groupsPerPara,
+		disabledValidators:   disableValidatorsSet,
+		transposedClaimQueue: transposedCq,
+		assignmentsPerGroup:  assignmentsPerGroup,
+	}
+
+	return nil
+}
+
+// Utility function to populate:
+// - per relay parent `ParaId` to `GroupIndex` mappings.
+// - per `GroupIndex` claim queue assignments
+func determineGroupAssignment(
+	numCores int,
+	groupRotationInfo *parachaintypes.GroupRotationInfo,
+	claimQueue *parachaintypes.ClaimQueue,
+) (map[parachaintypes.ParaID][]parachaintypes.GroupIndex, map[parachaintypes.GroupIndex][]parachaintypes.ParaID) {
+	// Determine the core indices occupied by each para at the current relay parent. To support
+	// on-demand parachains we also consider the core indices at next blocks.
+	ordered := claimQueue.Ordered()
+
+	schedule := make(map[parachaintypes.CoreIndex][]parachaintypes.ParaID)
+	for _, cqEntry := range ordered {
+		schedule[cqEntry.Core] = cqEntry.Paras
+	}
+
+	groupsPerPara := make(map[parachaintypes.ParaID][]parachaintypes.GroupIndex)
+	assignmentsPerGroup := make(map[parachaintypes.GroupIndex][]parachaintypes.ParaID, len(schedule))
+
+	for coreIdx, paras := range schedule {
+		groupIdx := groupRotationInfo.GroupForCore(coreIdx, uint(numCores))
+
+		assignmentsPerGroup[groupIdx] = slices.Clone(paras)
+
+		for _, para := range paras {
+			groups, ok := groupsPerPara[para]
+			if !ok {
+				groups = make([]parachaintypes.GroupIndex, 0)
+			}
+
+			groups = append(groups, groupIdx)
+			groupsPerPara[para] = groups
+		}
+	}
+
+	return groupsPerPara, assignmentsPerGroup
+}
+
+// handleDeactivatedLeaves deactivates leaves in the implicit view
+func (s *StatementDistribution) handleDeactivatedLeaves(leaves []common.Hash) {
+	for _, l := range leaves {
+		pruned := s.state.implicitView.DeactivateLeaf(l)
+
+		for _, prunedRp := range pruned {
+			// clean up per-relay-parent data based on everything removed.
+			rpInfo, ok := s.state.perRelayParent[prunedRp]
+			if !ok {
+				continue
+			}
+
+			delete(s.state.perRelayParent, prunedRp)
+
+			if activeValidatorState := rpInfo.activeValidatorState(); activeValidatorState != nil {
+				activeValidatorState.clusterTracker.warningIfTooManyPendingStatements(prunedRp)
+			}
+
+			// clean up requests related to this relay parent.
+			s.state.requestManager.removeByRelayParent(prunedRp)
+		}
+	}
+
+	s.state.candidates.onDeactivateLeaves(leaves, func(h common.Hash) bool {
+		_, ok := s.state.perRelayParent[h]
+		return ok
+	})
+
+	// clean up sessions based on everything remaining.
+ sessions := make(map[parachaintypes.SessionIndex]struct{}) + for _, v := range s.state.perRelayParent { + sessions[v.session] = struct{}{} + } + + maps.DeleteFunc(s.state.perSession, func(key parachaintypes.SessionIndex, _value *perSessionState) bool { + _, ok := sessions[key] + return !ok + }) + + var lastSessionIndex *parachaintypes.SessionIndex + for k := range s.state.unusedTopologies { + if lastSessionIndex == nil || k > *lastSessionIndex { + //pin so we don't get the address of a looping variable + sessionIdx := k + lastSessionIndex = &sessionIdx + } + } + + // Do not clean-up the last saved toplogy unless we moved to the next session + // This is needed because handle_deactive_leaves, gets also called when + // prospective_parachains APIs are not present, so we would actually remove + // the topology without using it because `perRelayParent` is empty until + // prospective_parachains gets enabled + maps.DeleteFunc(s.state.unusedTopologies, func(s parachaintypes.SessionIndex, _v events.NewGossipTopology) bool { + _, ok := sessions[s] + if ok || lastSessionIndex != nil && *lastSessionIndex == s { + return false + } + + return true + }) +} + +func findActiveValidatorState( + validatorIdx parachaintypes.ValidatorIndex, + groups *groups, + assignmentsPerGroup map[parachaintypes.GroupIndex][]parachaintypes.ParaID, +) *localValidatorState { + if len(groups.all()) == 0 { + return nil + } + + ourGroup := groups.byValidatorIndex(validatorIdx) + if ourGroup == nil { + logger.Warnf("no group found for validator %d, assignmentsPerGroup=%v", validatorIdx, assignmentsPerGroup) + return nil + } + + groupValidators := groups.get(*ourGroup) + if len(groupValidators) == 0 { + logger.Warnf("no validators found in group %d, assignmentsPerGroup=%v", *ourGroup, assignmentsPerGroup) + return nil + } + + parasAssignedToCore := assignmentsPerGroup[*ourGroup] + secondingLimit := len(parasAssignedToCore) + + return &localValidatorState{ + gridTracker: newGridTracker(), + active: &activeValidatorState{ + index: validatorIdx, + groupIndex: *ourGroup, + assignments: slices.Clone(parasAssignedToCore), + clusterTracker: newClusterTracker(groupValidators, uint(secondingLimit)), + }, + } +} diff --git a/dot/parachain/statement-distribution/active_leaves_update_test.go b/dot/parachain/statement-distribution/active_leaves_update_test.go new file mode 100644 index 0000000000..3400e36219 --- /dev/null +++ b/dot/parachain/statement-distribution/active_leaves_update_test.go @@ -0,0 +1,440 @@ +package statementdistribution + +import ( + "bytes" + "sync" + "testing" + + prospectiveparachainsmessages "github.com/ChainSafe/gossamer/dot/parachain/prospective-parachains/messages" + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto/sr25519" + keystore "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestHandleActiveLeavesUpdate_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + leafHash := common.MustBlake2bHash([]byte("leaf1")) + activatedLeaf := ¶chaintypes.ActivatedLeaf{Hash: leafHash} + + implicitViewMock := NewMockImplicitView(ctrl) + implicitViewMock.EXPECT(). + ActivateLeaf(leafHash, gomock.Any()). + Return(nil) + implicitViewMock.EXPECT(). + AllAllowedRelayParents(). 
+ Return([]common.Hash{leafHash}) + + rtInstanceMock := NewMockInstance(ctrl) + rtInstanceMock.EXPECT(). + ParachainHostDisabledValidators(). + Return([]parachaintypes.ValidatorIndex{}, nil) + + // as the returned session index does not exists + // in the perSession map, it will be created by handleActiveLeafUpdate + // the next mocks are needed to ensure the creation of the session state + rtInstanceMock.EXPECT(). + ParachainHostSessionIndexForChild(). + Return(parachaintypes.SessionIndex(1), nil) + + dummyKeystore := keystore.NewGenericKeystore("generic_test_keystore") + kp, err := sr25519.GenerateKeypair() + require.NoError(t, err) + + err = dummyKeystore.Insert(kp) + require.NoError(t, err) + + dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) + + sessionInfoDummy := parachaintypes.SessionInfo{ + ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1, 2, 3}, + RandomSeed: [32]byte{}, + DisputePeriod: parachaintypes.SessionIndex(10), + Validators: []parachaintypes.ValidatorID{dummyPubKey, {1}, {2}, {3}}, + DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{0}, {1}, {2}, {3}}, + AssignmentKeys: []parachaintypes.AssignmentID{parachaintypes.AssignmentID(dummyPubKey), {1}, {2}, {3}}, + ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}, {2, 3}}, + NCores: 2, + ZerothDelayTrancheWidth: 1, + RelayVRFModuloSamples: 1, + NDelayTranches: 1, + NoShowSlots: 1, + NeededApprovals: 1, + } + + groups := newGroups(sessionInfoDummy.ValidatorGroups, 3) + + rtInstanceMock.EXPECT(). + ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). + Return(&sessionInfoDummy, nil) + + rtInstanceMock.EXPECT(). + ParachainHostMinimumBackingVotes(). + Return(uint32(3), nil) + + featuresBitVec, err := parachaintypes.NewBitVec([]bool{true, true, false, true}) + require.NoError(t, err) + + rtInstanceMock.EXPECT(). + ParachainHostNodeFeatures(). + Return(featuresBitVec, nil) + + // the next runtime instances are needed to create the + // per relay parent state + dummyValidatorGroups := ¶chaintypes.ValidatorGroups{ + Validators: [][]parachaintypes.ValidatorIndex{{1}, {2}}, + GroupRotationInfo: parachaintypes.GroupRotationInfo{ + SessionStartBlock: parachaintypes.BlockNumber(100), + GroupRotationFrequency: parachaintypes.BlockNumber(10), + Now: parachaintypes.BlockNumber(105), + }, + } + rtInstanceMock.EXPECT(). + ParachainHostValidatorGroups(). + Return(dummyValidatorGroups, nil) + + dummyClaimQueue := parachaintypes.ClaimQueue{ + parachaintypes.CoreIndex{Index: 0}: {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.CoreIndex{Index: 1}: {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + } + transposedDummyClaimQueue := dummyClaimQueue.ToTransposed() + + rtInstanceMock.EXPECT(). + ParachainHostClaimQueue(). + Return(dummyClaimQueue, nil) + + blockStateMock := NewMockblockState(ctrl) + blockStateMock.EXPECT(). + GetRuntime(leafHash). 
+ Return(rtInstanceMock, nil) + + state := &v2State{ + implicitView: implicitViewMock, + perRelayParent: make(map[common.Hash]*perRelayParentState), + perSession: make(map[parachaintypes.SessionIndex]*perSessionState), + peers: map[peer.ID]peerState{}, + keystore: dummyKeystore, + candidates: &candidates{}, + } + + overseerCh := make(chan any, 1) + sd := &StatementDistribution{ + state: state, + blockState: blockStateMock, + SubSystemToOverseer: overseerCh, + } + + // start a goroutine to handle the overseer subsystem + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + hypotheticalMsg := <-overseerCh + msg, ok := hypotheticalMsg.(prospectiveparachainsmessages.GetHypotheticalMembership) + require.True(t, ok) + + // just return an empty slice + msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} + }() + + // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. + // Instead, we just ensure no panic and no error. + err = sd.handleActiveLeavesUpdate(activatedLeaf) + require.NoError(t, err) + wg.Wait() + + // assertions + expectedPerRelayParentState := &perRelayParentState{ + localValidator: &localValidatorState{ + gridTracker: newGridTracker(), + active: &activeValidatorState{ + index: parachaintypes.ValidatorIndex(0), + groupIndex: parachaintypes.GroupIndex(0), + assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + clusterTracker: &clusterTracker{ + validators: []parachaintypes.ValidatorIndex{0, 1}, + secondingLimit: 2, + knowledge: map[parachaintypes.ValidatorIndex]map[taggedKnowledge]struct{}{}, + pending: map[parachaintypes.ValidatorIndex]originatorStatementPairSet{}, + }, + }, + }, + statementStore: newStatementStore(groups), + session: parachaintypes.SessionIndex(1), + transposedClaimQueue: transposedDummyClaimQueue, + groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ + parachaintypes.ParaID(1): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(2): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(3): {parachaintypes.GroupIndex(1)}, + parachaintypes.ParaID(4): {parachaintypes.GroupIndex(1)}, + }, + disabledValidators: make(map[parachaintypes.ValidatorIndex]struct{}), + assignmentsPerGroup: map[parachaintypes.GroupIndex][]parachaintypes.ParaID{ + parachaintypes.GroupIndex(0): {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.GroupIndex(1): {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + }, + } + require.Len(t, state.perRelayParent, 1) + require.Equal(t, expectedPerRelayParentState, state.perRelayParent[leafHash]) + + expectedSessionState := newPerSessionState( + &sessionInfoDummy, + dummyKeystore, + 3, + true, + ) + require.Len(t, state.perSession, 1) + require.Equal(t, expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) +} + +func TestHandleActiveLeavesUpdate_SendPeerMessageForRelayParent(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + leafHash := common.MustBlake2bHash([]byte("leaf1")) + activatedLeaf := ¶chaintypes.ActivatedLeaf{Hash: leafHash} + + implicitViewMock := NewMockImplicitView(ctrl) + implicitViewMock.EXPECT(). + ActivateLeaf(leafHash, gomock.Any()). + Return(nil) + implicitViewMock.EXPECT(). + AllAllowedRelayParents(). + Return([]common.Hash{leafHash}) + + rtInstanceMock := NewMockInstance(ctrl) + rtInstanceMock.EXPECT(). + ParachainHostDisabledValidators(). 
+ Return([]parachaintypes.ValidatorIndex{}, nil) + + // as the returned session index does not exists + // in the perSession map, it will be created by handleActiveLeafUpdate + // the next mocks are needed to ensure the creation of the session state + rtInstanceMock.EXPECT(). + ParachainHostSessionIndexForChild(). + Return(parachaintypes.SessionIndex(1), nil) + + dummyKeystore := keystore.NewGenericKeystore("generic_test_keystore") + kp, err := sr25519.GenerateKeypair() + require.NoError(t, err) + + err = dummyKeystore.Insert(kp) + require.NoError(t, err) + + dummyPubKey := parachaintypes.ValidatorID(dummyKeystore.Sr25519PublicKeys()[0].Encode()) + + sessionInfoDummy := parachaintypes.SessionInfo{ + ActiveValidatorIndices: []parachaintypes.ValidatorIndex{0, 1, 2, 3}, + RandomSeed: [32]byte{}, + DisputePeriod: parachaintypes.SessionIndex(10), + Validators: []parachaintypes.ValidatorID{dummyPubKey, {1}, {2}, {3}}, + DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{{0}, {1}, {2}, {3}}, + AssignmentKeys: []parachaintypes.AssignmentID{parachaintypes.AssignmentID(dummyPubKey), {1}, {2}, {3}}, + ValidatorGroups: [][]parachaintypes.ValidatorIndex{{0, 1}, {2, 3}}, + NCores: 2, + ZerothDelayTrancheWidth: 1, + RelayVRFModuloSamples: 1, + NDelayTranches: 1, + NoShowSlots: 1, + NeededApprovals: 1, + } + + rtInstanceMock.EXPECT(). + ParachainHostSessionInfo(parachaintypes.SessionIndex(1)). + Return(&sessionInfoDummy, nil) + + rtInstanceMock.EXPECT(). + ParachainHostMinimumBackingVotes(). + Return(uint32(3), nil) + + featuresBitVec, err := parachaintypes.NewBitVec([]bool{true, true, false, true}) + require.NoError(t, err) + + rtInstanceMock.EXPECT(). + ParachainHostNodeFeatures(). + Return(featuresBitVec, nil) + + // the next runtime instances are needed to create the + // per relay parent state + dummyValidatorGroups := ¶chaintypes.ValidatorGroups{ + Validators: [][]parachaintypes.ValidatorIndex{{1}, {2}}, + GroupRotationInfo: parachaintypes.GroupRotationInfo{ + SessionStartBlock: parachaintypes.BlockNumber(100), + GroupRotationFrequency: parachaintypes.BlockNumber(10), + Now: parachaintypes.BlockNumber(105), + }, + } + rtInstanceMock.EXPECT(). + ParachainHostValidatorGroups(). + Return(dummyValidatorGroups, nil) + + dummyClaimQueue := parachaintypes.ClaimQueue{ + parachaintypes.CoreIndex{Index: 0}: {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.CoreIndex{Index: 1}: {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + } + transposedDummyClaimQueue := dummyClaimQueue.ToTransposed() + + rtInstanceMock.EXPECT(). + ParachainHostClaimQueue(). + Return(dummyClaimQueue, nil) + + blockStateMock := NewMockblockState(ctrl) + blockStateMock.EXPECT(). + GetRuntime(leafHash). 
+ Return(rtInstanceMock, nil) + + candidatesTracker := &candidates{ + candidates: map[parachaintypes.CandidateHash]candidateState{ + {Value: common.Hash(bytes.Repeat([]byte{0xff}, 32))}: &confirmedCandidate{ + pvd: ¶chaintypes.PersistedValidationData{}, + }, + }, + } + + peer1AuthDiscoveryID := parachaintypes.AuthorityDiscoveryID{2} + + state := &v2State{ + implicitView: implicitViewMock, + perRelayParent: make(map[common.Hash]*perRelayParentState), + perSession: make(map[parachaintypes.SessionIndex]*perSessionState), + peers: map[peer.ID]peerState{ + peer.ID("peer1"): { + view: parachaintypes.View{ + Heads: []common.Hash{leafHash}, + }, + protocolVersion: validationprotocol.ValidationVersionV3, + implicitView: map[common.Hash]struct{}{}, + discoveryIds: &map[parachaintypes.AuthorityDiscoveryID]struct{}{ + peer1AuthDiscoveryID: {}, + }, + }, + }, + keystore: dummyKeystore, + candidates: candidatesTracker, + } + + overseerCh := make(chan any, 1) + sd := &StatementDistribution{ + state: state, + blockState: blockStateMock, + SubSystemToOverseer: overseerCh, + } + + // start a goroutine to handle the overseer subsystem + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + + // first message is a hypothetical membership request + hypotheticalMsg := <-overseerCh + msg, ok := hypotheticalMsg.(prospectiveparachainsmessages.GetHypotheticalMembership) + require.True(t, ok) + + // just return an empty slice + msg.Response <- []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem{} + }() + + // The actual sendPeerMessagesForRelayParent will run, but we can't assert its call directly. + // Instead, we just ensure no panic and no error. + err = sd.handleActiveLeavesUpdate(activatedLeaf) + require.NoError(t, err) + wg.Wait() + + // assertions + groups := newGroups(sessionInfoDummy.ValidatorGroups, 3) + + expectedPerRelayParentState := &perRelayParentState{ + localValidator: &localValidatorState{ + gridTracker: newGridTracker(), + active: &activeValidatorState{ + index: parachaintypes.ValidatorIndex(0), + groupIndex: parachaintypes.GroupIndex(0), + assignments: []parachaintypes.ParaID{parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + clusterTracker: &clusterTracker{ + validators: []parachaintypes.ValidatorIndex{0, 1}, + secondingLimit: 2, + knowledge: map[parachaintypes.ValidatorIndex]map[taggedKnowledge]struct{}{}, + pending: map[parachaintypes.ValidatorIndex]originatorStatementPairSet{}, + }, + }, + }, + statementStore: newStatementStore(groups), + session: parachaintypes.SessionIndex(1), + transposedClaimQueue: transposedDummyClaimQueue, + groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{ + parachaintypes.ParaID(1): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(2): {parachaintypes.GroupIndex(0)}, + parachaintypes.ParaID(3): {parachaintypes.GroupIndex(1)}, + parachaintypes.ParaID(4): {parachaintypes.GroupIndex(1)}, + }, + disabledValidators: make(map[parachaintypes.ValidatorIndex]struct{}), + assignmentsPerGroup: map[parachaintypes.GroupIndex][]parachaintypes.ParaID{ + parachaintypes.GroupIndex(0): {parachaintypes.ParaID(1), parachaintypes.ParaID(2)}, + parachaintypes.GroupIndex(1): {parachaintypes.ParaID(3), parachaintypes.ParaID(4)}, + }, + } + require.Len(t, state.perRelayParent, 1) + require.Equal(t, expectedPerRelayParentState, state.perRelayParent[leafHash]) + + expectedSessionState := newPerSessionState( + &sessionInfoDummy, + dummyKeystore, + 3, + true, + ) + require.Len(t, state.perSession, 1) + require.Equal(t, 
expectedSessionState, state.perSession[parachaintypes.SessionIndex(1)]) +} + +func TestHandleDeactivatedLeaves(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + implicitViewMock := NewMockImplicitView(ctrl) + implicitViewMock.EXPECT(). + DeactivateLeaf(common.Hash{0xcd}). + Return([]common.Hash{{0xab}, {0xcd}}) + + reqManagerMock := NewMockrequestManager(ctrl) + reqManagerMock.EXPECT(). + removeByRelayParent(common.Hash{0xcd}) + + reqManagerMock.EXPECT(). + removeByRelayParent(common.Hash{0xab}) + + state := &v2State{ + implicitView: implicitViewMock, + perRelayParent: map[common.Hash]*perRelayParentState{ + {0xab}: {}, + {0xcd}: {}, + {0xef}: {session: parachaintypes.SessionIndex(2)}, + }, + perSession: map[parachaintypes.SessionIndex]*perSessionState{ + parachaintypes.SessionIndex(1): nil, + parachaintypes.SessionIndex(2): nil, + }, + requestManager: reqManagerMock, + candidates: &candidates{}, + } + + sd := &StatementDistribution{ + state: state, + } + + sd.handleDeactivatedLeaves([]common.Hash{{0xcd}}) + + require.Len(t, sd.state.perRelayParent, 1) + require.Equal(t, &perRelayParentState{session: parachaintypes.SessionIndex(2)}, + sd.state.perRelayParent[common.Hash{0xef}]) + + require.Len(t, sd.state.perSession, 1) + _, ok := sd.state.perSession[parachaintypes.SessionIndex(2)] + require.True(t, ok) +} diff --git a/dot/parachain/statement-distribution/cluster_tracker.go b/dot/parachain/statement-distribution/cluster_tracker.go index 3421ef3ba7..1988e98c66 100644 --- a/dot/parachain/statement-distribution/cluster_tracker.go +++ b/dot/parachain/statement-distribution/cluster_tracker.go @@ -497,12 +497,13 @@ func (c *clusterTracker) noteSent( ) { targetKnowledge, ok := c.knowledge[target] if !ok { - targetKnowledge = map[taggedKnowledge]struct{}{ - outgoingP2P{specific{statement, originator}}: {}, - } + targetKnowledge = map[taggedKnowledge]struct{}{} c.knowledge[target] = targetKnowledge } + targetKnowledge[outgoingP2P{specific{statement, originator}}] = struct{}{} + c.knowledge[target] = targetKnowledge + if _, ok := statement.(*parachaintypes.CompactSeconded); ok { targetKnowledge[outgoingP2P{general{statement.CandidateHash()}}] = struct{}{} @@ -531,7 +532,7 @@ func (c *clusterTracker) pendingStatementsFor(target parachaintypes.ValidatorInd } for pair := range pending { - switch pair.statement.(type) { + switch pair.compactStmt.(type) { case *parachaintypes.CompactSeconded: seconded = append(seconded, pair) case *parachaintypes.CompactValid: @@ -551,7 +552,7 @@ func (c *clusterTracker) pendingStatementsFor(target parachaintypes.ValidatorInd // in backing. Occasional pending statements are expected if two authorities // can't detect each other or after restart, where it takes a while to discover // the whole network. 
-func (c *clusterTracker) warnIfTooManyStatements(parentHash common.Hash) { //nolint:unused +func (c *clusterTracker) warningIfTooManyPendingStatements(parentHash common.Hash) { //nolint:unused count := 0 for _, set := range c.pending { if len(set) > 0 { diff --git a/dot/parachain/statement-distribution/cluster_tracker_test.go b/dot/parachain/statement-distribution/cluster_tracker_test.go index fef6554ddc..b9cf347add 100644 --- a/dot/parachain/statement-distribution/cluster_tracker_test.go +++ b/dot/parachain/statement-distribution/cluster_tracker_test.go @@ -229,6 +229,61 @@ func TestClusterTracker_receive_statements(t *testing.T) { }) } +func TestClusterTracker_noteSent(t *testing.T) { + group := []parachaintypes.ValidatorIndex{5, 200, 24, 146} + secondingLimit := uint(2) + tracker := newClusterTracker(group, secondingLimit) + + secondedStmt := parachaintypes.NewCompactSeconded( + parachaintypes.CandidateHash{Value: common.Hash{0xab}}) + + // noteSent should not panic if the validator is not in the group + tracker.noteSent( + parachaintypes.ValidatorIndex(100), + parachaintypes.ValidatorIndex(5), + secondedStmt, + ) + + expectedSpecific := outgoingP2P{specific{secondedStmt, parachaintypes.ValidatorIndex(5)}} + _, ok := tracker.knowledge[parachaintypes.ValidatorIndex(100)][expectedSpecific] + require.True(t, ok) + + expectedGeneral := outgoingP2P{general{secondedStmt.CandidateHash()}} + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(100)][expectedGeneral] + require.True(t, ok) + + // since the compact statement is seconded, the originator will also be part of the knowledge + expectedSecondedOriginator := seconded{secondedStmt.CandidateHash()} + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(5)][expectedSecondedOriginator] + require.True(t, ok) + + // add a pending statement that should be deleted after noteSent. 
+ validStmt := parachaintypes.NewCompactValid( + parachaintypes.CandidateHash{Value: common.Hash{0xab}}) + tracker.pending[parachaintypes.ValidatorIndex(5)] = map[originatorStatementPair]struct{}{ + { + validatorIndex: parachaintypes.ValidatorIndex(24), + compactStmt: validStmt, + }: {}, + } + + tracker.noteSent( + parachaintypes.ValidatorIndex(5), + parachaintypes.ValidatorIndex(24), + validStmt, + ) + + // since the compact statement is valid, we dont add the originator to knowledge + expectedSpecific = outgoingP2P{specific{validStmt, parachaintypes.ValidatorIndex(24)}} + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(5)][expectedSpecific] + require.True(t, ok) + + _, ok = tracker.knowledge[parachaintypes.ValidatorIndex(24)] + require.False(t, ok) + + require.Len(t, tracker.pending[parachaintypes.ValidatorIndex(5)], 0) +} + // tests that cover clusterTracker.canSend() and clusterTracker.noteSent() func TestClusterTracker_send_statements(t *testing.T) { t.Parallel() diff --git a/dot/parachain/statement-distribution/grid_tracker.go b/dot/parachain/statement-distribution/grid_tracker.go index 937fd41ae5..2bedfb8a64 100644 --- a/dot/parachain/statement-distribution/grid_tracker.go +++ b/dot/parachain/statement-distribution/grid_tracker.go @@ -43,7 +43,7 @@ type manifestKindByCandidateHash map[parachaintypes.CandidateHash]manifestKind type originatorStatementPair struct { validatorIndex parachaintypes.ValidatorIndex - statement parachaintypes.CompactStatement + compactStmt parachaintypes.CompactStatement } type originatorStatementPairSet map[originatorStatementPair]struct{} @@ -60,7 +60,7 @@ func (o originatorStatementPairSet) remove( statement parachaintypes.CompactStatement, ) bool { //nolint:unparam for pair := range o { - if pair.validatorIndex == validatorIndex && pair.statement.Equals(statement) { + if pair.validatorIndex == validatorIndex && pair.compactStmt.Equals(statement) { delete(o, pair) return true } @@ -344,7 +344,7 @@ func (g *gridTracker) allPendingStatementsFor( var seconded, valid []originatorStatementPair for pair := range g.pendingStatements[validatorIndex] { - if _, ok := pair.statement.(*parachaintypes.CompactSeconded); ok { + if _, ok := pair.compactStmt.(*parachaintypes.CompactSeconded); ok { seconded = append(seconded, pair) } else { valid = append(valid, pair) @@ -568,7 +568,7 @@ func decomposeStatementFilter( pair := originatorStatementPair{ validatorIndex: validatorIndex, - statement: parachaintypes.NewCompactSeconded(candidateHash), + compactStmt: parachaintypes.NewCompactSeconded(candidateHash), } result[pair] = struct{}{} @@ -581,7 +581,7 @@ func decomposeStatementFilter( pair := originatorStatementPair{ validatorIndex: validatorIndex, - statement: parachaintypes.NewCompactValid(candidateHash), + compactStmt: parachaintypes.NewCompactValid(candidateHash), } result[pair] = struct{}{} diff --git a/dot/parachain/statement-distribution/grid_tracker_test.go b/dot/parachain/statement-distribution/grid_tracker_test.go index 8cf8e816d6..517701e823 100644 --- a/dot/parachain/statement-distribution/grid_tracker_test.go +++ b/dot/parachain/statement-distribution/grid_tracker_test.go @@ -867,7 +867,7 @@ func ensurePendingStatements( expectedPair := originatorStatementPair{ validatorIndex: originator, - statement: expectedStatement, + compactStmt: expectedStatement, } require.Equal(t, expectedPair, allPendingStatements[0]) } diff --git a/dot/parachain/statement-distribution/mocks_block_state_test.go b/dot/parachain/statement-distribution/mocks_block_state_test.go 
new file mode 100644 index 0000000000..8cea19c6b7 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_block_state_test.go @@ -0,0 +1,73 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: blockState) +// +// Generated by this command: +// +// mockgen -destination=mocks_block_state_test.go -package=statementdistribution . blockState +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + gomock "go.uber.org/mock/gomock" +) + +// MockblockState is a mock of blockState interface. +type MockblockState struct { + ctrl *gomock.Controller + recorder *MockblockStateMockRecorder + isgomock struct{} +} + +// MockblockStateMockRecorder is the mock recorder for MockblockState. +type MockblockStateMockRecorder struct { + mock *MockblockState +} + +// NewMockblockState creates a new mock instance. +func NewMockblockState(ctrl *gomock.Controller) *MockblockState { + mock := &MockblockState{ctrl: ctrl} + mock.recorder = &MockblockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockblockState) EXPECT() *MockblockStateMockRecorder { + return m.recorder +} + +// GetHeader mocks base method. +func (m *MockblockState) GetHeader(hash common.Hash) (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader", hash) + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHeader indicates an expected call of GetHeader. +func (mr *MockblockStateMockRecorder) GetHeader(hash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockblockState)(nil).GetHeader), hash) +} + +// GetRuntime mocks base method. +func (m *MockblockState) GetRuntime(blockHash common.Hash) (runtime.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRuntime", blockHash) + ret0, _ := ret[0].(runtime.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRuntime indicates an expected call of GetRuntime. +func (mr *MockblockStateMockRecorder) GetRuntime(blockHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRuntime", reflect.TypeOf((*MockblockState)(nil).GetRuntime), blockHash) +} diff --git a/dot/parachain/statement-distribution/mocks_candidates_store_test.go b/dot/parachain/statement-distribution/mocks_candidates_store_test.go deleted file mode 100644 index b488193c54..0000000000 --- a/dot/parachain/statement-distribution/mocks_candidates_store_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: candidatesStore) -// -// Generated by this command: -// -// mockgen -destination=mocks_candidates_store_test.go -package=statementdistribution . candidatesStore -// - -// Package statementdistribution is a generated GoMock package. -package statementdistribution - -import ( - reflect "reflect" - - parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - gomock "go.uber.org/mock/gomock" -) - -// MockcandidatesStore is a mock of candidatesStore interface. 
-type MockcandidatesStore struct { - ctrl *gomock.Controller - recorder *MockcandidatesStoreMockRecorder - isgomock struct{} -} - -// MockcandidatesStoreMockRecorder is the mock recorder for MockcandidatesStore. -type MockcandidatesStoreMockRecorder struct { - mock *MockcandidatesStore -} - -// NewMockcandidatesStore creates a new mock instance. -func NewMockcandidatesStore(ctrl *gomock.Controller) *MockcandidatesStore { - mock := &MockcandidatesStore{ctrl: ctrl} - mock.recorder = &MockcandidatesStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockcandidatesStore) EXPECT() *MockcandidatesStoreMockRecorder { - return m.recorder -} - -// getConfirmed mocks base method. -func (m *MockcandidatesStore) getConfirmed(candidateHash parachaintypes.CandidateHash) (*confirmedCandidate, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getConfirmed", candidateHash) - ret0, _ := ret[0].(*confirmedCandidate) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// getConfirmed indicates an expected call of getConfirmed. -func (mr *MockcandidatesStoreMockRecorder) getConfirmed(candidateHash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getConfirmed", reflect.TypeOf((*MockcandidatesStore)(nil).getConfirmed), candidateHash) -} diff --git a/dot/parachain/statement-distribution/mocks_generate_test.go b/dot/parachain/statement-distribution/mocks_generate_test.go index 2f2fadc987..cd9fa2da36 100644 --- a/dot/parachain/statement-distribution/mocks_generate_test.go +++ b/dot/parachain/statement-distribution/mocks_generate_test.go @@ -4,4 +4,6 @@ package statementdistribution //go:generate mockgen -destination=mocks_implicitview_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/dot/parachain/util ImplicitView -//go:generate mockgen -destination=mocks_candidates_store_test.go -package=$GOPACKAGE . candidatesStore +//go:generate mockgen -destination=mocks_req_manager_test.go -package=$GOPACKAGE . requestManager +//go:generate mockgen -destination=mocks_block_state_test.go -package=$GOPACKAGE . blockState +//go:generate mockgen -destination=mocks_instance_test.go -package=$GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance diff --git a/dot/parachain/statement-distribution/mocks_instance_test.go b/dot/parachain/statement-distribution/mocks_instance_test.go new file mode 100644 index 0000000000..ce94510bb6 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_instance_test.go @@ -0,0 +1,756 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/lib/runtime (interfaces: Instance) +// +// Generated by this command: +// +// mockgen -destination=mocks_instance_test.go -package=statementdistribution github.com/ChainSafe/gossamer/lib/runtime Instance +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + types "github.com/ChainSafe/gossamer/dot/types" + common "github.com/ChainSafe/gossamer/lib/common" + ed25519 "github.com/ChainSafe/gossamer/lib/crypto/ed25519" + keystore "github.com/ChainSafe/gossamer/lib/keystore" + runtime "github.com/ChainSafe/gossamer/lib/runtime" + transaction "github.com/ChainSafe/gossamer/lib/transaction" + gomock "go.uber.org/mock/gomock" +) + +// MockInstance is a mock of Instance interface. 
+type MockInstance struct { + ctrl *gomock.Controller + recorder *MockInstanceMockRecorder + isgomock struct{} +} + +// MockInstanceMockRecorder is the mock recorder for MockInstance. +type MockInstanceMockRecorder struct { + mock *MockInstance +} + +// NewMockInstance creates a new mock instance. +func NewMockInstance(ctrl *gomock.Controller) *MockInstance { + mock := &MockInstance{ctrl: ctrl} + mock.recorder = &MockInstanceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockInstance) EXPECT() *MockInstanceMockRecorder { + return m.recorder +} + +// ApplyExtrinsic mocks base method. +func (m *MockInstance) ApplyExtrinsic(data types.Extrinsic) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyExtrinsic", data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ApplyExtrinsic indicates an expected call of ApplyExtrinsic. +func (mr *MockInstanceMockRecorder) ApplyExtrinsic(data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyExtrinsic", reflect.TypeOf((*MockInstance)(nil).ApplyExtrinsic), data) +} + +// BabeConfiguration mocks base method. +func (m *MockInstance) BabeConfiguration() (*types.BabeConfiguration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeConfiguration") + ret0, _ := ret[0].(*types.BabeConfiguration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BabeConfiguration indicates an expected call of BabeConfiguration. +func (mr *MockInstanceMockRecorder) BabeConfiguration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeConfiguration", reflect.TypeOf((*MockInstance)(nil).BabeConfiguration)) +} + +// BabeGenerateKeyOwnershipProof mocks base method. +func (m *MockInstance) BabeGenerateKeyOwnershipProof(slot uint64, authorityID [32]byte) (types.OpaqueKeyOwnershipProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeGenerateKeyOwnershipProof", slot, authorityID) + ret0, _ := ret[0].(types.OpaqueKeyOwnershipProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BabeGenerateKeyOwnershipProof indicates an expected call of BabeGenerateKeyOwnershipProof. +func (mr *MockInstanceMockRecorder) BabeGenerateKeyOwnershipProof(slot, authorityID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeGenerateKeyOwnershipProof", reflect.TypeOf((*MockInstance)(nil).BabeGenerateKeyOwnershipProof), slot, authorityID) +} + +// BabeSubmitReportEquivocationUnsignedExtrinsic mocks base method. +func (m *MockInstance) BabeSubmitReportEquivocationUnsignedExtrinsic(equivocationProof types.BabeEquivocationProof, keyOwnershipProof types.OpaqueKeyOwnershipProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BabeSubmitReportEquivocationUnsignedExtrinsic", equivocationProof, keyOwnershipProof) + ret0, _ := ret[0].(error) + return ret0 +} + +// BabeSubmitReportEquivocationUnsignedExtrinsic indicates an expected call of BabeSubmitReportEquivocationUnsignedExtrinsic. 
+func (mr *MockInstanceMockRecorder) BabeSubmitReportEquivocationUnsignedExtrinsic(equivocationProof, keyOwnershipProof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BabeSubmitReportEquivocationUnsignedExtrinsic", reflect.TypeOf((*MockInstance)(nil).BabeSubmitReportEquivocationUnsignedExtrinsic), equivocationProof, keyOwnershipProof) +} + +// CheckInherents mocks base method. +func (m *MockInstance) CheckInherents() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CheckInherents") +} + +// CheckInherents indicates an expected call of CheckInherents. +func (mr *MockInstanceMockRecorder) CheckInherents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckInherents", reflect.TypeOf((*MockInstance)(nil).CheckInherents)) +} + +// DecodeSessionKeys mocks base method. +func (m *MockInstance) DecodeSessionKeys(enc []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecodeSessionKeys", enc) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DecodeSessionKeys indicates an expected call of DecodeSessionKeys. +func (mr *MockInstanceMockRecorder) DecodeSessionKeys(enc any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeSessionKeys", reflect.TypeOf((*MockInstance)(nil).DecodeSessionKeys), enc) +} + +// Exec mocks base method. +func (m *MockInstance) Exec(function string, data []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exec", function, data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exec indicates an expected call of Exec. +func (mr *MockInstanceMockRecorder) Exec(function, data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockInstance)(nil).Exec), function, data) +} + +// ExecuteBlock mocks base method. +func (m *MockInstance) ExecuteBlock(block *types.Block) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteBlock", block) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteBlock indicates an expected call of ExecuteBlock. +func (mr *MockInstanceMockRecorder) ExecuteBlock(block any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteBlock", reflect.TypeOf((*MockInstance)(nil).ExecuteBlock), block) +} + +// FinalizeBlock mocks base method. +func (m *MockInstance) FinalizeBlock() (*types.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FinalizeBlock") + ret0, _ := ret[0].(*types.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FinalizeBlock indicates an expected call of FinalizeBlock. +func (mr *MockInstanceMockRecorder) FinalizeBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeBlock", reflect.TypeOf((*MockInstance)(nil).FinalizeBlock)) +} + +// GenerateSessionKeys mocks base method. +func (m *MockInstance) GenerateSessionKeys() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GenerateSessionKeys") +} + +// GenerateSessionKeys indicates an expected call of GenerateSessionKeys. +func (mr *MockInstanceMockRecorder) GenerateSessionKeys() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSessionKeys", reflect.TypeOf((*MockInstance)(nil).GenerateSessionKeys)) +} + +// GetCodeHash mocks base method. 
+func (m *MockInstance) GetCodeHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCodeHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// GetCodeHash indicates an expected call of GetCodeHash. +func (mr *MockInstanceMockRecorder) GetCodeHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCodeHash", reflect.TypeOf((*MockInstance)(nil).GetCodeHash)) +} + +// GrandpaAuthorities mocks base method. +func (m *MockInstance) GrandpaAuthorities() ([]types.Authority, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaAuthorities") + ret0, _ := ret[0].([]types.Authority) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GrandpaAuthorities indicates an expected call of GrandpaAuthorities. +func (mr *MockInstanceMockRecorder) GrandpaAuthorities() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaAuthorities", reflect.TypeOf((*MockInstance)(nil).GrandpaAuthorities)) +} + +// GrandpaGenerateKeyOwnershipProof mocks base method. +func (m *MockInstance) GrandpaGenerateKeyOwnershipProof(authSetID uint64, authorityID ed25519.PublicKeyBytes) (types.GrandpaOpaqueKeyOwnershipProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaGenerateKeyOwnershipProof", authSetID, authorityID) + ret0, _ := ret[0].(types.GrandpaOpaqueKeyOwnershipProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GrandpaGenerateKeyOwnershipProof indicates an expected call of GrandpaGenerateKeyOwnershipProof. +func (mr *MockInstanceMockRecorder) GrandpaGenerateKeyOwnershipProof(authSetID, authorityID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaGenerateKeyOwnershipProof", reflect.TypeOf((*MockInstance)(nil).GrandpaGenerateKeyOwnershipProof), authSetID, authorityID) +} + +// GrandpaSubmitReportEquivocationUnsignedExtrinsic mocks base method. +func (m *MockInstance) GrandpaSubmitReportEquivocationUnsignedExtrinsic(equivocationProof types.GrandpaEquivocationProof, keyOwnershipProof types.GrandpaOpaqueKeyOwnershipProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GrandpaSubmitReportEquivocationUnsignedExtrinsic", equivocationProof, keyOwnershipProof) + ret0, _ := ret[0].(error) + return ret0 +} + +// GrandpaSubmitReportEquivocationUnsignedExtrinsic indicates an expected call of GrandpaSubmitReportEquivocationUnsignedExtrinsic. +func (mr *MockInstanceMockRecorder) GrandpaSubmitReportEquivocationUnsignedExtrinsic(equivocationProof, keyOwnershipProof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GrandpaSubmitReportEquivocationUnsignedExtrinsic", reflect.TypeOf((*MockInstance)(nil).GrandpaSubmitReportEquivocationUnsignedExtrinsic), equivocationProof, keyOwnershipProof) +} + +// InherentExtrinsics mocks base method. +func (m *MockInstance) InherentExtrinsics(data []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InherentExtrinsics", data) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InherentExtrinsics indicates an expected call of InherentExtrinsics. +func (mr *MockInstanceMockRecorder) InherentExtrinsics(data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InherentExtrinsics", reflect.TypeOf((*MockInstance)(nil).InherentExtrinsics), data) +} + +// InitializeBlock mocks base method. 
+func (m *MockInstance) InitializeBlock(header *types.Header) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitializeBlock", header) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitializeBlock indicates an expected call of InitializeBlock. +func (mr *MockInstanceMockRecorder) InitializeBlock(header any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeBlock", reflect.TypeOf((*MockInstance)(nil).InitializeBlock), header) +} + +// Keystore mocks base method. +func (m *MockInstance) Keystore() *keystore.GlobalKeystore { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Keystore") + ret0, _ := ret[0].(*keystore.GlobalKeystore) + return ret0 +} + +// Keystore indicates an expected call of Keystore. +func (mr *MockInstanceMockRecorder) Keystore() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keystore", reflect.TypeOf((*MockInstance)(nil).Keystore)) +} + +// Metadata mocks base method. +func (m *MockInstance) Metadata() ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Metadata") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Metadata indicates an expected call of Metadata. +func (mr *MockInstanceMockRecorder) Metadata() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Metadata", reflect.TypeOf((*MockInstance)(nil).Metadata)) +} + +// NetworkService mocks base method. +func (m *MockInstance) NetworkService() runtime.BasicNetwork { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetworkService") + ret0, _ := ret[0].(runtime.BasicNetwork) + return ret0 +} + +// NetworkService indicates an expected call of NetworkService. +func (mr *MockInstanceMockRecorder) NetworkService() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkService", reflect.TypeOf((*MockInstance)(nil).NetworkService)) +} + +// NodeStorage mocks base method. +func (m *MockInstance) NodeStorage() runtime.NodeStorage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeStorage") + ret0, _ := ret[0].(runtime.NodeStorage) + return ret0 +} + +// NodeStorage indicates an expected call of NodeStorage. +func (mr *MockInstanceMockRecorder) NodeStorage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStorage", reflect.TypeOf((*MockInstance)(nil).NodeStorage)) +} + +// OffchainWorker mocks base method. +func (m *MockInstance) OffchainWorker() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OffchainWorker") +} + +// OffchainWorker indicates an expected call of OffchainWorker. +func (mr *MockInstanceMockRecorder) OffchainWorker() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OffchainWorker", reflect.TypeOf((*MockInstance)(nil).OffchainWorker)) +} + +// ParachainHostAsyncBackingParams mocks base method. +func (m *MockInstance) ParachainHostAsyncBackingParams() (*parachaintypes.AsyncBackingParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostAsyncBackingParams") + ret0, _ := ret[0].(*parachaintypes.AsyncBackingParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostAsyncBackingParams indicates an expected call of ParachainHostAsyncBackingParams. 
+func (mr *MockInstanceMockRecorder) ParachainHostAsyncBackingParams() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostAsyncBackingParams", reflect.TypeOf((*MockInstance)(nil).ParachainHostAsyncBackingParams)) +} + +// ParachainHostAvailabilityCores mocks base method. +func (m *MockInstance) ParachainHostAvailabilityCores() ([]parachaintypes.CoreState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostAvailabilityCores") + ret0, _ := ret[0].([]parachaintypes.CoreState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostAvailabilityCores indicates an expected call of ParachainHostAvailabilityCores. +func (mr *MockInstanceMockRecorder) ParachainHostAvailabilityCores() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostAvailabilityCores", reflect.TypeOf((*MockInstance)(nil).ParachainHostAvailabilityCores)) +} + +// ParachainHostBackingConstraints mocks base method. +func (m *MockInstance) ParachainHostBackingConstraints(paraID parachaintypes.ParaID) (*parachaintypes.VStagingConstraints, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostBackingConstraints", paraID) + ret0, _ := ret[0].(*parachaintypes.VStagingConstraints) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostBackingConstraints indicates an expected call of ParachainHostBackingConstraints. +func (mr *MockInstanceMockRecorder) ParachainHostBackingConstraints(paraID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostBackingConstraints", reflect.TypeOf((*MockInstance)(nil).ParachainHostBackingConstraints), paraID) +} + +// ParachainHostCandidateEvents mocks base method. +func (m *MockInstance) ParachainHostCandidateEvents() ([]parachaintypes.CandidateEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCandidateEvents") + ret0, _ := ret[0].([]parachaintypes.CandidateEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCandidateEvents indicates an expected call of ParachainHostCandidateEvents. +func (mr *MockInstanceMockRecorder) ParachainHostCandidateEvents() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCandidateEvents", reflect.TypeOf((*MockInstance)(nil).ParachainHostCandidateEvents)) +} + +// ParachainHostCandidatePendingAvailability mocks base method. +func (m *MockInstance) ParachainHostCandidatePendingAvailability(parachainID parachaintypes.ParaID) (*parachaintypes.CommittedCandidateReceiptV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCandidatePendingAvailability", parachainID) + ret0, _ := ret[0].(*parachaintypes.CommittedCandidateReceiptV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCandidatePendingAvailability indicates an expected call of ParachainHostCandidatePendingAvailability. +func (mr *MockInstanceMockRecorder) ParachainHostCandidatePendingAvailability(parachainID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCandidatePendingAvailability", reflect.TypeOf((*MockInstance)(nil).ParachainHostCandidatePendingAvailability), parachainID) +} + +// ParachainHostCandidatesPendingAvailability mocks base method. 
+func (m *MockInstance) ParachainHostCandidatesPendingAvailability(paraID parachaintypes.ParaID) ([]parachaintypes.CommittedCandidateReceiptV2, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCandidatesPendingAvailability", paraID) + ret0, _ := ret[0].([]parachaintypes.CommittedCandidateReceiptV2) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCandidatesPendingAvailability indicates an expected call of ParachainHostCandidatesPendingAvailability. +func (mr *MockInstanceMockRecorder) ParachainHostCandidatesPendingAvailability(paraID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCandidatesPendingAvailability", reflect.TypeOf((*MockInstance)(nil).ParachainHostCandidatesPendingAvailability), paraID) +} + +// ParachainHostCheckValidationOutputs mocks base method. +func (m *MockInstance) ParachainHostCheckValidationOutputs(parachainID parachaintypes.ParaID, outputs parachaintypes.CandidateCommitments) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostCheckValidationOutputs", parachainID, outputs) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostCheckValidationOutputs indicates an expected call of ParachainHostCheckValidationOutputs. +func (mr *MockInstanceMockRecorder) ParachainHostCheckValidationOutputs(parachainID, outputs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostCheckValidationOutputs", reflect.TypeOf((*MockInstance)(nil).ParachainHostCheckValidationOutputs), parachainID, outputs) +} + +// ParachainHostClaimQueue mocks base method. +func (m *MockInstance) ParachainHostClaimQueue() (parachaintypes.ClaimQueue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostClaimQueue") + ret0, _ := ret[0].(parachaintypes.ClaimQueue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostClaimQueue indicates an expected call of ParachainHostClaimQueue. +func (mr *MockInstanceMockRecorder) ParachainHostClaimQueue() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostClaimQueue", reflect.TypeOf((*MockInstance)(nil).ParachainHostClaimQueue)) +} + +// ParachainHostDisabledValidators mocks base method. +func (m *MockInstance) ParachainHostDisabledValidators() ([]parachaintypes.ValidatorIndex, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostDisabledValidators") + ret0, _ := ret[0].([]parachaintypes.ValidatorIndex) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostDisabledValidators indicates an expected call of ParachainHostDisabledValidators. +func (mr *MockInstanceMockRecorder) ParachainHostDisabledValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostDisabledValidators", reflect.TypeOf((*MockInstance)(nil).ParachainHostDisabledValidators)) +} + +// ParachainHostDisputes mocks base method. +func (m *MockInstance) ParachainHostDisputes() (map[parachaintypes.DisputeKey]parachaintypes.DisputeState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostDisputes") + ret0, _ := ret[0].(map[parachaintypes.DisputeKey]parachaintypes.DisputeState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostDisputes indicates an expected call of ParachainHostDisputes. 
+func (mr *MockInstanceMockRecorder) ParachainHostDisputes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostDisputes", reflect.TypeOf((*MockInstance)(nil).ParachainHostDisputes)) +} + +// ParachainHostMinimumBackingVotes mocks base method. +func (m *MockInstance) ParachainHostMinimumBackingVotes() (uint32, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostMinimumBackingVotes") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostMinimumBackingVotes indicates an expected call of ParachainHostMinimumBackingVotes. +func (mr *MockInstanceMockRecorder) ParachainHostMinimumBackingVotes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostMinimumBackingVotes", reflect.TypeOf((*MockInstance)(nil).ParachainHostMinimumBackingVotes)) +} + +// ParachainHostNodeFeatures mocks base method. +func (m *MockInstance) ParachainHostNodeFeatures() (parachaintypes.BitVec, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostNodeFeatures") + ret0, _ := ret[0].(parachaintypes.BitVec) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostNodeFeatures indicates an expected call of ParachainHostNodeFeatures. +func (mr *MockInstanceMockRecorder) ParachainHostNodeFeatures() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostNodeFeatures", reflect.TypeOf((*MockInstance)(nil).ParachainHostNodeFeatures)) +} + +// ParachainHostPersistedValidationData mocks base method. +func (m *MockInstance) ParachainHostPersistedValidationData(parachaidID parachaintypes.ParaID, assumption parachaintypes.OccupiedCoreAssumption) (*parachaintypes.PersistedValidationData, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostPersistedValidationData", parachaidID, assumption) + ret0, _ := ret[0].(*parachaintypes.PersistedValidationData) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostPersistedValidationData indicates an expected call of ParachainHostPersistedValidationData. +func (mr *MockInstanceMockRecorder) ParachainHostPersistedValidationData(parachaidID, assumption any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostPersistedValidationData", reflect.TypeOf((*MockInstance)(nil).ParachainHostPersistedValidationData), parachaidID, assumption) +} + +// ParachainHostSchedulingLookAhead mocks base method. +func (m *MockInstance) ParachainHostSchedulingLookAhead() (uint32, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSchedulingLookAhead") + ret0, _ := ret[0].(uint32) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSchedulingLookAhead indicates an expected call of ParachainHostSchedulingLookAhead. +func (mr *MockInstanceMockRecorder) ParachainHostSchedulingLookAhead() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSchedulingLookAhead", reflect.TypeOf((*MockInstance)(nil).ParachainHostSchedulingLookAhead)) +} + +// ParachainHostSessionExecutorParams mocks base method. 
+func (m *MockInstance) ParachainHostSessionExecutorParams(index parachaintypes.SessionIndex) (*parachaintypes.ExecutorParams, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSessionExecutorParams", index) + ret0, _ := ret[0].(*parachaintypes.ExecutorParams) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSessionExecutorParams indicates an expected call of ParachainHostSessionExecutorParams. +func (mr *MockInstanceMockRecorder) ParachainHostSessionExecutorParams(index any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSessionExecutorParams", reflect.TypeOf((*MockInstance)(nil).ParachainHostSessionExecutorParams), index) +} + +// ParachainHostSessionIndexForChild mocks base method. +func (m *MockInstance) ParachainHostSessionIndexForChild() (parachaintypes.SessionIndex, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSessionIndexForChild") + ret0, _ := ret[0].(parachaintypes.SessionIndex) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSessionIndexForChild indicates an expected call of ParachainHostSessionIndexForChild. +func (mr *MockInstanceMockRecorder) ParachainHostSessionIndexForChild() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSessionIndexForChild", reflect.TypeOf((*MockInstance)(nil).ParachainHostSessionIndexForChild)) +} + +// ParachainHostSessionInfo mocks base method. +func (m *MockInstance) ParachainHostSessionInfo(sessionIndex parachaintypes.SessionIndex) (*parachaintypes.SessionInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostSessionInfo", sessionIndex) + ret0, _ := ret[0].(*parachaintypes.SessionInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostSessionInfo indicates an expected call of ParachainHostSessionInfo. +func (mr *MockInstanceMockRecorder) ParachainHostSessionInfo(sessionIndex any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostSessionInfo", reflect.TypeOf((*MockInstance)(nil).ParachainHostSessionInfo), sessionIndex) +} + +// ParachainHostValidationCode mocks base method. +func (m *MockInstance) ParachainHostValidationCode(parachaidID parachaintypes.ParaID, assumption parachaintypes.OccupiedCoreAssumption) (*parachaintypes.ValidationCode, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidationCode", parachaidID, assumption) + ret0, _ := ret[0].(*parachaintypes.ValidationCode) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidationCode indicates an expected call of ParachainHostValidationCode. +func (mr *MockInstanceMockRecorder) ParachainHostValidationCode(parachaidID, assumption any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidationCode", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidationCode), parachaidID, assumption) +} + +// ParachainHostValidationCodeByHash mocks base method. +func (m *MockInstance) ParachainHostValidationCodeByHash(validationCodeHash common.Hash) (*parachaintypes.ValidationCode, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidationCodeByHash", validationCodeHash) + ret0, _ := ret[0].(*parachaintypes.ValidationCode) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidationCodeByHash indicates an expected call of ParachainHostValidationCodeByHash. 
+func (mr *MockInstanceMockRecorder) ParachainHostValidationCodeByHash(validationCodeHash any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidationCodeByHash", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidationCodeByHash), validationCodeHash) +} + +// ParachainHostValidatorGroups mocks base method. +func (m *MockInstance) ParachainHostValidatorGroups() (*parachaintypes.ValidatorGroups, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidatorGroups") + ret0, _ := ret[0].(*parachaintypes.ValidatorGroups) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidatorGroups indicates an expected call of ParachainHostValidatorGroups. +func (mr *MockInstanceMockRecorder) ParachainHostValidatorGroups() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidatorGroups", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidatorGroups)) +} + +// ParachainHostValidators mocks base method. +func (m *MockInstance) ParachainHostValidators() ([]parachaintypes.ValidatorID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParachainHostValidators") + ret0, _ := ret[0].([]parachaintypes.ValidatorID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParachainHostValidators indicates an expected call of ParachainHostValidators. +func (mr *MockInstanceMockRecorder) ParachainHostValidators() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParachainHostValidators", reflect.TypeOf((*MockInstance)(nil).ParachainHostValidators)) +} + +// PaymentQueryInfo mocks base method. +func (m *MockInstance) PaymentQueryInfo(ext []byte) (*types.RuntimeDispatchInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PaymentQueryInfo", ext) + ret0, _ := ret[0].(*types.RuntimeDispatchInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PaymentQueryInfo indicates an expected call of PaymentQueryInfo. +func (mr *MockInstanceMockRecorder) PaymentQueryInfo(ext any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaymentQueryInfo", reflect.TypeOf((*MockInstance)(nil).PaymentQueryInfo), ext) +} + +// RandomSeed mocks base method. +func (m *MockInstance) RandomSeed() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RandomSeed") +} + +// RandomSeed indicates an expected call of RandomSeed. +func (mr *MockInstanceMockRecorder) RandomSeed() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RandomSeed", reflect.TypeOf((*MockInstance)(nil).RandomSeed)) +} + +// SetContextStorage mocks base method. +func (m *MockInstance) SetContextStorage(s runtime.Storage) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetContextStorage", s) +} + +// SetContextStorage indicates an expected call of SetContextStorage. +func (mr *MockInstanceMockRecorder) SetContextStorage(s any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetContextStorage", reflect.TypeOf((*MockInstance)(nil).SetContextStorage), s) +} + +// Stop mocks base method. +func (m *MockInstance) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. 
+func (mr *MockInstanceMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockInstance)(nil).Stop)) +} + +// ValidateTransaction mocks base method. +func (m *MockInstance) ValidateTransaction(e types.Extrinsic) (*transaction.Validity, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateTransaction", e) + ret0, _ := ret[0].(*transaction.Validity) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateTransaction indicates an expected call of ValidateTransaction. +func (mr *MockInstanceMockRecorder) ValidateTransaction(e any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTransaction", reflect.TypeOf((*MockInstance)(nil).ValidateTransaction), e) +} + +// Validator mocks base method. +func (m *MockInstance) Validator() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validator") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Validator indicates an expected call of Validator. +func (mr *MockInstanceMockRecorder) Validator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validator", reflect.TypeOf((*MockInstance)(nil).Validator)) +} + +// Version mocks base method. +func (m *MockInstance) Version() (runtime.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(runtime.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockInstanceMockRecorder) Version() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockInstance)(nil).Version)) +} diff --git a/dot/parachain/statement-distribution/mocks_req_manager_test.go b/dot/parachain/statement-distribution/mocks_req_manager_test.go new file mode 100644 index 0000000000..d602054035 --- /dev/null +++ b/dot/parachain/statement-distribution/mocks_req_manager_test.go @@ -0,0 +1,53 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: requestManager) +// +// Generated by this command: +// +// mockgen -destination=mocks_req_manager_test.go -package=statementdistribution . requestManager +// + +// Package statementdistribution is a generated GoMock package. +package statementdistribution + +import ( + reflect "reflect" + + common "github.com/ChainSafe/gossamer/lib/common" + gomock "go.uber.org/mock/gomock" +) + +// MockrequestManager is a mock of requestManager interface. +type MockrequestManager struct { + ctrl *gomock.Controller + recorder *MockrequestManagerMockRecorder + isgomock struct{} +} + +// MockrequestManagerMockRecorder is the mock recorder for MockrequestManager. +type MockrequestManagerMockRecorder struct { + mock *MockrequestManager +} + +// NewMockrequestManager creates a new mock instance. +func NewMockrequestManager(ctrl *gomock.Controller) *MockrequestManager { + mock := &MockrequestManager{ctrl: ctrl} + mock.recorder = &MockrequestManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockrequestManager) EXPECT() *MockrequestManagerMockRecorder { + return m.recorder +} + +// removeByRelayParent mocks base method. 
+func (m *MockrequestManager) removeByRelayParent(rp common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "removeByRelayParent", rp) +} + +// removeByRelayParent indicates an expected call of removeByRelayParent. +func (mr *MockrequestManagerMockRecorder) removeByRelayParent(rp any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "removeByRelayParent", reflect.TypeOf((*MockrequestManager)(nil).removeByRelayParent), rp) +} diff --git a/dot/parachain/statement-distribution/mocks_statement_store_test.go b/dot/parachain/statement-distribution/mocks_statement_store_test.go deleted file mode 100644 index 9d6814f458..0000000000 --- a/dot/parachain/statement-distribution/mocks_statement_store_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/parachain/statement-distribution (interfaces: statementStore) -// -// Generated by this command: -// -// mockgen -destination=mocks_statement_store_test.go -package=statementdistribution . statementStore -// - -// Package statementdistribution is a generated GoMock package. -package statementdistribution - -import ( - reflect "reflect" - - parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - gomock "go.uber.org/mock/gomock" -) - -// MockstatementStore is a mock of statementStore interface. -type MockstatementStore struct { - ctrl *gomock.Controller - recorder *MockstatementStoreMockRecorder - isgomock struct{} -} - -// MockstatementStoreMockRecorder is the mock recorder for MockstatementStore. -type MockstatementStoreMockRecorder struct { - mock *MockstatementStore -} - -// NewMockstatementStore creates a new mock instance. -func NewMockstatementStore(ctrl *gomock.Controller) *MockstatementStore { - mock := &MockstatementStore{ctrl: ctrl} - mock.recorder = &MockstatementStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockstatementStore) EXPECT() *MockstatementStoreMockRecorder { - return m.recorder -} - -// fillStatementFilter mocks base method. -func (m *MockstatementStore) fillStatementFilter(arg0 parachaintypes.GroupIndex, arg1 parachaintypes.CandidateHash, arg2 *parachaintypes.StatementFilter) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "fillStatementFilter", arg0, arg1, arg2) -} - -// fillStatementFilter indicates an expected call of fillStatementFilter. -func (mr *MockstatementStoreMockRecorder) fillStatementFilter(arg0, arg1, arg2 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "fillStatementFilter", reflect.TypeOf((*MockstatementStore)(nil).fillStatementFilter), arg0, arg1, arg2) -} - -// freshStatementsForBacking mocks base method. -func (m *MockstatementStore) freshStatementsForBacking(validators []parachaintypes.ValidatorIndex, candidateHash parachaintypes.CandidateHash) []*parachaintypes.SignedStatement { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "freshStatementsForBacking", validators, candidateHash) - ret0, _ := ret[0].([]*parachaintypes.SignedStatement) - return ret0 -} - -// freshStatementsForBacking indicates an expected call of freshStatementsForBacking. 
-func (mr *MockstatementStoreMockRecorder) freshStatementsForBacking(validators, candidateHash any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "freshStatementsForBacking", reflect.TypeOf((*MockstatementStore)(nil).freshStatementsForBacking), validators, candidateHash) -} - -// groupStatements mocks base method. -func (m *MockstatementStore) groupStatements(arg0 *groups, arg1 parachaintypes.GroupIndex, arg2 parachaintypes.CandidateHash, arg3 *parachaintypes.StatementFilter) []*parachaintypes.SignedStatement { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "groupStatements", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].([]*parachaintypes.SignedStatement) - return ret0 -} - -// groupStatements indicates an expected call of groupStatements. -func (mr *MockstatementStoreMockRecorder) groupStatements(arg0, arg1, arg2, arg3 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "groupStatements", reflect.TypeOf((*MockstatementStore)(nil).groupStatements), arg0, arg1, arg2, arg3) -} - -// noteKnownByBacking mocks base method. -func (m *MockstatementStore) noteKnownByBacking(arg0 parachaintypes.ValidatorIndex, arg1 parachaintypes.CompactStatement) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "noteKnownByBacking", arg0, arg1) -} - -// noteKnownByBacking indicates an expected call of noteKnownByBacking. -func (mr *MockstatementStoreMockRecorder) noteKnownByBacking(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "noteKnownByBacking", reflect.TypeOf((*MockstatementStore)(nil).noteKnownByBacking), arg0, arg1) -} - -// validatorStatement mocks base method. -func (m *MockstatementStore) validatorStatement(validatorIndex parachaintypes.ValidatorIndex, statement parachaintypes.CompactStatement) (*parachaintypes.SignedStatement, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "validatorStatement", validatorIndex, statement) - ret0, _ := ret[0].(*parachaintypes.SignedStatement) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// validatorStatement indicates an expected call of validatorStatement. 
-func (mr *MockstatementStoreMockRecorder) validatorStatement(validatorIndex, statement any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "validatorStatement", reflect.TypeOf((*MockstatementStore)(nil).validatorStatement), validatorIndex, statement) -} diff --git a/dot/parachain/statement-distribution/muxed_message.go b/dot/parachain/statement-distribution/muxed_message.go new file mode 100644 index 0000000000..c47d2f1f01 --- /dev/null +++ b/dot/parachain/statement-distribution/muxed_message.go @@ -0,0 +1,50 @@ +package statementdistribution + +import "time" + +// MuxedMessage represents the kinds of messages +// the statement distribution can handle. These messages +// can have different origins and types, so the interface +// acts like a union that helps the subsystem handle +// each message properly. +type MuxedMessage interface { + isMuxedMessage() +} + +type overseerMessage struct { + inner any +} + +func (*overseerMessage) isMuxedMessage() {} + +// responderMessage is a message from the request handler +// indicating we received a request and we should produce +// a proper response and send it back +type responderMessage struct { + inner any // should be replaced with AttestedCandidateRequest type +} + +func (*responderMessage) isMuxedMessage() {} + +// reputationChangeMessage is a message indicating we should +// batch the reputation changes to the network bridge via +// Reputation Aggregator +type reputationChangeMessage struct{} + +func (*reputationChangeMessage) isMuxedMessage() {} + +// awaitMessageFrom waits for messages from either the overseerToSubSystem, responderCh, or reputationDelay +func (s *StatementDistribution) awaitMessageFrom( + overseerToSubSystem <-chan any, + responderCh chan any, + reputationDelay <-chan time.Time, +) MuxedMessage { + select { + case msg := <-overseerToSubSystem: + return &overseerMessage{inner: msg} + case msg := <-responderCh: + return &responderMessage{inner: msg} + case <-reputationDelay: + return &reputationChangeMessage{} + } +} diff --git a/dot/parachain/statement-distribution/session_topology_view_test.go b/dot/parachain/statement-distribution/session_topology_view_test.go index 19ec516786..ecd45215f2 100644 --- a/dot/parachain/statement-distribution/session_topology_view_test.go +++ b/dot/parachain/statement-distribution/session_topology_view_test.go @@ -5,7 +5,6 @@ import ( "github.com/ChainSafe/gossamer/dot/parachain/grid" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - "github.com/ChainSafe/gossamer/dot/types" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -29,17 +28,17 @@ func TestBuildSessionTopology(t *testing.T) { CanonicalShuffling: []grid.TopologyPeerInfo{ { ValidatorIndex: 0, - DiscoveryID: types.AuthorityID{}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{}, Peers: []peer.ID{}, }, { ValidatorIndex: 1, - DiscoveryID: types.AuthorityID{}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{}, Peers: []peer.ID{}, }, { ValidatorIndex: 2, - DiscoveryID: types.AuthorityID{}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{}, Peers: []peer.ID{}, }, }, @@ -67,7 +66,7 @@ func TestBuildSessionTopology(t *testing.T) { for i := 0; i < 9; i++ { baseTopology.CanonicalShuffling[i] = grid.TopologyPeerInfo{ ValidatorIndex: parachaintypes.ValidatorIndex(i), - DiscoveryID: types.AuthorityID{}, + DiscoveryID: parachaintypes.AuthorityDiscoveryID{}, Peers: []peer.ID{}, } } diff --git
a/dot/parachain/statement-distribution/state_v2.go b/dot/parachain/statement-distribution/state_v2.go index 522e710e99..005b890df7 100644 --- a/dot/parachain/statement-distribution/state_v2.go +++ b/dot/parachain/statement-distribution/state_v2.go @@ -15,13 +15,15 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) -type candidatesStore interface { - getConfirmed(candidateHash parachaintypes.CandidateHash) (*confirmedCandidate, bool) +// requestManager defines the interface that manages +// outgoing requests +type requestManager interface { + removeByRelayParent(rp common.Hash) } // skipcq:SCC-U1000 type perRelayParentState struct { - localValidator *localValidatorStore + localValidator *localValidatorState statementStore *statementStore secondingLimit uint session parachaintypes.SessionIndex @@ -31,6 +33,14 @@ type perRelayParentState struct { assignmentsPerGroup map[parachaintypes.GroupIndex][]parachaintypes.ParaID } +func (p *perRelayParentState) activeValidatorState() *activeValidatorState { + if p.localValidator != nil { + return p.localValidator.active + } + + return nil +} + // isDisabled returns `true` if the given validator is disabled in the context of the relay parent. func (p *perRelayParentState) isDisabled(vIdx parachaintypes.ValidatorIndex) bool { _, ok := p.disabledValidators[vIdx] @@ -51,7 +61,7 @@ func (p *perRelayParentState) disabledBitmask(group []parachaintypes.ValidatorIn return bm, err } -type localValidatorStore struct { +type localValidatorState struct { gridTracker *gridTracker active *activeValidatorState // skipcq:SCC-U1000 } @@ -61,12 +71,12 @@ type activeValidatorState struct { index parachaintypes.ValidatorIndex groupIndex parachaintypes.GroupIndex assignments []parachaintypes.ParaID - clusterTracker any // TODO: use cluster tracker implementation (#4713) + clusterTracker *clusterTracker // TODO: use cluster tracker implementation (#4713) } // skipcq:SCC-U1000 type perSessionState struct { - sessionInfo parachaintypes.SessionInfo + sessionInfo *parachaintypes.SessionInfo groups *groups authLookup map[parachaintypes.AuthorityDiscoveryID]parachaintypes.ValidatorIndex gridView *sessionTopologyView @@ -76,8 +86,8 @@ type perSessionState struct { allowV2Descriptors bool } -// skipcq:SCC-U1000 -func newPerSessionState(sessionInfo parachaintypes.SessionInfo, +func newPerSessionState( + sessionInfo *parachaintypes.SessionInfo, keystore keystore.Keystore, backingThreshold uint32, allowV2Descriptor bool, @@ -213,13 +223,28 @@ func (p *peerState) iterKnownDiscoveryIDs() []parachaintypes.AuthorityDiscoveryI type v2State struct { implicitView parachainutil.ImplicitView - candidates candidates - perRelayParent map[common.Hash]perRelayParentState - perSession map[parachaintypes.SessionIndex]perSessionState + candidates *candidates + perRelayParent map[common.Hash]*perRelayParentState + perSession map[parachaintypes.SessionIndex]*perSessionState unusedTopologies map[parachaintypes.SessionIndex]events.NewGossipTopology peers map[peer.ID]peerState keystore keystore.Keystore authorities map[parachaintypes.AuthorityDiscoveryID]string - requestManager any // TODO: #4377 - responseManager any // TODO: #4378 + requestManager requestManager // TODO: #4377 + responseManager any // TODO: #4378 +} + +func newV2State(ks keystore.Keystore, iv parachainutil.ImplicitView) *v2State { + return &v2State{ + implicitView: iv, + candidates: nil, + perRelayParent: map[common.Hash]*perRelayParentState{}, + perSession: map[parachaintypes.SessionIndex]*perSessionState{}, + unusedTopologies: 
map[parachaintypes.SessionIndex]events.NewGossipTopology{}, + peers: map[peer.ID]peerState{}, + keystore: ks, + authorities: map[parachaintypes.AuthorityDiscoveryID]string{}, + requestManager: nil, + responseManager: nil, + } } diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index dc9455eebd..7c47588267 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -13,14 +13,22 @@ import ( "github.com/ChainSafe/gossamer/dot/parachain/backing" networkbridgemessages "github.com/ChainSafe/gossamer/dot/parachain/network-bridge/messages" + prospectiveparachainsmessages "github.com/ChainSafe/gossamer/dot/parachain/prospective-parachains/messages" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" parachainutil "github.com/ChainSafe/gossamer/dot/parachain/util" validationprotocol "github.com/ChainSafe/gossamer/dot/parachain/validation-protocol" + "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/ChainSafe/gossamer/lib/runtime" "github.com/libp2p/go-libp2p/core/peer" ) +const ( + HypotheticalMembershipTimeout = 2 * time.Second +) + var ( errEncodedStatementsDoNotMatch = errors.New("encoded statements do not match") errUnkownLocalValidator = errors.New("unknown local validator") @@ -76,30 +84,25 @@ var ( var logger = log.NewFromGlobal(log.AddContext("pkg", "parachain-statement-distribution")) -type StatementDistribution struct { - SubSystemToOverseer chan<- any -} - -type MuxedMessage interface { - isMuxedMessage() +type blockState interface { + GetHeader(hash common.Hash) (header *types.Header, err error) + GetRuntime(blockHash common.Hash) (instance runtime.Instance, err error) } -type overseerMessage struct { - inner any +type StatementDistribution struct { + blockState blockState + SubSystemToOverseer chan<- any + state *v2State } -func (*overseerMessage) isMuxedMessage() {} - -type responderMessage struct { - inner any // should be replaced with AttestedCandidateRequest type +func New(overseerChan chan<- any, ks keystore.Keystore, blockState parachainutil.BlockState) *StatementDistribution { + return &StatementDistribution{ + SubSystemToOverseer: overseerChan, + blockState: blockState, + state: newV2State(ks, parachainutil.NewBackingImplicitView(blockState, nil)), + } } -func (*responderMessage) isMuxedMessage() {} - -type reputationChangeMessage struct{} - -func (*reputationChangeMessage) isMuxedMessage() {} - // Run just receives the ctx and a channel from the overseer to subsystem func (s *StatementDistribution) Run(ctx context.Context, overseerToSubSystem <-chan any) { // Inside the method Run, we spawn a goroutine to handle network incoming requests @@ -117,27 +120,198 @@ func (s *StatementDistribution) Run(ctx context.Context, overseerToSubSystem <-c switch innerMessage := message.(type) { case *reputationChangeMessage: logger.Info("Reputation change triggered.") + case *overseerMessage: + shouldStop, err := s.handleSubsystemMessage(innerMessage.inner) + if err != nil { + logger.Errorf("handling subsystem message: %s", err.Error()) + } + + if shouldStop { + logger.Warn("handling subsystem message: should stop statement distribution") + break + } default: logger.Warn("Unhandled message type: " + fmt.Sprintf("%v", innerMessage)) } } } -func 
taskResponder(responderCh chan any) {} +func (s *StatementDistribution) handleSubsystemMessage(overseerMessage any) (bool, error) { + switch message := overseerMessage.(type) { + case parachaintypes.ActiveLeavesUpdateSignal: + if message.Activated != nil { + if err := s.handleActiveLeavesUpdate(message.Activated); err != nil { + return false, fmt.Errorf("handling active leaves update: %w", err) + } + } + s.handleDeactivatedLeaves(message.Deactivated) + + case parachaintypes.Conclude: + return true, nil + } + + return false, nil +} + +// Send a peer, apparently just becoming aware of a relay-parent, all messages +// concerning that relay-parent. +// +// In particular, we send all statements pertaining to our common cluster, +// as well as all manifests, acknowledgements, or other grid statements. +// +// Note that due to the way we handle views, our knowledge of peers' relay parents +// may "oscillate" with relay parents repeatedly leaving and entering the +// view of a peer based on the implicit view of active leaves. +// +// This function is designed to be cheap and not to send duplicate messages in repeated +// cases. +func (s *StatementDistribution) sendPeerMessagesForRelayParent(pid peer.ID, rp common.Hash) { + peerData, ok := s.state.peers[pid] + if !ok { + return + } + + rpState, ok := s.state.perRelayParent[rp] + if !ok { + return + } + + perSessionState, ok := s.state.perSession[rpState.session] + if !ok { + return + } + + for _, vID := range peerData.iterKnownDiscoveryIDs() { + vIndex, ok := perSessionState.authLookup[vID] + if !ok { + continue + } + + active := rpState.activeValidatorState() + if active != nil { + s.sendPendingClusterStatements(rp, + peer.ID(pid), peerData.protocolVersion, + vIndex, + active.clusterTracker, + s.state.candidates, + rpState.statementStore, + ) + } + + s.sendPendingGridMessages(rp, + peer.ID(pid), peerData.protocolVersion, + vIndex, + perSessionState.groups, + rpState, + s.state.candidates, + ) + } +} + +func (s *StatementDistribution) fragmentChainUpdateInner(rp *common.Hash, + parent *hashAndParaID, knowHypotheticals *[]parachaintypes.HypotheticalCandidate) { + + // 1. get hypothetical candidates + var hypotheticals []parachaintypes.HypotheticalCandidate + + if knowHypotheticals != nil { + hypotheticals = *knowHypotheticals + } else { + hypotheticals = s.state.candidates.frontierHypotheticals(parent) + } + + // 2. 
find out which are in the frontier + response := make(chan []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem) + hypotheticalMembershipRequest := prospectiveparachainsmessages.GetHypotheticalMembership{ + Candidates: hypotheticals, + FragmentChainRelayParent: rp, + Response: response, + } + + s.SubSystemToOverseer <- hypotheticalMembershipRequest -// awaitMessageFrom waits for messages from either the overseerToSubSystem, responderCh, or reputationDelay -func (s *StatementDistribution) awaitMessageFrom( - overseerToSubSystem <-chan any, - responderCh chan any, - reputationDelay <-chan time.Time, -) MuxedMessage { + var candidateMemberships []*prospectiveparachainsmessages.HypotheticalMembershipResponseItem select { - case msg := <-overseerToSubSystem: - return &overseerMessage{inner: msg} - case msg := <-responderCh: - return &responderMessage{inner: msg} - case <-reputationDelay: - return &reputationChangeMessage{} + case <-time.After(HypotheticalMembershipTimeout): + logger.Warnf("timed out waiting for hypothetical membership response for relay-parent %s", rp.String()) + return + case resp := <-response: + candidateMemberships = resp + } + + // 3. note that they are importable under a given leaf hash. + for _, item := range candidateMemberships { + // skip parablocks which aren't potential candidates + if len(item.HypotheticalMembership) == 0 { + continue + } + + for _, leafHash := range item.HypotheticalMembership { + s.state.candidates.noteImportableUnder(item.HypotheticalCandidate, leafHash) + } + + // 4. for confirmed candidates, send all statements which are new to backing. + if complete, ok := item.HypotheticalCandidate.(*parachaintypes.HypotheticalCandidateComplete); ok { + confirmedCandidate, ok := s.state.candidates.getConfirmed(complete.ClaimedCandidateHash) + if !ok { + continue + } + + perRelayParentState, ok := s.state.perRelayParent[complete.CommittedCandidateReceipt.Descriptor.RelayParent] + if confirmedCandidate == nil || !ok { + continue + } + + groupIndex := confirmedCandidate.assignedGroup + perSessionState, ok := s.state.perSession[perRelayParentState.session] + if !ok { + continue + } + + // Sanity check if group_index is valid for this para at relay parent. + expectedGroups, ok := perRelayParentState.groupsPerPara[complete.CommittedCandidateReceipt.Descriptor.ParaID] + if !ok { + continue + } + + if !slices.Contains(expectedGroups, groupIndex) { + logger.Warnf("group index %d not found for para %d at relay parent %s", + groupIndex, complete.CommittedCandidateReceipt.Descriptor.ParaID, rp.String()) + continue + } + + s.sendBackingFreshStatements( + complete.ClaimedCandidateHash, + confirmedCandidate.assignedGroup, + complete.CommittedCandidateReceipt.Descriptor.RelayParent, + perRelayParentState, + confirmedCandidate, + perSessionState, + ) + } + } +} + +// Send a peer all pending cluster statements for a relay parent. 
+func (s *StatementDistribution) sendPendingClusterStatements(rp common.Hash, + peerID peer.ID, validationVersion validationprotocol.ValidationVersion, + peerValidatorIdx parachaintypes.ValidatorIndex, + clusterTracker *clusterTracker, + candidates *candidates, + statementStore *statementStore, +) { + pendingStmts := clusterTracker.pendingStatementsFor(peerValidatorIdx) + for _, stmt := range pendingStmts { + if !candidates.isConfirmed(stmt.compactStmt.CandidateHash()) { + continue + } + + msg := pendingStatementNetworkMessage(statementStore, rp, peerID, validationVersion, stmt) + if msg != nil { + clusterTracker.noteSent(peerValidatorIdx, stmt.validatorIndex, stmt.compactStmt) + // TODO: create a SendValidationMessages to send a batch of messages + s.SubSystemToOverseer <- msg + } } } @@ -150,7 +324,7 @@ func (s *StatementDistribution) sendPendingGridMessages( peerValidatorID parachaintypes.ValidatorIndex, groups *groups, rpState *perRelayParentState, - candidates candidatesStore, + candidates *candidates, ) error { if rpState.localValidator == nil { return errUnkownLocalValidator @@ -255,7 +429,7 @@ func (s *StatementDistribution) sendPendingGridMessages( *groups, ps.validatorIndex, peerValidatorID, - ps.statement, + ps.compactStmt, false, ) @@ -341,8 +515,8 @@ func (s *StatementDistribution) sendBackingFreshStatements( } type manifestImportSuccess struct { - relayParentState perRelayParentState - perSession perSessionState + relayParentState *perRelayParentState + perSession *perSessionState acknowledge bool senderIndex parachaintypes.ValidatorIndex } @@ -354,9 +528,9 @@ type manifestImportSuccess struct { func (s *StatementDistribution) handleIncomingManifestCommon( peerID peer.ID, peers map[peer.ID]peerState, - perRelayParent map[common.Hash]perRelayParentState, - perSession map[parachaintypes.SessionIndex]perSessionState, - candidates candidates, + perRelayParent map[common.Hash]*perRelayParentState, + perSession map[parachaintypes.SessionIndex]*perSessionState, + candidates *candidates, candidateHash parachaintypes.CandidateHash, relayParent common.Hash, paraID parachaintypes.ParaID, @@ -589,7 +763,7 @@ func (s *StatementDistribution) handleIncomingManifest( validationVersion, senderIndex, perSession.groups, - &rpState, + rpState, manifest.RelayParent, manifest.GroupIndex, manifest.CandidateHash, @@ -870,7 +1044,12 @@ func pendingStatementNetworkMessage( pending originatorStatementPair, ) *networkbridgemessages.SendValidationMessage { if validationVersion == validationprotocol.ValidationVersionV3 { - signed, known := stmtStore.validatorStatement(pending.validatorIndex, pending.statement) + pair := originatorStatementPair{ + validatorIndex: pending.validatorIndex, + compactStmt: pending.compactStmt, + } + + signed, known := stmtStore.validatorStatement(pair) if !known || signed == nil { return nil } @@ -884,7 +1063,6 @@ func pendingStatementNetworkMessage( panic(fmt.Sprintf("unexpected error setting value in StatementDistributionMessageV3: %s", err)) } - // TODO: this will panic as validation protocol does not support V3 yet vp := validationprotocol.NewValidationProtocolVDT() err = vp.SetValue(validationprotocol.StatementDistribution{StatementDistributionMessage: sdmV3}) if err != nil { @@ -899,3 +1077,6 @@ func pendingStatementNetworkMessage( return nil } + +// TODO: https://github.com/ChainSafe/gossamer/issues/4285 +func taskResponder(responderCh chan any) {} diff --git a/dot/parachain/statement-distribution/statement_distribution_test.go 
b/dot/parachain/statement-distribution/statement_distribution_test.go index f28330f974..09631a6c43 100644 --- a/dot/parachain/statement-distribution/statement_distribution_test.go +++ b/dot/parachain/statement-distribution/statement_distribution_test.go @@ -16,7 +16,6 @@ import ( "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" ) func TestSendBackingFreshStatements(t *testing.T) { @@ -244,7 +243,7 @@ func TestSendPendingGridMessages(t *testing.T) { validationVersion := validationprotocol.ValidationVersionV3 peerValidatorID := parachaintypes.ValidatorIndex(0) rpState := &perRelayParentState{ - localValidator: &localValidatorStore{ + localValidator: &localValidatorState{ gridTracker: gt, }, } @@ -261,7 +260,6 @@ func TestSendPendingGridMessages(t *testing.T) { t.Run("pending_stmts_but_none_confirmed", func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) peerValidatorID := parachaintypes.ValidatorIndex(0) gt := newGridTracker() @@ -276,24 +274,16 @@ func TestSendPendingGridMessages(t *testing.T) { peerID := peer.ID("peer-ex") validationVersion := validationprotocol.ValidationVersionV3 rpState := &perRelayParentState{ - localValidator: &localValidatorStore{ + localValidator: &localValidatorState{ gridTracker: gt, }, } - candidatesMock := NewMockcandidatesStore(ctrl) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). - Return(nil, false) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0xab}}). - Return(nil, false) - sd := StatementDistribution{} err := sd.sendPendingGridMessages(rpHash, peerID, validationVersion, peerValidatorID, nil, - rpState, candidatesMock, + rpState, &candidates{}, ) require.Nil(t, err) }) @@ -301,7 +291,6 @@ func TestSendPendingGridMessages(t *testing.T) { t.Run("pending_full_manifest_confirmed", func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) peerValidatorID := parachaintypes.ValidatorIndex(4) gt := newGridTracker() @@ -312,21 +301,19 @@ func TestSendPendingGridMessages(t *testing.T) { }, ) - candidatesMock := NewMockcandidatesStore(ctrl) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}). - Return(&confirmedCandidate{ - assignedGroup: parachaintypes.GroupIndex(1), - receipt: parachaintypes.CommittedCandidateReceiptV2{ - Descriptor: parachaintypes.CandidateDescriptorV2{ - ParaID: parachaintypes.ParaID(10), + candidatesTracker := &candidates{ + candidates: map[parachaintypes.CandidateHash]candidateState{ + parachaintypes.CandidateHash{Value: common.Hash{0x12}}: &confirmedCandidate{ + assignedGroup: parachaintypes.GroupIndex(1), + receipt: parachaintypes.CommittedCandidateReceiptV2{ + Descriptor: parachaintypes.CandidateDescriptorV2{ + ParaID: parachaintypes.ParaID(10), + }, }, + parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)), }, - parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)), - }, true) - candidatesMock.EXPECT(). - getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0xab}}). 
-			Return(nil, false)
+			},
+		}
 
 		gps := newGroups([][]parachaintypes.ValidatorIndex{
 			{0, 1, 2},
@@ -354,7 +341,7 @@ func TestSendPendingGridMessages(t *testing.T) {
 		}
 
 		rpState := &perRelayParentState{
-			localValidator: &localValidatorStore{
+			localValidator: &localValidatorState{
 				gridTracker: gt,
 			},
 			statementStore: stmtStore,
@@ -367,7 +354,7 @@ func TestSendPendingGridMessages(t *testing.T) {
 
 		err = sd.sendPendingGridMessages(rpHash, peerID, v3, peerValidatorID, gps,
-			rpState, candidatesMock,
+			rpState, candidatesTracker,
 		)
 		require.Nil(t, err)
 
@@ -400,7 +387,6 @@ func TestSendPendingGridMessages(t *testing.T) {
 	t.Run("pending_full_and_ack_manifest_confirmed", func(t *testing.T) {
 		t.Parallel()
 
-		ctrl := gomock.NewController(t)
 		peerValidatorID := parachaintypes.ValidatorIndex(4)
 
 		gt := newGridTracker()
@@ -411,29 +397,28 @@ func TestSendPendingGridMessages(t *testing.T) {
 			},
 		)
 
-		candidatesMock := NewMockcandidatesStore(ctrl)
-		candidatesMock.EXPECT().
-			getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0x12}}).
-			Return(&confirmedCandidate{
-				assignedGroup: parachaintypes.GroupIndex(1),
-				receipt: parachaintypes.CommittedCandidateReceiptV2{
-					Descriptor: parachaintypes.CandidateDescriptorV2{
-						ParaID: parachaintypes.ParaID(10),
+		candidatesTracker := &candidates{
+			candidates: map[parachaintypes.CandidateHash]candidateState{
+				parachaintypes.CandidateHash{Value: common.Hash{0x12}}: &confirmedCandidate{
+					assignedGroup: parachaintypes.GroupIndex(1),
+					receipt: parachaintypes.CommittedCandidateReceiptV2{
+						Descriptor: parachaintypes.CandidateDescriptorV2{
+							ParaID: parachaintypes.ParaID(10),
+						},
 					},
+					parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)),
 				},
-				parentHash: common.Hash(bytes.Repeat([]byte{0xbc}, 32)),
-			}, true)
-		candidatesMock.EXPECT().
-			getConfirmed(parachaintypes.CandidateHash{Value: common.Hash{0xab}}).
-			Return(&confirmedCandidate{
-				assignedGroup: parachaintypes.GroupIndex(0),
-				receipt: parachaintypes.CommittedCandidateReceiptV2{
-					Descriptor: parachaintypes.CandidateDescriptorV2{
-						ParaID: parachaintypes.ParaID(11),
+				parachaintypes.CandidateHash{Value: common.Hash{0xab}}: &confirmedCandidate{
+					assignedGroup: parachaintypes.GroupIndex(0),
+					receipt: parachaintypes.CommittedCandidateReceiptV2{
+						Descriptor: parachaintypes.CandidateDescriptorV2{
+							ParaID: parachaintypes.ParaID(11),
+						},
 					},
+					parentHash: common.Hash(bytes.Repeat([]byte{0xee}, 32)),
 				},
-				parentHash: common.Hash(bytes.Repeat([]byte{0xee}, 32)),
-			}, true)
+			},
+		}
 
 		gps := newGroups([][]parachaintypes.ValidatorIndex{
 			{0, 1, 2},
@@ -472,7 +457,7 @@ func TestSendPendingGridMessages(t *testing.T) {
 		}
 
 		rpState := &perRelayParentState{
-			localValidator: &localValidatorStore{
+			localValidator: &localValidatorState{
 				gridTracker: gt,
 			},
 			statementStore: stmtStore,
@@ -485,7 +470,7 @@ func TestSendPendingGridMessages(t *testing.T) {
 
 		err = sd.sendPendingGridMessages(rpHash, peerID, v3, peerValidatorID, gps,
-			rpState, candidatesMock,
+			rpState, candidatesTracker,
 		)
 		require.Nil(t, err)
 
@@ -559,9 +544,9 @@ func TestHandleIncomingManifestCommon(t *testing.T) {
 		importSuccess := sd.handleIncomingManifestCommon(
 			pierre,
 			make(map[peer.ID]peerState),
-			make(map[common.Hash]perRelayParentState),
-			make(map[parachaintypes.SessionIndex]perSessionState),
-			candidates{},
+			make(map[common.Hash]*perRelayParentState),
+			make(map[parachaintypes.SessionIndex]*perSessionState),
+			&candidates{},
 			candidateHash,
 			relayParent,
 			paraID,
@@ -590,9 +575,9 @@ func TestHandleIncomingManifestCommon(t *testing.T) {
 		importSuccess := sd.handleIncomingManifestCommon(
 			pierre,
 			peers,
-			make(map[common.Hash]perRelayParentState),
-			make(map[parachaintypes.SessionIndex]perSessionState),
-			candidates{},
+			make(map[common.Hash]*perRelayParentState),
+			make(map[parachaintypes.SessionIndex]*perSessionState),
+			&candidates{},
 			candidateHash,
 			relayParent,
 			paraID,
@@ -650,7 +635,7 @@ func TestHandleIncomingManifestCommon(t *testing.T) {
 		}
 
 		gt := newGridTracker()
-		localValidator := &localValidatorStore{
+		localValidator := &localValidatorState{
 			gridTracker: gt,
 		}
 
@@ -660,7 +645,7 @@ func TestHandleIncomingManifestCommon(t *testing.T) {
 		}
 		groups := newGroups(initGroups, 2)
 
-		relayParentState := perRelayParentState{
+		relayParentState := &perRelayParentState{
 			session:        1,
 			localValidator: localValidator,
 			groupsPerPara:  map[parachaintypes.ParaID][]parachaintypes.GroupIndex{paraID: {groupIndex}},
@@ -668,7 +653,7 @@ func TestHandleIncomingManifestCommon(t *testing.T) {
 				groupIndex: {paraID},
 			},
 		}
-		perRelayParent := map[common.Hash]perRelayParentState{
+		perRelayParent := map[common.Hash]*perRelayParentState{
 			relayParent: relayParentState,
 		}
 
@@ -690,17 +675,17 @@ func TestHandleIncomingManifestCommon(t *testing.T) {
 			},
 		}
 
-		sessionState := perSessionState{
+		sessionState := &perSessionState{
 			gridView: gridTopology,
 			groups:   groups,
-			sessionInfo: sessionInfo,
+			sessionInfo: &sessionInfo,
 			localValidator: &validatorIndex,
 		}
-		perSession := map[parachaintypes.SessionIndex]perSessionState{
+		perSession := map[parachaintypes.SessionIndex]*perSessionState{
 			1: sessionState,
 		}
-		candidates := candidates{
+		candidates := &candidates{
 			candidates: make(map[parachaintypes.CandidateHash]candidateState),
 			byParent:   make(map[hashAndParaID]map[parachaintypes.CandidateHash]struct{}),
 		}
@@ -798,10 +783,10 @@ func TestHandleIncomingManifest(t *testing.T) {
 					discoveryIds: &map[parachaintypes.AuthorityDiscoveryID]struct{}{authKey: {}},
 				},
 			},
-			perRelayParent: map[common.Hash]perRelayParentState{
+			perRelayParent: map[common.Hash]*perRelayParentState{
 				relayParent: {
 					session: 1,
-					localValidator: &localValidatorStore{
+					localValidator: &localValidatorState{
 						gridTracker: gt,
 					},
 					groupsPerPara: map[parachaintypes.ParaID][]parachaintypes.GroupIndex{
@@ -813,7 +798,7 @@ func TestHandleIncomingManifest(t *testing.T) {
 					statementStore: newStatementStore(groups),
 				},
 			},
-			perSession: map[parachaintypes.SessionIndex]perSessionState{
+			perSession: map[parachaintypes.SessionIndex]*perSessionState{
 				1: {
 					localValidator: &validatorIndex,
 					gridView: &sessionTopologyView{
@@ -825,7 +810,7 @@ func TestHandleIncomingManifest(t *testing.T) {
 						},
 					},
 					groups: groups,
-					sessionInfo: parachaintypes.SessionInfo{
+					sessionInfo: &parachaintypes.SessionInfo{
 						DiscoveryKeys: []parachaintypes.AuthorityDiscoveryID{
 							{0x0a},  // Validator 0
 							authKey, // Validator 1 - matches our peer's authority key
@@ -834,7 +819,7 @@ func TestHandleIncomingManifest(t *testing.T) {
 					},
 				},
 			},
-			candidates: candidates{
+			candidates: &candidates{
 				candidates: map[parachaintypes.CandidateHash]candidateState{
 					candidateHash: &confirmedCandidate{
 						receipt: parachaintypes.CommittedCandidateReceiptV2{
diff --git a/dot/parachain/statement-distribution/statement_store.go b/dot/parachain/statement-distribution/statement_store.go
index 1ce8fa10c8..34e06a087d 100644
--- a/dot/parachain/statement-distribution/statement_store.go
+++ b/dot/parachain/statement-distribution/statement_store.go
@@ -256,19 +256,18 @@ func (s *statementStore) groupStatements(
 }
 
 // validatorStatement returns the full statement of this kind issued by this validator, if it is known.
-func (s *statementStore) validatorStatement(
-	validatorIndex parachaintypes.ValidatorIndex,
-	statement parachaintypes.CompactStatement,
+func (s *statementStore) validatorStatement( //nolint:unused
+	pair originatorStatementPair,
 ) (*parachaintypes.SignedStatement, bool) {
 	kind := fingerprintKindCompactSeconded // default to Seconded
-	if _, ok := statement.(*parachaintypes.CompactValid); ok {
+	if _, ok := pair.compactStmt.(*parachaintypes.CompactValid); ok {
 		kind = fingerprintKindCompactValid
 	}
 
 	fp := fingerprint{
-		validator: validatorIndex,
+		validator: pair.validatorIndex,
 		kind:      kind,
-		candidateHash: statement.CandidateHash(),
+		candidateHash: pair.compactStmt.CandidateHash(),
 	}
 	sst, ok := s.knownStmts[fp]
 	return sst.stmt, ok
diff --git a/dot/parachain/types/overseer_message.go b/dot/parachain/types/overseer_message.go
index 336d1ecab5..77fc861c4c 100644
--- a/dot/parachain/types/overseer_message.go
+++ b/dot/parachain/types/overseer_message.go
@@ -5,11 +5,6 @@ package parachaintypes
 
 import "github.com/ChainSafe/gossamer/lib/common"
 
-var (
-	_ HypotheticalCandidate = (*HypotheticalCandidateIncomplete)(nil)
-	_ HypotheticalCandidate = (*HypotheticalCandidateComplete)(nil)
-)
-
 // OverseerFuncRes is a result of an overseer function
 type OverseerFuncRes[T any] struct {
 	Err error
diff --git a/dot/parachain/types/statement_test.go b/dot/parachain/types/statement_test.go
index 3dc0bc7fee..26d1572a56 100644
--- a/dot/parachain/types/statement_test.go
+++ b/dot/parachain/types/statement_test.go
@@ -199,6 +199,8 @@ func TestCompactStatement(t *testing.T) {
 	for _, c := range testCases {
 		c := c
 
+		fmt.Println("spawning test for", c.name)
+
 		t.Run(c.name, func(t *testing.T) {
 			t.Parallel()
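Reviewer note: the statement_store.go hunk above changes validatorStatement to take the originatorStatementPair used elsewhere in this patch instead of a separate validator index and compact statement, so callers key the lookup the same way the pending queues do. A minimal sketch of the call shape, assuming a test-style helper in the same package; resolvePending is hypothetical, while validatorStatement, originatorStatementPair and SignedStatement come from the patch:

// resolvePending shows the refactored lookup: the pair bundles the
// originating validator index with its compact statement, and the store
// derives the fingerprint (validator, statement kind, candidate hash)
// internally before consulting knownStmts.
func resolvePending(store *statementStore, pending originatorStatementPair) *parachaintypes.SignedStatement {
	signed, known := store.validatorStatement(pending)
	if !known || signed == nil {
		return nil // no signed statement recorded for this fingerprint
	}
	return signed
}

This mirrors how pendingStatementNetworkMessage now builds the pair before asking the store for the full signed statement.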
diff --git a/dot/parachain/types/types.go b/dot/parachain/types/types.go
index 26a4ca0a42..4b572281e7 100644
--- a/dot/parachain/types/types.go
+++ b/dot/parachain/types/types.go
@@ -925,20 +925,27 @@ type Subsystem interface {
 	Stop()
 }
 
-// AvailabilityChunkMapping tells if the chunk mapping feature is enabled.
-// Enables the implementation of
-// [RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md).
-// Must not be enabled unless all validators and collators have stopped using `req_chunk`
-// protocol version 1. If it is enabled, validators can start systematic chunk recovery.
-const AvailabilityChunkMapping NodeFeatureIndex = 2
-
 // NodeFeatureIndex represents the index of a feature in a bitvector of node features fetched from runtime.
 type NodeFeatureIndex byte
 
-// This feature enables the extension of `BackedCandidate.ValidatorIndices` by 8 bits.
-// The value stored there represents the assumed core index where the candidates
-// are backed. This is needed for the elastic scaling MVP.
-const ElasticScalingMVP NodeFeatureIndex = 1
+const (
+	// This feature enables the extension of `BackedCandidate.ValidatorIndices` by 8 bits.
+	// The value stored there represents the assumed core index where the candidates
+	// are backed. This is needed for the elastic scaling MVP.
+	ElasticScalingMVP NodeFeatureIndex = 1
+
+	// AvailabilityChunkMapping tells if the chunk mapping feature is enabled.
+	// Enables the implementation of
+	// [RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md).
+	// Must not be enabled unless all validators and collators have stopped using `req_chunk`
+	// protocol version 1. If it is enabled, validators can start systematic chunk recovery.
+	AvailabilityChunkMapping NodeFeatureIndex = 2
+
+	// Enables node side support of `CoreIndex` committed candidate receipts.
+	// See [RFC-103](https://github.com/polkadot-fellows/RFCs/pull/103) for details.
+	// Only enable if at least 2/3 of nodes support the feature.
+	CandidateReceiptV2Feature NodeFeatureIndex = 3
+)
 
 // TransposedClaimQueue represents a mapping between ParaID and the cores assigned per depth
 type TransposedClaimQueue map[ParaID]map[uint8]map[CoreIndex]struct{}
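Reviewer note: NodeFeatureIndex values such as ElasticScalingMVP, AvailabilityChunkMapping and the new CandidateReceiptV2Feature are bit positions in the node-features bitvector fetched from the runtime. A hedged sketch of how a caller might test one of these bits; the []byte representation and the least-significant-bit-first ordering are assumptions made for illustration, not an API defined in this patch:

// hasNodeFeature is a hypothetical helper: it treats features as the raw
// node-features bitvector and checks the bit at the given index, assuming
// LSB-first ordering within each byte.
func hasNodeFeature(features []byte, idx parachaintypes.NodeFeatureIndex) bool {
	byteIdx := int(idx) / 8
	if byteIdx >= len(features) {
		return false
	}
	return features[byteIdx]&(1<<(uint(idx)%8)) != 0
}

// usage sketch: hasNodeFeature(features, parachaintypes.CandidateReceiptV2Feature)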