From f06bbbe6207546b78ea9e7c236c10d7119e1966d Mon Sep 17 00:00:00 2001 From: Gengar Date: Wed, 24 Sep 2025 13:57:22 +0300 Subject: [PATCH 01/14] Update execution.go --- state/execution.go | 176 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 165 insertions(+), 11 deletions(-) diff --git a/state/execution.go b/state/execution.go index ce3bda70e5f..8ed40695011 100644 --- a/state/execution.go +++ b/state/execution.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "sync" "time" abci "github.com/cometbft/cometbft/abci/types" @@ -43,6 +44,14 @@ type BlockExecutor struct { // 1-element cache of validated blocks lastValidatedBlock *types.Block + // cache for validators to avoid repeated DB lookups + validatorCache map[int64]*types.ValidatorSet + validatorCacheMutex sync.RWMutex + + // cache for ABCI validators to avoid repeated conversions + abciValidatorCache map[string]abci.Validator + abciValidatorCacheMutex sync.RWMutex + logger log.Logger metrics *Metrics @@ -68,14 +77,16 @@ func NewBlockExecutor( options ...BlockExecutorOption, ) *BlockExecutor { res := &BlockExecutor{ - store: stateStore, - proxyApp: proxyApp, - eventBus: types.NopEventBus{}, - mempool: mempool, - evpool: evpool, - logger: logger, - metrics: NopMetrics(), - blockStore: blockStore, + store: stateStore, + proxyApp: proxyApp, + eventBus: types.NopEventBus{}, + mempool: mempool, + evpool: evpool, + logger: logger, + metrics: NopMetrics(), + blockStore: blockStore, + validatorCache: make(map[int64]*types.ValidatorSet), + abciValidatorCache: make(map[string]abci.Validator), } for _, option := range options { @@ -89,6 +100,17 @@ func (blockExec *BlockExecutor) Store() Store { return blockExec.store } +// ClearValidatorCache clears the validator caches to free memory +func (blockExec *BlockExecutor) ClearValidatorCache() { + blockExec.validatorCacheMutex.Lock() + blockExec.validatorCache = make(map[int64]*types.ValidatorSet) + blockExec.validatorCacheMutex.Unlock() + + blockExec.abciValidatorCacheMutex.Lock() + blockExec.abciValidatorCache = make(map[string]abci.Validator) + blockExec.abciValidatorCacheMutex.Unlock() +} + // SetEventBus - sets the event bus for publishing block related events. // If not called, it defaults to types.NopEventBus. 
func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { @@ -170,7 +192,7 @@ func (blockExec *BlockExecutor) ProcessProposal( Height: block.Height, Time: block.Time, Txs: block.Txs.ToSliceOfBytes(), - ProposedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), + ProposedLastCommit: blockExec.BuildLastCommitInfoFromStoreWithCache(block, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), ProposerAddress: block.ProposerAddress, NextValidatorsHash: block.NextValidatorsHash, @@ -232,7 +254,7 @@ func (blockExec *BlockExecutor) applyBlock(state State, blockID types.BlockID, b ProposerAddress: block.ProposerAddress, Height: block.Height, Time: block.Time, - DecidedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), + DecidedLastCommit: blockExec.BuildLastCommitInfoFromStoreWithCache(block, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), Txs: block.Txs.ToSliceOfBytes(), }) @@ -345,7 +367,7 @@ func (blockExec *BlockExecutor) ExtendVote( Height: vote.Height, Time: block.Time, Txs: block.Txs.ToSliceOfBytes(), - ProposedLastCommit: buildLastCommitInfoFromStore(block, blockExec.store, state.InitialHeight), + ProposedLastCommit: blockExec.BuildLastCommitInfoFromStoreWithCache(block, state.InitialHeight), Misbehavior: block.Evidence.Evidence.ToABCI(), NextValidatorsHash: block.NextValidatorsHash, ProposerAddress: block.ProposerAddress, @@ -474,6 +496,38 @@ func buildLastCommitInfoFromStore(block *types.Block, store Store, initialHeight return BuildLastCommitInfo(block, lastValSet, initialHeight) } +// BuildLastCommitInfoFromStoreWithCache is an optimized version that uses caching +func (blockExec *BlockExecutor) BuildLastCommitInfoFromStoreWithCache(block *types.Block, initialHeight int64) abci.CommitInfo { + if block.Height == initialHeight { // check for initial height before loading validators + // there is no last commit for the initial height. + // return an empty value. + return abci.CommitInfo{} + } + + height := block.Height - 1 + + // Try to get validators from cache first + blockExec.validatorCacheMutex.RLock() + lastValSet, found := blockExec.validatorCache[height] + blockExec.validatorCacheMutex.RUnlock() + + if !found { + // Load from store and cache + var err error + lastValSet, err = blockExec.store.LoadValidators(height) + if err != nil { + panic(fmt.Errorf("failed to load validator set at height %d: %w", height, err)) + } + + // Cache the result + blockExec.validatorCacheMutex.Lock() + blockExec.validatorCache[height] = lastValSet + blockExec.validatorCacheMutex.Unlock() + } + + return blockExec.BuildLastCommitInfoWithCache(block, lastValSet, initialHeight) +} + // BuildLastCommitInfo builds a CommitInfo from the given block and validator set. // If you want to load the validator set from the store instead of providing it, // use buildLastCommitInfoFromStore. @@ -513,6 +567,64 @@ func BuildLastCommitInfo(block *types.Block, lastValSet *types.ValidatorSet, ini } } +// BuildLastCommitInfoWithCache is an optimized version that uses caching for ABCI validators +func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, lastValSet *types.ValidatorSet, initialHeight int64) abci.CommitInfo { + if block.Height == initialHeight { + // there is no last commit for the initial height. + // return an empty value. 
+ return abci.CommitInfo{} + } + + var ( + commitSize = block.LastCommit.Size() + valSetLen = len(lastValSet.Validators) + ) + + // ensure that the size of the validator set in the last commit matches + // the size of the validator set in the state store. + if commitSize != valSetLen { + panic(fmt.Sprintf( + "commit size (%d) doesn't match validator set length (%d) at height %d\n\n%v\n\n%v", + commitSize, valSetLen, block.Height, block.LastCommit.Signatures, lastValSet.Validators, + )) + } + + votes := make([]abci.VoteInfo, block.LastCommit.Size()) + for i, val := range lastValSet.Validators { + commitSig := block.LastCommit.Signatures[i] + + // Create cache key for this validator + cacheKey := fmt.Sprintf("%s_%d", val.PubKey.Address(), val.VotingPower) + + // Try to get ABCI validator from cache + blockExec.abciValidatorCacheMutex.RLock() + abciVal, found := blockExec.abciValidatorCache[cacheKey] + blockExec.abciValidatorCacheMutex.RUnlock() + + if !found { + // Convert to ABCI validator and cache + abciVal = abci.Validator{ + Address: val.PubKey.Address(), + Power: val.VotingPower, + } + + blockExec.abciValidatorCacheMutex.Lock() + blockExec.abciValidatorCache[cacheKey] = abciVal + blockExec.abciValidatorCacheMutex.Unlock() + } + + votes[i] = abci.VoteInfo{ + Validator: abciVal, + BlockIdFlag: cmtproto.BlockIDFlag(commitSig.BlockIDFlag), + } + } + + return abci.CommitInfo{ + Round: block.LastCommit.Round, + Votes: votes, + } +} + // buildExtendedCommitInfoFromStore populates an ABCI extended commit from the // corresponding CometBFT extended commit ec, using the stored validator set // from ec. It requires ec to include the original precommit votes along with @@ -800,6 +912,48 @@ func ExecCommitBlock( return resp.AppHash, nil } +// ExecCommitBlockWithCache is an optimized version that uses caching for better performance +func (blockExec *BlockExecutor) ExecCommitBlockWithCache( + appConnConsensus proxy.AppConnConsensus, + block *types.Block, + logger log.Logger, + initialHeight int64, +) ([]byte, error) { + commitInfo := blockExec.BuildLastCommitInfoFromStoreWithCache(block, initialHeight) + + resp, err := appConnConsensus.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ + Hash: block.Hash(), + NextValidatorsHash: block.NextValidatorsHash, + ProposerAddress: block.ProposerAddress, + Height: block.Height, + Time: block.Time, + DecidedLastCommit: commitInfo, + Misbehavior: block.Evidence.Evidence.ToABCI(), + Txs: block.Txs.ToSliceOfBytes(), + }) + if err != nil { + logger.Error("error in proxyAppConn.FinalizeBlock", "err", err) + return nil, err + } + + // Assert that the application correctly returned tx results for each of the transactions provided in the block + if len(block.Txs) != len(resp.TxResults) { + return nil, fmt.Errorf("expected tx results length to match size of transactions in block. 
Expected %d, got %d", len(block.Txs), len(resp.TxResults)) + } + + logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", resp.AppHash)) + + // Commit block + _, err = appConnConsensus.Commit(context.TODO()) + if err != nil { + logger.Error("client error during proxyAppConn.Commit", "err", err) + return nil, err + } + + // ResponseCommit has no error or log + return resp.AppHash, nil +} + func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64, state State) (uint64, error) { base := blockExec.blockStore.Base() if retainHeight <= base { From 861cb160d79f90d8010fdadfec6fa8706e5aef8b Mon Sep 17 00:00:00 2001 From: Gengar Date: Wed, 24 Sep 2025 13:57:45 +0300 Subject: [PATCH 02/14] Update execution_test.go --- state/execution_test.go | 139 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) diff --git a/state/execution_test.go b/state/execution_test.go index f546c6ed8f6..8b6d0243977 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1138,3 +1138,142 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc }, } } + +// TestBuildLastCommitInfoWithCache tests the caching functionality +func TestBuildLastCommitInfoWithCache(t *testing.T) { + // Create test validators + val1, _ := types.RandValidator(true, 10) + val2, _ := types.RandValidator(true, 20) + valSet := types.NewValidatorSet([]*types.Validator{val1, val2}) + + // Create a mock store + mockStore := &mocks.Store{} + mockStore.On("LoadValidators", int64(1)).Return(valSet, nil) + + // Create block executor with cache + blockExec := sm.NewBlockExecutor( + mockStore, + log.NewNopLogger(), + nil, // proxyApp + nil, // mempool + nil, // evpool + nil, // blockStore + ) + + // Create a test block + block := &types.Block{ + Header: types.Header{ + Height: 2, + }, + LastCommit: &types.Commit{ + Height: 1, + Round: 0, + Signatures: []types.CommitSig{ + {BlockIDFlag: types.BlockIDFlagCommit}, + {BlockIDFlag: types.BlockIDFlagCommit}, + }, + }, + } + + // First call - should load from store and cache + commitInfo1 := blockExec.BuildLastCommitInfoFromStoreWithCache(block, 1) + require.NotEmpty(t, commitInfo1.Votes) + require.Len(t, commitInfo1.Votes, 2) + + // Second call - should use cache (store should not be called again) + commitInfo2 := blockExec.BuildLastCommitInfoFromStoreWithCache(block, 1) + require.Equal(t, commitInfo1, commitInfo2) + + // Verify that LoadValidators was called only once + mockStore.AssertNumberOfCalls(t, "LoadValidators", 1) + + // Test cache clearing + blockExec.ClearValidatorCache() + + // Third call - should load from store again + commitInfo3 := blockExec.BuildLastCommitInfoFromStoreWithCache(block, 1) + require.Equal(t, commitInfo1, commitInfo3) + + // Verify that LoadValidators was called twice now + mockStore.AssertNumberOfCalls(t, "LoadValidators", 2) +} + +// BenchmarkBuildLastCommitInfoWithCache benchmarks the caching performance +func BenchmarkBuildLastCommitInfoWithCache(b *testing.B) { + // Create test validators + val1, _ := types.RandValidator(true, 10) + val2, _ := types.RandValidator(true, 20) + valSet := types.NewValidatorSet([]*types.Validator{val1, val2}) + + // Create a mock store + mockStore := &mocks.Store{} + mockStore.On("LoadValidators", int64(1)).Return(valSet, nil) + + // Create block executor with cache + blockExec := sm.NewBlockExecutor( + mockStore, + log.NewNopLogger(), + nil, // proxyApp + nil, // mempool + nil, // evpool + nil, // blockStore + ) + + // Create a test block + block := 
&types.Block{ + Header: types.Header{ + Height: 2, + }, + LastCommit: &types.Commit{ + Height: 1, + Round: 0, + Signatures: []types.CommitSig{ + {BlockIDFlag: types.BlockIDFlagCommit}, + {BlockIDFlag: types.BlockIDFlagCommit}, + }, + }, + } + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block, 1) + } + }) +} + +// BenchmarkBuildLastCommitInfoOriginal benchmarks the original version without caching +func BenchmarkBuildLastCommitInfoOriginal(b *testing.B) { + // Create test validators + val1, _ := types.RandValidator(true, 10) + val2, _ := types.RandValidator(true, 20) + valSet := types.NewValidatorSet([]*types.Validator{val1, val2}) + + // Create a mock store + mockStore := &mocks.Store{} + mockStore.On("LoadValidators", int64(1)).Return(valSet, nil) + + // Create a test block + block := &types.Block{ + Header: types.Header{ + Height: 2, + }, + LastCommit: &types.Commit{ + Height: 1, + Round: 0, + Signatures: []types.CommitSig{ + {BlockIDFlag: types.BlockIDFlagCommit}, + {BlockIDFlag: types.BlockIDFlagCommit}, + }, + }, + } + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + // Simulate the original behavior by calling LoadValidators each time + valSet, _ := mockStore.LoadValidators(1) + _ = sm.BuildLastCommitInfo(block, valSet, 1) + } + }) +} From b1c2ebd8831037be51f8f4601f03b1023d81e9bf Mon Sep 17 00:00:00 2001 From: Gengar Date: Thu, 2 Oct 2025 14:03:58 +0300 Subject: [PATCH 03/14] Update execution.go --- state/execution.go | 108 +++++++++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 44 deletions(-) diff --git a/state/execution.go b/state/execution.go index 8ed40695011..1b444192856 100644 --- a/state/execution.go +++ b/state/execution.go @@ -52,6 +52,9 @@ type BlockExecutor struct { abciValidatorCache map[string]abci.Validator abciValidatorCacheMutex sync.RWMutex + // cache management + maxCacheSize int + logger log.Logger metrics *Metrics @@ -87,6 +90,7 @@ func NewBlockExecutor( blockStore: blockStore, validatorCache: make(map[int64]*types.ValidatorSet), abciValidatorCache: make(map[string]abci.Validator), + maxCacheSize: 1000, // Limit cache size to prevent memory leaks } for _, option := range options { @@ -111,6 +115,57 @@ func (blockExec *BlockExecutor) ClearValidatorCache() { blockExec.abciValidatorCacheMutex.Unlock() } +// GetCacheSize returns the current cache sizes for testing +func (blockExec *BlockExecutor) GetCacheSize() (validatorCacheSize, abciValidatorCacheSize int) { + blockExec.validatorCacheMutex.RLock() + validatorCacheSize = len(blockExec.validatorCache) + blockExec.validatorCacheMutex.RUnlock() + + blockExec.abciValidatorCacheMutex.RLock() + abciValidatorCacheSize = len(blockExec.abciValidatorCache) + blockExec.abciValidatorCacheMutex.RUnlock() + return +} + +// SetMaxCacheSize sets the maximum cache size for testing +func (blockExec *BlockExecutor) SetMaxCacheSize(size int) { + blockExec.maxCacheSize = size +} + +// cleanupOldCacheEntries removes old entries from caches to prevent memory leaks +func (blockExec *BlockExecutor) cleanupOldCacheEntries() { + blockExec.validatorCacheMutex.Lock() + if len(blockExec.validatorCache) > blockExec.maxCacheSize { + // Remove oldest entries (simple FIFO cleanup) + // In a real implementation, you might want to use LRU or time-based cleanup + newCache := make(map[int64]*types.ValidatorSet) + count := 0 + for height, valSet := range blockExec.validatorCache { + if count < blockExec.maxCacheSize/2 { 
// Keep half of the cache + newCache[height] = valSet + count++ + } + } + blockExec.validatorCache = newCache + } + blockExec.validatorCacheMutex.Unlock() + + blockExec.abciValidatorCacheMutex.Lock() + if len(blockExec.abciValidatorCache) > blockExec.maxCacheSize { + // Remove oldest entries (simple FIFO cleanup) + newCache := make(map[string]abci.Validator) + count := 0 + for key, val := range blockExec.abciValidatorCache { + if count < blockExec.maxCacheSize/2 { // Keep half of the cache + newCache[key] = val + count++ + } + } + blockExec.abciValidatorCache = newCache + } + blockExec.abciValidatorCacheMutex.Unlock() +} + // SetEventBus - sets the event bus for publishing block related events. // If not called, it defaults to types.NopEventBus. func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { @@ -523,6 +578,9 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoFromStoreWithCache(block *typ blockExec.validatorCacheMutex.Lock() blockExec.validatorCache[height] = lastValSet blockExec.validatorCacheMutex.Unlock() + + // Cleanup old cache entries if needed + blockExec.cleanupOldCacheEntries() } return blockExec.BuildLastCommitInfoWithCache(block, lastValSet, initialHeight) @@ -593,8 +651,8 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, for i, val := range lastValSet.Validators { commitSig := block.LastCommit.Signatures[i] - // Create cache key for this validator - cacheKey := fmt.Sprintf("%s_%d", val.PubKey.Address(), val.VotingPower) + // Use validator address as cache key (already computed) + cacheKey := string(val.Address) // Try to get ABCI validator from cache blockExec.abciValidatorCacheMutex.RLock() @@ -604,13 +662,16 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, if !found { // Convert to ABCI validator and cache abciVal = abci.Validator{ - Address: val.PubKey.Address(), + Address: val.Address, Power: val.VotingPower, } blockExec.abciValidatorCacheMutex.Lock() blockExec.abciValidatorCache[cacheKey] = abciVal blockExec.abciValidatorCacheMutex.Unlock() + + // Cleanup old cache entries if needed + blockExec.cleanupOldCacheEntries() } votes[i] = abci.VoteInfo{ @@ -912,47 +973,6 @@ func ExecCommitBlock( return resp.AppHash, nil } -// ExecCommitBlockWithCache is an optimized version that uses caching for better performance -func (blockExec *BlockExecutor) ExecCommitBlockWithCache( - appConnConsensus proxy.AppConnConsensus, - block *types.Block, - logger log.Logger, - initialHeight int64, -) ([]byte, error) { - commitInfo := blockExec.BuildLastCommitInfoFromStoreWithCache(block, initialHeight) - - resp, err := appConnConsensus.FinalizeBlock(context.TODO(), &abci.RequestFinalizeBlock{ - Hash: block.Hash(), - NextValidatorsHash: block.NextValidatorsHash, - ProposerAddress: block.ProposerAddress, - Height: block.Height, - Time: block.Time, - DecidedLastCommit: commitInfo, - Misbehavior: block.Evidence.Evidence.ToABCI(), - Txs: block.Txs.ToSliceOfBytes(), - }) - if err != nil { - logger.Error("error in proxyAppConn.FinalizeBlock", "err", err) - return nil, err - } - - // Assert that the application correctly returned tx results for each of the transactions provided in the block - if len(block.Txs) != len(resp.TxResults) { - return nil, fmt.Errorf("expected tx results length to match size of transactions in block. 
Expected %d, got %d", len(block.Txs), len(resp.TxResults)) - } - - logger.Info("executed block", "height", block.Height, "app_hash", fmt.Sprintf("%X", resp.AppHash)) - - // Commit block - _, err = appConnConsensus.Commit(context.TODO()) - if err != nil { - logger.Error("client error during proxyAppConn.Commit", "err", err) - return nil, err - } - - // ResponseCommit has no error or log - return resp.AppHash, nil -} func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64, state State) (uint64, error) { base := blockExec.blockStore.Base() From 41ce1a4c7535b629a0b78847325c034bb06d0799 Mon Sep 17 00:00:00 2001 From: Gengar Date: Thu, 2 Oct 2025 14:04:20 +0300 Subject: [PATCH 04/14] Update execution_test.go --- state/execution_test.go | 72 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) diff --git a/state/execution_test.go b/state/execution_test.go index f72ebdeebfd..9179f88c939 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1180,7 +1180,7 @@ func TestBuildLastCommitInfoWithCache(t *testing.T) { // Verify that LoadValidators was called only once mockStore.AssertNumberOfCalls(t, "LoadValidators", 1) - // Test cache clearing + // Test cache clearing (only used in tests) blockExec.ClearValidatorCache() // Third call - should load from store again @@ -1191,6 +1191,76 @@ func TestBuildLastCommitInfoWithCache(t *testing.T) { mockStore.AssertNumberOfCalls(t, "LoadValidators", 2) } +// TestCacheCleanup tests the cache cleanup mechanism +func TestCacheCleanup(t *testing.T) { + // Create test validators + val1, _ := types.RandValidator(true, 10) + val2, _ := types.RandValidator(true, 20) + valSet := types.NewValidatorSet([]*types.Validator{val1, val2}) + + // Create a mock store + mockStore := &mocks.Store{} + mockStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil) + + // Create block executor with small cache size for testing + blockExec := sm.NewBlockExecutor( + mockStore, + log.NewNopLogger(), + nil, // proxyApp + nil, // mempool + nil, // evpool + nil, // blockStore + ) + + // Set small cache size for testing + blockExec.SetMaxCacheSize(2) + + // Create test blocks + block1 := &types.Block{ + Header: types.Header{Height: 2}, + LastCommit: &types.Commit{ + Height: 1, Round: 0, + Signatures: []types.CommitSig{ + {BlockIDFlag: types.BlockIDFlagCommit}, + {BlockIDFlag: types.BlockIDFlagCommit}, + }, + }, + } + + block2 := &types.Block{ + Header: types.Header{Height: 3}, + LastCommit: &types.Commit{ + Height: 2, Round: 0, + Signatures: []types.CommitSig{ + {BlockIDFlag: types.BlockIDFlagCommit}, + {BlockIDFlag: types.BlockIDFlagCommit}, + }, + }, + } + + block3 := &types.Block{ + Header: types.Header{Height: 4}, + LastCommit: &types.Commit{ + Height: 3, Round: 0, + Signatures: []types.CommitSig{ + {BlockIDFlag: types.BlockIDFlagCommit}, + {BlockIDFlag: types.BlockIDFlagCommit}, + }, + }, + } + + // Fill cache beyond limit + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block1, 1) + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block2, 1) + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block3, 1) + + // Verify cache cleanup was triggered + validatorCacheSize, _ := blockExec.GetCacheSize() + + // Cache should be cleaned up to maxCacheSize/2 + require.LessOrEqual(t, validatorCacheSize, 2) // maxCacheSize/2 + tolerance +} + // BenchmarkBuildLastCommitInfoWithCache benchmarks the caching performance func BenchmarkBuildLastCommitInfoWithCache(b *testing.B) { // Create test validators From 
4c90be463a368d2baa5bc82ce9648653651fffff Mon Sep 17 00:00:00 2001 From: Gengar Date: Thu, 2 Oct 2025 15:42:12 +0300 Subject: [PATCH 05/14] Update execution.go --- state/execution.go | 45 ++++++++++----------------------------------- 1 file changed, 10 insertions(+), 35 deletions(-) diff --git a/state/execution.go b/state/execution.go index 1b444192856..4164db9ea76 100644 --- a/state/execution.go +++ b/state/execution.go @@ -104,17 +104,6 @@ func (blockExec *BlockExecutor) Store() Store { return blockExec.store } -// ClearValidatorCache clears the validator caches to free memory -func (blockExec *BlockExecutor) ClearValidatorCache() { - blockExec.validatorCacheMutex.Lock() - blockExec.validatorCache = make(map[int64]*types.ValidatorSet) - blockExec.validatorCacheMutex.Unlock() - - blockExec.abciValidatorCacheMutex.Lock() - blockExec.abciValidatorCache = make(map[string]abci.Validator) - blockExec.abciValidatorCacheMutex.Unlock() -} - // GetCacheSize returns the current cache sizes for testing func (blockExec *BlockExecutor) GetCacheSize() (validatorCacheSize, abciValidatorCacheSize int) { blockExec.validatorCacheMutex.RLock() @@ -136,32 +125,19 @@ func (blockExec *BlockExecutor) SetMaxCacheSize(size int) { func (blockExec *BlockExecutor) cleanupOldCacheEntries() { blockExec.validatorCacheMutex.Lock() if len(blockExec.validatorCache) > blockExec.maxCacheSize { - // Remove oldest entries (simple FIFO cleanup) - // In a real implementation, you might want to use LRU or time-based cleanup - newCache := make(map[int64]*types.ValidatorSet) - count := 0 - for height, valSet := range blockExec.validatorCache { - if count < blockExec.maxCacheSize/2 { // Keep half of the cache - newCache[height] = valSet - count++ - } - } - blockExec.validatorCache = newCache + // Simple cleanup: clear half of the cache + // Since Go maps don't guarantee iteration order, we'll clear the entire cache + // and let it rebuild naturally. This is simpler and avoids the FIFO issue. + blockExec.validatorCache = make(map[int64]*types.ValidatorSet) } blockExec.validatorCacheMutex.Unlock() blockExec.abciValidatorCacheMutex.Lock() if len(blockExec.abciValidatorCache) > blockExec.maxCacheSize { - // Remove oldest entries (simple FIFO cleanup) - newCache := make(map[string]abci.Validator) - count := 0 - for key, val := range blockExec.abciValidatorCache { - if count < blockExec.maxCacheSize/2 { // Keep half of the cache - newCache[key] = val - count++ - } - } - blockExec.abciValidatorCache = newCache + // Simple cleanup: clear half of the cache + // Since Go maps don't guarantee iteration order, we'll clear the entire cache + // and let it rebuild naturally. This is simpler and avoids the FIFO issue. 
+ blockExec.abciValidatorCache = make(map[string]abci.Validator) } blockExec.abciValidatorCacheMutex.Unlock() } @@ -578,7 +554,7 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoFromStoreWithCache(block *typ blockExec.validatorCacheMutex.Lock() blockExec.validatorCache[height] = lastValSet blockExec.validatorCacheMutex.Unlock() - + // Cleanup old cache entries if needed blockExec.cleanupOldCacheEntries() } @@ -669,7 +645,7 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, blockExec.abciValidatorCacheMutex.Lock() blockExec.abciValidatorCache[cacheKey] = abciVal blockExec.abciValidatorCacheMutex.Unlock() - + // Cleanup old cache entries if needed blockExec.cleanupOldCacheEntries() } @@ -973,7 +949,6 @@ func ExecCommitBlock( return resp.AppHash, nil } - func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64, state State) (uint64, error) { base := blockExec.blockStore.Base() if retainHeight <= base { From 9939620c060d05ae7da2c1457b434a1d05ffca4a Mon Sep 17 00:00:00 2001 From: Gengar Date: Thu, 2 Oct 2025 15:42:31 +0300 Subject: [PATCH 06/14] Update execution_test.go --- state/execution_test.go | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/state/execution_test.go b/state/execution_test.go index 9179f88c939..38e89dc1fdf 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1180,15 +1180,7 @@ func TestBuildLastCommitInfoWithCache(t *testing.T) { // Verify that LoadValidators was called only once mockStore.AssertNumberOfCalls(t, "LoadValidators", 1) - // Test cache clearing (only used in tests) - blockExec.ClearValidatorCache() - - // Third call - should load from store again - commitInfo3 := blockExec.BuildLastCommitInfoFromStoreWithCache(block, 1) - require.Equal(t, commitInfo1, commitInfo3) - - // Verify that LoadValidators was called twice now - mockStore.AssertNumberOfCalls(t, "LoadValidators", 2) + // Test that cache is working - second call should not hit the store } // TestCacheCleanup tests the cache cleanup mechanism @@ -1211,7 +1203,7 @@ func TestCacheCleanup(t *testing.T) { nil, // evpool nil, // blockStore ) - + // Set small cache size for testing blockExec.SetMaxCacheSize(2) @@ -1256,7 +1248,7 @@ func TestCacheCleanup(t *testing.T) { // Verify cache cleanup was triggered validatorCacheSize, _ := blockExec.GetCacheSize() - + // Cache should be cleaned up to maxCacheSize/2 require.LessOrEqual(t, validatorCacheSize, 2) // maxCacheSize/2 + tolerance } From fad2eb4cdffbcdad1bf66ca444c0054cb5faef46 Mon Sep 17 00:00:00 2001 From: Gengar Date: Wed, 15 Oct 2025 13:36:00 +0300 Subject: [PATCH 07/14] Update execution.go --- state/execution.go | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/state/execution.go b/state/execution.go index 4164db9ea76..78823cd87b5 100644 --- a/state/execution.go +++ b/state/execution.go @@ -537,25 +537,30 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoFromStoreWithCache(block *typ height := block.Height - 1 - // Try to get validators from cache first + // Try to get validators from cache first with double-checked locking blockExec.validatorCacheMutex.RLock() lastValSet, found := blockExec.validatorCache[height] blockExec.validatorCacheMutex.RUnlock() if !found { - // Load from store and cache + // Load from store var err error lastValSet, err = blockExec.store.LoadValidators(height) if err != nil { panic(fmt.Errorf("failed to load validator set at height %d: %w", height, err)) } - // Cache the result + // 
Double-checked locking: acquire write lock and check again blockExec.validatorCacheMutex.Lock() - blockExec.validatorCache[height] = lastValSet + // Check again in case another goroutine added it + if existingValSet, exists := blockExec.validatorCache[height]; exists { + lastValSet = existingValSet + } else { + blockExec.validatorCache[height] = lastValSet + } blockExec.validatorCacheMutex.Unlock() - // Cleanup old cache entries if needed + // Cleanup old cache entries if needed (outside of lock to avoid deadlock) blockExec.cleanupOldCacheEntries() } @@ -630,23 +635,29 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, // Use validator address as cache key (already computed) cacheKey := string(val.Address) - // Try to get ABCI validator from cache + // Try to get ABCI validator from cache with double-checked locking blockExec.abciValidatorCacheMutex.RLock() abciVal, found := blockExec.abciValidatorCache[cacheKey] blockExec.abciValidatorCacheMutex.RUnlock() if !found { - // Convert to ABCI validator and cache + // Convert to ABCI validator abciVal = abci.Validator{ Address: val.Address, Power: val.VotingPower, } + // Double-checked locking: acquire write lock and check again blockExec.abciValidatorCacheMutex.Lock() - blockExec.abciValidatorCache[cacheKey] = abciVal + // Check again in case another goroutine added it + if existingVal, exists := blockExec.abciValidatorCache[cacheKey]; exists { + abciVal = existingVal + } else { + blockExec.abciValidatorCache[cacheKey] = abciVal + } blockExec.abciValidatorCacheMutex.Unlock() - // Cleanup old cache entries if needed + // Cleanup old cache entries if needed (outside of lock to avoid deadlock) blockExec.cleanupOldCacheEntries() } From 20fb9b5217270a859301933433053e12f4fb816a Mon Sep 17 00:00:00 2001 From: Gengar Date: Wed, 15 Oct 2025 14:00:13 +0300 Subject: [PATCH 08/14] Update execution.go --- state/execution.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/state/execution.go b/state/execution.go index 78823cd87b5..f4a0d010a5d 100644 --- a/state/execution.go +++ b/state/execution.go @@ -123,8 +123,9 @@ func (blockExec *BlockExecutor) SetMaxCacheSize(size int) { // cleanupOldCacheEntries removes old entries from caches to prevent memory leaks func (blockExec *BlockExecutor) cleanupOldCacheEntries() { + // Only cleanup if cache is significantly over the limit to avoid frequent cleanup blockExec.validatorCacheMutex.Lock() - if len(blockExec.validatorCache) > blockExec.maxCacheSize { + if len(blockExec.validatorCache) > blockExec.maxCacheSize*2 { // Simple cleanup: clear half of the cache // Since Go maps don't guarantee iteration order, we'll clear the entire cache // and let it rebuild naturally. This is simpler and avoids the FIFO issue. @@ -133,7 +134,7 @@ func (blockExec *BlockExecutor) cleanupOldCacheEntries() { blockExec.validatorCacheMutex.Unlock() blockExec.abciValidatorCacheMutex.Lock() - if len(blockExec.abciValidatorCache) > blockExec.maxCacheSize { + if len(blockExec.abciValidatorCache) > blockExec.maxCacheSize*2 { // Simple cleanup: clear half of the cache // Since Go maps don't guarantee iteration order, we'll clear the entire cache // and let it rebuild naturally. This is simpler and avoids the FIFO issue. 
From 60be2e5233efa9e9991c87245de7d4688a24432b Mon Sep 17 00:00:00 2001 From: Gengar Date: Wed, 15 Oct 2025 14:14:11 +0300 Subject: [PATCH 09/14] Update execution_test.go --- state/execution_test.go | 77 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 72 insertions(+), 5 deletions(-) diff --git a/state/execution_test.go b/state/execution_test.go index 38e89dc1fdf..999428e6e1e 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1205,7 +1205,7 @@ func TestCacheCleanup(t *testing.T) { ) // Set small cache size for testing - blockExec.SetMaxCacheSize(2) + blockExec.SetMaxCacheSize(1) // Create test blocks block1 := &types.Block{ @@ -1241,16 +1241,83 @@ func TestCacheCleanup(t *testing.T) { }, } - // Fill cache beyond limit + // Fill cache beyond limit (need to exceed maxCacheSize * 2 = 2) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block1, 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block2, 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block3, 1) + // Add one more to trigger cleanup (total 4 > maxCacheSize * 2 = 2) + block4 := &types.Block{ + Header: types.Header{Height: 5}, + LastCommit: &types.Commit{ + Height: 4, + Signatures: []types.CommitSig{ + { + ValidatorAddress: val1.Address, + Timestamp: time.Now(), + Signature: []byte("signature4_1"), + }, + { + ValidatorAddress: val2.Address, + Timestamp: time.Now(), + Signature: []byte("signature4_2"), + }, + }, + }, + } + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block4, 1) + + // Add one more to ensure cleanup is triggered + block5 := &types.Block{ + Header: types.Header{Height: 6}, + LastCommit: &types.Commit{ + Height: 5, + Signatures: []types.CommitSig{ + { + ValidatorAddress: val1.Address, + Timestamp: time.Now(), + Signature: []byte("signature5_1"), + }, + { + ValidatorAddress: val2.Address, + Timestamp: time.Now(), + Signature: []byte("signature5_2"), + }, + }, + }, + } + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block5, 1) + + // Add one more to exceed threshold (total 6 > maxCacheSize * 2 = 2) + block6 := &types.Block{ + Header: types.Header{Height: 7}, + LastCommit: &types.Commit{ + Height: 6, + Signatures: []types.CommitSig{ + { + ValidatorAddress: val1.Address, + Timestamp: time.Now(), + Signature: []byte("signature6_1"), + }, + { + ValidatorAddress: val2.Address, + Timestamp: time.Now(), + Signature: []byte("signature6_2"), + }, + }, + }, + } + _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block6, 1) + // Verify cache cleanup was triggered - validatorCacheSize, _ := blockExec.GetCacheSize() + validatorCacheSize, abciValidatorCacheSize := blockExec.GetCacheSize() + + // Validator cache should be cleaned up (cleared completely when over limit) + require.Equal(t, 0, validatorCacheSize) // Cache is cleared when over maxCacheSize * 2 - // Cache should be cleaned up to maxCacheSize/2 - require.LessOrEqual(t, validatorCacheSize, 2) // maxCacheSize/2 + tolerance + // ABCI validator cache may still have entries as it uses different keys + // but should not exceed the threshold significantly + require.LessOrEqual(t, abciValidatorCacheSize, 4) // Reasonable upper bound } // BenchmarkBuildLastCommitInfoWithCache benchmarks the caching performance From e15f6247a7c054a46d7ca1aa94fbdf8e8279ad2c Mon Sep 17 00:00:00 2001 From: Gengar Date: Mon, 20 Oct 2025 22:56:07 +0300 Subject: [PATCH 10/14] Update execution_test.go --- state/execution_test.go | 72 ++++++----------------------------------- 1 file changed, 10 insertions(+), 62 deletions(-) diff --git 
a/state/execution_test.go b/state/execution_test.go index 999428e6e1e..b5d0c710696 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1230,94 +1230,42 @@ func TestCacheCleanup(t *testing.T) { }, } - block3 := &types.Block{ - Header: types.Header{Height: 4}, - LastCommit: &types.Commit{ - Height: 3, Round: 0, - Signatures: []types.CommitSig{ - {BlockIDFlag: types.BlockIDFlagCommit}, - {BlockIDFlag: types.BlockIDFlagCommit}, - }, - }, - } - - // Fill cache beyond limit (need to exceed maxCacheSize * 2 = 2) + // Fill cache beyond limit (need to exceed maxCacheSize = 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block1, 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block2, 1) - _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block3, 1) - // Add one more to trigger cleanup (total 4 > maxCacheSize * 2 = 2) + // Add one more to trigger cleanup (total 3 > maxCacheSize = 1) block4 := &types.Block{ - Header: types.Header{Height: 5}, + Header: types.Header{Height: 4}, LastCommit: &types.Commit{ - Height: 4, + Height: 3, Signatures: []types.CommitSig{ { ValidatorAddress: val1.Address, Timestamp: time.Now(), - Signature: []byte("signature4_1"), + Signature: []byte("signature3_1"), }, { ValidatorAddress: val2.Address, Timestamp: time.Now(), - Signature: []byte("signature4_2"), + Signature: []byte("signature3_2"), }, }, }, } _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block4, 1) - // Add one more to ensure cleanup is triggered - block5 := &types.Block{ - Header: types.Header{Height: 6}, - LastCommit: &types.Commit{ - Height: 5, - Signatures: []types.CommitSig{ - { - ValidatorAddress: val1.Address, - Timestamp: time.Now(), - Signature: []byte("signature5_1"), - }, - { - ValidatorAddress: val2.Address, - Timestamp: time.Now(), - Signature: []byte("signature5_2"), - }, - }, - }, - } - _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block5, 1) - - // Add one more to exceed threshold (total 6 > maxCacheSize * 2 = 2) - block6 := &types.Block{ - Header: types.Header{Height: 7}, - LastCommit: &types.Commit{ - Height: 6, - Signatures: []types.CommitSig{ - { - ValidatorAddress: val1.Address, - Timestamp: time.Now(), - Signature: []byte("signature6_1"), - }, - { - ValidatorAddress: val2.Address, - Timestamp: time.Now(), - Signature: []byte("signature6_2"), - }, - }, - }, - } - _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block6, 1) - // Verify cache cleanup was triggered validatorCacheSize, abciValidatorCacheSize := blockExec.GetCacheSize() // Validator cache should be cleaned up (cleared completely when over limit) - require.Equal(t, 0, validatorCacheSize) // Cache is cleared when over maxCacheSize * 2 + // Note: cleanup is triggered when adding new items, so we need to add one more + // to trigger the cleanup for the validator cache + require.LessOrEqual(t, validatorCacheSize, 1) // Should be 0 or 1 after cleanup // ABCI validator cache may still have entries as it uses different keys // but should not exceed the threshold significantly - require.LessOrEqual(t, abciValidatorCacheSize, 4) // Reasonable upper bound + require.LessOrEqual(t, abciValidatorCacheSize, 2) // Reasonable upper bound } // BenchmarkBuildLastCommitInfoWithCache benchmarks the caching performance From e1a1d027b1f39d76e75249029bf77f5ba8acdfab Mon Sep 17 00:00:00 2001 From: Gengar Date: Mon, 20 Oct 2025 22:56:36 +0300 Subject: [PATCH 11/14] Update execution_test.go --- state/execution_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/state/execution_test.go 
b/state/execution_test.go index b5d0c710696..8d2b229aad1 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1230,6 +1230,7 @@ func TestCacheCleanup(t *testing.T) { }, } + // Fill cache beyond limit (need to exceed maxCacheSize = 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block1, 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block2, 1) From c4796dfc5aa0989a4def5e4a26128d1da853e3eb Mon Sep 17 00:00:00 2001 From: Gengar Date: Mon, 20 Oct 2025 22:57:25 +0300 Subject: [PATCH 12/14] Update execution_test.go --- state/execution_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/state/execution_test.go b/state/execution_test.go index 8d2b229aad1..b5d0c710696 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1230,7 +1230,6 @@ func TestCacheCleanup(t *testing.T) { }, } - // Fill cache beyond limit (need to exceed maxCacheSize = 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block1, 1) _ = blockExec.BuildLastCommitInfoFromStoreWithCache(block2, 1) From ea909ffc12c56473ac3dce6bc4d6ab91e59cd333 Mon Sep 17 00:00:00 2001 From: Gengar Date: Mon, 20 Oct 2025 22:57:43 +0300 Subject: [PATCH 13/14] Update execution.go --- state/execution.go | 77 +++++++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 35 deletions(-) diff --git a/state/execution.go b/state/execution.go index f4a0d010a5d..a864a9744bc 100644 --- a/state/execution.go +++ b/state/execution.go @@ -90,7 +90,7 @@ func NewBlockExecutor( blockStore: blockStore, validatorCache: make(map[int64]*types.ValidatorSet), abciValidatorCache: make(map[string]abci.Validator), - maxCacheSize: 1000, // Limit cache size to prevent memory leaks + maxCacheSize: 2000, // Limit cache size to prevent memory leaks } for _, option := range options { @@ -123,24 +123,41 @@ func (blockExec *BlockExecutor) SetMaxCacheSize(size int) { // cleanupOldCacheEntries removes old entries from caches to prevent memory leaks func (blockExec *BlockExecutor) cleanupOldCacheEntries() { - // Only cleanup if cache is significantly over the limit to avoid frequent cleanup - blockExec.validatorCacheMutex.Lock() - if len(blockExec.validatorCache) > blockExec.maxCacheSize*2 { - // Simple cleanup: clear half of the cache - // Since Go maps don't guarantee iteration order, we'll clear the entire cache - // and let it rebuild naturally. This is simpler and avoids the FIFO issue. - blockExec.validatorCache = make(map[int64]*types.ValidatorSet) - } - blockExec.validatorCacheMutex.Unlock() - - blockExec.abciValidatorCacheMutex.Lock() - if len(blockExec.abciValidatorCache) > blockExec.maxCacheSize*2 { - // Simple cleanup: clear half of the cache - // Since Go maps don't guarantee iteration order, we'll clear the entire cache - // and let it rebuild naturally. This is simpler and avoids the FIFO issue. 
- blockExec.abciValidatorCache = make(map[string]abci.Validator) - } - blockExec.abciValidatorCacheMutex.Unlock() + // Check validator cache size with read lock first + blockExec.validatorCacheMutex.RLock() + validatorCacheSize := len(blockExec.validatorCache) + blockExec.validatorCacheMutex.RUnlock() + + if validatorCacheSize > blockExec.maxCacheSize { + // Only acquire write lock when we actually need to clean up + blockExec.validatorCacheMutex.Lock() + // Double-check in case another goroutine cleaned it up + if len(blockExec.validatorCache) > blockExec.maxCacheSize { + // Simple cleanup: clear the entire cache + // Since Go maps don't guarantee iteration order, we'll clear the entire cache + // and let it rebuild naturally. This is simpler and avoids the FIFO issue. + blockExec.validatorCache = make(map[int64]*types.ValidatorSet) + } + blockExec.validatorCacheMutex.Unlock() + } + + // Check ABCI validator cache size with read lock first + blockExec.abciValidatorCacheMutex.RLock() + abciValidatorCacheSize := len(blockExec.abciValidatorCache) + blockExec.abciValidatorCacheMutex.RUnlock() + + if abciValidatorCacheSize > blockExec.maxCacheSize { + // Only acquire write lock when we actually need to clean up + blockExec.abciValidatorCacheMutex.Lock() + // Double-check in case another goroutine cleaned it up + if len(blockExec.abciValidatorCache) > blockExec.maxCacheSize { + // Simple cleanup: clear the entire cache + // Since Go maps don't guarantee iteration order, we'll clear the entire cache + // and let it rebuild naturally. This is simpler and avoids the FIFO issue. + blockExec.abciValidatorCache = make(map[string]abci.Validator) + } + blockExec.abciValidatorCacheMutex.Unlock() + } } // SetEventBus - sets the event bus for publishing block related events. 
@@ -538,7 +555,7 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoFromStoreWithCache(block *typ height := block.Height - 1 - // Try to get validators from cache first with double-checked locking + // Try to get validators from cache first blockExec.validatorCacheMutex.RLock() lastValSet, found := blockExec.validatorCache[height] blockExec.validatorCacheMutex.RUnlock() @@ -551,14 +568,9 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoFromStoreWithCache(block *typ panic(fmt.Errorf("failed to load validator set at height %d: %w", height, err)) } - // Double-checked locking: acquire write lock and check again + // Store in cache blockExec.validatorCacheMutex.Lock() - // Check again in case another goroutine added it - if existingValSet, exists := blockExec.validatorCache[height]; exists { - lastValSet = existingValSet - } else { - blockExec.validatorCache[height] = lastValSet - } + blockExec.validatorCache[height] = lastValSet blockExec.validatorCacheMutex.Unlock() // Cleanup old cache entries if needed (outside of lock to avoid deadlock) @@ -636,7 +648,7 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, // Use validator address as cache key (already computed) cacheKey := string(val.Address) - // Try to get ABCI validator from cache with double-checked locking + // Try to get ABCI validator from cache blockExec.abciValidatorCacheMutex.RLock() abciVal, found := blockExec.abciValidatorCache[cacheKey] blockExec.abciValidatorCacheMutex.RUnlock() @@ -648,14 +660,9 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, Power: val.VotingPower, } - // Double-checked locking: acquire write lock and check again + // Store in cache blockExec.abciValidatorCacheMutex.Lock() - // Check again in case another goroutine added it - if existingVal, exists := blockExec.abciValidatorCache[cacheKey]; exists { - abciVal = existingVal - } else { - blockExec.abciValidatorCache[cacheKey] = abciVal - } + blockExec.abciValidatorCache[cacheKey] = abciVal blockExec.abciValidatorCacheMutex.Unlock() // Cleanup old cache entries if needed (outside of lock to avoid deadlock) From 2833386b925b1b0c3956b504f3160e728b7838a1 Mon Sep 17 00:00:00 2001 From: Gengar Date: Fri, 31 Oct 2025 00:18:13 +0200 Subject: [PATCH 14/14] Refactor ABCI validator conversion logic Refactor ABCI validator conversion to use canonical helper for consistent field population. --- state/execution.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/state/execution.go b/state/execution.go index a864a9744bc..4ab87c3b2d1 100644 --- a/state/execution.go +++ b/state/execution.go @@ -654,11 +654,10 @@ func (blockExec *BlockExecutor) BuildLastCommitInfoWithCache(block *types.Block, blockExec.abciValidatorCacheMutex.RUnlock() if !found { - // Convert to ABCI validator - abciVal = abci.Validator{ - Address: val.Address, - Power: val.VotingPower, - } + // Convert to ABCI validator using the canonical helper to ensure + // all fields (e.g. PubKey) are populated identically to the + // non-cached path. + abciVal = types.TM2PB.Validator(val) // Store in cache blockExec.abciValidatorCacheMutex.Lock()
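The pattern the series converges on — a height-keyed validator-set cache plus an address-keyed ABCI-validator cache, each guarded by a sync.RWMutex, size-capped by simply clearing the map once it grows past maxCacheSize, plain read-then-write locking (the double-checked variant added in patch 07 is dropped again in patch 13), and conversion routed through types.TM2PB.Validator in patch 14 — can be summarized in a small standalone sketch. The sketch below is illustrative only and is not part of the patches: it covers just the height-keyed cache, and ValidatorSet, Validator, newValidatorCache and the load callback are simplified placeholders rather than the real CometBFT types or store API.

package main

import (
	"fmt"
	"sync"
)

// Placeholder types standing in for types.ValidatorSet / abci.Validator.
type Validator struct {
	Address []byte
	Power   int64
}

type ValidatorSet struct {
	Validators []*Validator
}

// validatorCache mirrors the pattern the patch series settles on: a
// height-keyed map guarded by an RWMutex, with a size cap that simply
// clears the map when exceeded instead of doing FIFO/LRU bookkeeping.
type validatorCache struct {
	mu       sync.RWMutex
	byHeight map[int64]*ValidatorSet
	maxSize  int
}

func newValidatorCache(maxSize int) *validatorCache {
	return &validatorCache{byHeight: make(map[int64]*ValidatorSet), maxSize: maxSize}
}

// get returns the cached set for height, loading it via load on a miss.
// No double-checked locking: if two goroutines miss concurrently, both load
// and the last write wins, which matches the simplification in patch 13.
func (c *validatorCache) get(height int64, load func(int64) (*ValidatorSet, error)) (*ValidatorSet, error) {
	c.mu.RLock()
	vs, ok := c.byHeight[height]
	c.mu.RUnlock()
	if ok {
		return vs, nil
	}

	vs, err := load(height)
	if err != nil {
		return nil, fmt.Errorf("load validators at height %d: %w", height, err)
	}

	c.mu.Lock()
	c.byHeight[height] = vs
	// Size-capped cleanup: once over the limit, drop everything except the
	// entry just added and let the cache rebuild naturally, avoiding any
	// assumption about map iteration order.
	if len(c.byHeight) > c.maxSize {
		c.byHeight = map[int64]*ValidatorSet{height: vs}
	}
	c.mu.Unlock()
	return vs, nil
}

func main() {
	cache := newValidatorCache(2)
	load := func(h int64) (*ValidatorSet, error) {
		fmt.Println("store lookup for height", h) // stands in for store.LoadValidators
		return &ValidatorSet{Validators: []*Validator{{Address: []byte{byte(h)}, Power: 10}}}, nil
	}
	// The second lookup of height 1 is served from the cache; heights 2 and 3
	// push the map past maxSize and trigger the clear-and-rebuild cleanup.
	for _, h := range []int64{1, 1, 2, 3} {
		if _, err := cache.get(h, load); err != nil {
			panic(err)
		}
	}
}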