diff --git a/.github/workflows/claude-code-review.yml b/.github/workflows/claude-code-review.yml
deleted file mode 100644
index b5e8cfd4d..000000000
--- a/.github/workflows/claude-code-review.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Claude Code Review
-
-on:
- pull_request:
- types: [opened, synchronize, ready_for_review, reopened]
- # Optional: Only run on specific file changes
- # paths:
- # - "src/**/*.ts"
- # - "src/**/*.tsx"
- # - "src/**/*.js"
- # - "src/**/*.jsx"
-
-jobs:
- claude-review:
- # Optional: Filter by PR author
- # if: |
- # github.event.pull_request.user.login == 'external-contributor' ||
- # github.event.pull_request.user.login == 'new-developer' ||
- # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR'
-
- runs-on: ubuntu-latest
- permissions:
- contents: read
- pull-requests: read
- issues: read
- id-token: write
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 1
-
- - name: Run Claude Code Review
- id: claude-review
- uses: anthropics/claude-code-action@v1
- with:
- claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
- plugin_marketplaces: 'https://github.com/anthropics/claude-code.git'
- plugins: 'code-review@claude-code-plugins'
- prompt: '/code-review:code-review ${{ github.repository }}/pull/${{ github.event.pull_request.number }}'
- # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
- # or https://code.claude.com/docs/en/cli-reference for available options
-
diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml
deleted file mode 100644
index d300267f1..000000000
--- a/.github/workflows/claude.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Claude Code
-
-on:
- issue_comment:
- types: [created]
- pull_request_review_comment:
- types: [created]
- issues:
- types: [opened, assigned]
- pull_request_review:
- types: [submitted]
-
-jobs:
- claude:
- if: |
- (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
- (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
- (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
- (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
- runs-on: ubuntu-latest
- permissions:
- contents: read
- pull-requests: read
- issues: read
- id-token: write
- actions: read # Required for Claude to read CI results on PRs
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 1
-
- - name: Run Claude Code
- id: claude
- uses: anthropics/claude-code-action@v1
- with:
- claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
-
- # This is an optional setting that allows Claude to read CI results on PRs
- additional_permissions: |
- actions: read
-
- # Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
- # prompt: 'Update the pull request description to include a summary of changes.'
-
- # Optional: Add claude_args to customize behavior and configuration
- # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
- # or https://code.claude.com/docs/en/cli-reference for available options
- # claude_args: '--allowed-tools Bash(gh pr:*)'
-
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index ae50182dd..2b24db077 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -16,7 +16,7 @@ permissions:
jobs:
units:
runs-on: "${{ github.repository_owner == 'erpc' && 'blacksmith-8vcpu-ubuntu-2404' || 'ubuntu-24.04' }}"
- timeout-minutes: 20
+ timeout-minutes: 30
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0
diff --git a/architecture/evm/eth_call.go b/architecture/evm/eth_call.go
index ac7e4d848..cf71c4c63 100644
--- a/architecture/evm/eth_call.go
+++ b/architecture/evm/eth_call.go
@@ -2,31 +2,204 @@ package evm
import (
"context"
+ "fmt"
+ "sync"
"github.com/erpc/erpc/common"
)
+// Global batcher manager for network-level Multicall3 batching
+var (
+ globalBatcherManager *BatcherManager
+ batcherManagerOnce sync.Once
+)
+
+// defaultMulticall3AggregationConfig is the default config when Multicall3Aggregation
+// is not explicitly configured. Enabled by default to match documented behavior.
+var defaultMulticall3AggregationConfig = func() *common.Multicall3AggregationConfig {
+ cfg := &common.Multicall3AggregationConfig{Enabled: true}
+ cfg.SetDefaults()
+ return cfg
+}()
+
+// GetBatcherManager returns the global batcher manager.
+func GetBatcherManager() *BatcherManager {
+ batcherManagerOnce.Do(func() {
+ globalBatcherManager = NewBatcherManager()
+ })
+ return globalBatcherManager
+}
+
+// ShutdownBatcherManager shuts down the global batcher manager.
+// Should be called during application shutdown.
+func ShutdownBatcherManager() {
+ if globalBatcherManager != nil {
+ globalBatcherManager.Shutdown()
+ }
+}
+
+// networkForwarder wraps a Network to implement Forwarder interface.
+type networkForwarder struct {
+ network common.Network
+}
+
+func (f *networkForwarder) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return f.network.Forward(ctx, req)
+}
+
+func (f *networkForwarder) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ cache := f.network.Cache()
+ if cache == nil || cache.IsObjectNull() {
+ return nil
+ }
+ return cache.Set(ctx, req, resp)
+}
+
+// projectPreForward_eth_call is the pre-forward hook for eth_call requests.
+// It handles Multicall3 batching when enabled, aggregating multiple eth_call requests
+// into a single Multicall3 call for improved throughput.
+//
+// Returns:
+// - handled: true if the request was handled (either batched or forwarded directly)
+// - response: the response if handled, nil otherwise
+// - error: any error that occurred
+//
+// The function will forward the request directly (bypassing batching) when:
+// - Multicall3 aggregation is disabled in config
+// - The request is not eligible for batching (has gas/value/from fields, etc.)
+// - The batcher queue is full or at capacity
+// - The request's deadline is too tight for batching
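+//
+// Illustrative example (shapes assumed from the eligibility rules above, not an
+// exhaustive spec): a request whose params look like
+//   [{"to": "0x...", "data": "0x..."}, "latest"]
+// is a batching candidate, while a request that also sets fields such as "from",
+// "gas" or "value" is forwarded individually.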
func projectPreForward_eth_call(ctx context.Context, network common.Network, nq *common.NormalizedRequest) (bool, *common.NormalizedResponse, error) {
jrq, err := nq.JsonRpcRequest()
if err != nil {
return false, nil, nil
}
+ // Normalize params: ensure block param is present
jrq.RLock()
- if len(jrq.Params) != 1 {
- jrq.RUnlock()
+ paramsLen := len(jrq.Params)
+ jrq.RUnlock()
+
+ if paramsLen == 0 {
return false, nil, nil
}
- jrq.RUnlock()
- // Some upstreams require the block number to be specified as a parameter.
- jrq.Lock()
- jrq.Params = []interface{}{
- jrq.Params[0],
- "latest",
+ // Add "latest" block param if missing (only 1 param)
+ if paramsLen == 1 {
+ jrq.Lock()
+ jrq.Params = append(jrq.Params, "latest")
+ jrq.Unlock()
+ }
+
+ // Get Multicall3 aggregation config, using defaults if not explicitly configured
+ cfg := network.Config()
+ var aggCfg *common.Multicall3AggregationConfig
+ if cfg != nil && cfg.Evm != nil && cfg.Evm.Multicall3Aggregation != nil {
+ aggCfg = cfg.Evm.Multicall3Aggregation
+ } else {
+ // Use default config (enabled by default)
+ aggCfg = defaultMulticall3AggregationConfig
}
- jrq.Unlock()
- resp, err := network.Forward(ctx, nq)
- return true, resp, err
+ // Check if Multicall3 aggregation is explicitly disabled
+ if !aggCfg.Enabled {
+ // Batching disabled, use normal forward
+ resp, err := network.Forward(ctx, nq)
+ return true, resp, err
+ }
+
+ // Check eligibility for batching
+ eligible, reason := IsEligibleForBatching(nq, aggCfg)
+ if !eligible {
+ // Not eligible, forward normally
+ if logger := network.Logger(); logger != nil {
+ logger.Debug().
+ Str("reason", reason).
+ Str("method", "eth_call").
+ Msg("request not eligible for multicall3 batching")
+ }
+ resp, err := network.Forward(ctx, nq)
+ return true, resp, err
+ }
+
+ // Extract call info for batching key
+ _, _, blockRef, err := ExtractCallInfo(nq)
+ if err != nil {
+ resp, err := network.Forward(ctx, nq)
+ return true, resp, err
+ }
+
+ // Build batching key
+ projectId := network.ProjectId()
+ if projectId == "" {
+ projectId = fmt.Sprintf("network:%s", network.Id())
+ }
+
+ userId := ""
+ if aggCfg.AllowCrossUserBatching == nil || !*aggCfg.AllowCrossUserBatching {
+ userId = nq.UserId()
+ }
+
+ key := BatchingKey{
+ ProjectId: projectId,
+ NetworkId: network.Id(),
+ BlockRef: blockRef,
+ DirectivesKey: DeriveDirectivesKey(nq.Directives()),
+ UserId: userId,
+ }
+
+ // Check cache before batching (unless skip-cache-read is set)
+ if !nq.SkipCacheRead() {
+ cache := network.Cache()
+ if cache != nil && !cache.IsObjectNull() {
+ cachedResp, cacheErr := cache.Get(ctx, nq)
+ if cacheErr != nil {
+ // Log cache errors but continue to batching
+ if logger := network.Logger(); logger != nil {
+ logger.Warn().
+ Err(cacheErr).
+ Str("networkId", network.Id()).
+ Msg("multicall3 pre-batch cache get failed, continuing to batch")
+ }
+ } else if cachedResp != nil && !cachedResp.IsObjectNull(ctx) {
+ // Cache hit - return cached response directly
+ cachedResp.SetFromCache(true)
+ return true, cachedResp, nil
+ }
+ }
+ }
+
+ // Get or create batcher for this project+network
+ mgr := GetBatcherManager()
+ forwarder := &networkForwarder{network: network}
+ batcher := mgr.GetOrCreate(projectId, network.Id(), aggCfg, forwarder, network.Logger())
+ if batcher == nil {
+ // Batching disabled, forward normally
+ resp, err := network.Forward(ctx, nq)
+ return true, resp, err
+ }
+
+ // Enqueue request
+ entry, bypass, err := batcher.Enqueue(ctx, key, nq)
+ if err != nil || bypass {
+ // Log enqueue errors for debugging (bypass without error is normal)
+ if err != nil && network.Logger() != nil {
+ network.Logger().Debug().
+ Err(err).
+ Str("projectId", projectId).
+ Str("networkId", network.Id()).
+ Msg("multicall3 enqueue failed, forwarding normally")
+ }
+ // Bypass batching, forward normally
+ resp, err := network.Forward(ctx, nq)
+ return true, resp, err
+ }
+
+ // Wait for batch result
+ select {
+ case result := <-entry.ResultCh:
+ return true, result.Response, result.Error
+ case <-ctx.Done():
+ return true, nil, ctx.Err()
+ }
}
diff --git a/architecture/evm/eth_call_test.go b/architecture/evm/eth_call_test.go
new file mode 100644
index 000000000..a3f294b28
--- /dev/null
+++ b/architecture/evm/eth_call_test.go
@@ -0,0 +1,306 @@
+package evm
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/util"
+ "github.com/rs/zerolog"
+ "github.com/stretchr/testify/require"
+)
+
+// mockNetworkForEthCall implements common.Network for testing eth_call pre-forward hook
+type mockNetworkForEthCall struct {
+ networkId string
+ projectId string
+ cfg *common.NetworkConfig
+ forwardFn func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error)
+ mu sync.Mutex
+ callCount int
+}
+
+func (m *mockNetworkForEthCall) Id() string { return m.networkId }
+func (m *mockNetworkForEthCall) Label() string { return m.networkId }
+func (m *mockNetworkForEthCall) ProjectId() string { return m.projectId }
+func (m *mockNetworkForEthCall) Architecture() common.NetworkArchitecture {
+ return common.ArchitectureEvm
+}
+func (m *mockNetworkForEthCall) Config() *common.NetworkConfig { return m.cfg }
+func (m *mockNetworkForEthCall) Logger() *zerolog.Logger { return nil }
+func (m *mockNetworkForEthCall) GetMethodMetrics(method string) common.TrackedMetrics { return nil }
+func (m *mockNetworkForEthCall) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ m.mu.Lock()
+ m.callCount++
+ m.mu.Unlock()
+ if m.forwardFn != nil {
+ return m.forwardFn(ctx, req)
+ }
+ return nil, nil
+}
+func (m *mockNetworkForEthCall) GetFinality(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) common.DataFinalityState {
+ return common.DataFinalityStateUnknown
+}
+func (m *mockNetworkForEthCall) EvmHighestLatestBlockNumber(ctx context.Context) int64 { return 0 }
+func (m *mockNetworkForEthCall) EvmHighestFinalizedBlockNumber(ctx context.Context) int64 { return 0 }
+func (m *mockNetworkForEthCall) EvmLeaderUpstream(ctx context.Context) common.Upstream { return nil }
+func (m *mockNetworkForEthCall) Cache() common.CacheDAL { return nil }
+
+func (m *mockNetworkForEthCall) GetCallCount() int {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.callCount
+}
+
+func TestProjectPreForward_eth_call_Batching(t *testing.T) {
+ // Create valid multicall response for 2 calls
+ // Each result: success=true with some return data
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xde, 0xad, 0xbe, 0xef}},
+ {Success: true, ReturnData: []byte{0xca, 0xfe, 0xba, 0xbe}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hexEncode(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ cfg := &common.NetworkConfig{
+ Evm: &common.EvmNetworkConfig{
+ ChainId: 1,
+ Multicall3Aggregation: &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 20,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ },
+ },
+ }
+ cfg.Evm.Multicall3Aggregation.SetDefaults()
+
+ network := &mockNetworkForEthCall{
+ networkId: "evm:1",
+ projectId: "test-project",
+ cfg: cfg,
+ forwardFn: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return mockResp, nil
+ },
+ }
+
+ ctx := context.Background()
+
+ // Create two requests with different targets
+ // Use only 1 param initially (no block param) - the pre-forward hook should add "latest"
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ })
+ jrq1.ID = "req1"
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+ req1.SetNetwork(network)
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x2222222222222222222222222222222222222222",
+ "data": "0x05060708",
+ },
+ })
+ jrq2.ID = "req2"
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+ req2.SetNetwork(network)
+
+ // Both should be batched into one call
+ var resp1, resp2 *common.NormalizedResponse
+ var err1, err2 error
+ done := make(chan struct{}, 2)
+
+ go func() {
+ _, resp1, err1 = projectPreForward_eth_call(ctx, network, req1)
+ done <- struct{}{}
+ }()
+
+ go func() {
+ _, resp2, err2 = projectPreForward_eth_call(ctx, network, req2)
+ done <- struct{}{}
+ }()
+
+ // Wait with timeout
+ for i := 0; i < 2; i++ {
+ select {
+ case <-done:
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for batched requests")
+ }
+ }
+
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ require.NotNil(t, resp1)
+ require.NotNil(t, resp2)
+
+ // Should have been batched into ONE call
+ require.Equal(t, 1, network.GetCallCount(), "requests should be batched into one multicall")
+}
+
+func TestProjectPreForward_eth_call_NoBatching_Disabled(t *testing.T) {
+ // Test that requests are forwarded normally when batching is disabled
+ jrr, _ := common.NewJsonRpcResponse(nil, "0xdeadbeef", nil)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ cfg := &common.NetworkConfig{
+ Evm: &common.EvmNetworkConfig{
+ ChainId: 1,
+ // Explicitly disable batching (nil config uses default which has Enabled: true)
+ Multicall3Aggregation: &common.Multicall3AggregationConfig{
+ Enabled: false,
+ },
+ },
+ }
+
+ network := &mockNetworkForEthCall{
+ networkId: "evm:1",
+ projectId: "test-project",
+ cfg: cfg,
+ forwardFn: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return mockResp, nil
+ },
+ }
+
+ ctx := context.Background()
+ // Use only 1 param - the pre-forward hook will add "latest" and forward
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ req.SetNetwork(network)
+
+ handled, resp, err := projectPreForward_eth_call(ctx, network, req)
+ require.True(t, handled)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, 1, network.GetCallCount())
+}
+
+func TestProjectPreForward_eth_call_NoBatching_Ineligible(t *testing.T) {
+ // Test that ineligible requests are forwarded normally (e.g., with "from" field)
+ jrr, _ := common.NewJsonRpcResponse(nil, "0xdeadbeef", nil)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ cfg := &common.NetworkConfig{
+ Evm: &common.EvmNetworkConfig{
+ ChainId: 1,
+ Multicall3Aggregation: &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 20,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ },
+ },
+ }
+ cfg.Evm.Multicall3Aggregation.SetDefaults()
+
+ network := &mockNetworkForEthCall{
+ networkId: "evm:1",
+ projectId: "test-project",
+ cfg: cfg,
+ forwardFn: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return mockResp, nil
+ },
+ }
+
+ ctx := context.Background()
+ // Request with "from" field is ineligible for batching
+ // Use only 1 param so the pre-forward hook processes it
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ "from": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ },
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ req.SetNetwork(network)
+
+ handled, resp, err := projectPreForward_eth_call(ctx, network, req)
+ require.True(t, handled)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, 1, network.GetCallCount())
+}
+
+func TestProjectPreForward_eth_call_AddsBlockParam(t *testing.T) {
+ // Test that missing block param is added as "latest"
+ jrr, _ := common.NewJsonRpcResponse(nil, "0xdeadbeef", nil)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ var capturedReq *common.NormalizedRequest
+ cfg := &common.NetworkConfig{
+ Evm: &common.EvmNetworkConfig{
+ ChainId: 1,
+ // Explicitly disable batching to test block param normalization
+ // (nil config uses default which has Enabled: true)
+ Multicall3Aggregation: &common.Multicall3AggregationConfig{
+ Enabled: false,
+ },
+ },
+ }
+
+ network := &mockNetworkForEthCall{
+ networkId: "evm:1",
+ projectId: "test-project",
+ cfg: cfg,
+ forwardFn: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ capturedReq = req
+ return mockResp, nil
+ },
+ }
+
+ ctx := context.Background()
+ // Request with only 1 param (no block param)
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ req.SetNetwork(network)
+
+ handled, _, err := projectPreForward_eth_call(ctx, network, req)
+ require.True(t, handled)
+ require.NoError(t, err)
+
+ // Verify block param was added
+ capturedJrq, err := capturedReq.JsonRpcRequest()
+ require.NoError(t, err)
+ require.Len(t, capturedJrq.Params, 2)
+ require.Equal(t, "latest", capturedJrq.Params[1])
+}
+
+// hexEncode is a helper to encode bytes as hex string
+func hexEncode(b []byte) string {
+ const hexChars = "0123456789abcdef"
+ dst := make([]byte, len(b)*2)
+ for i, v := range b {
+ dst[i*2] = hexChars[v>>4]
+ dst[i*2+1] = hexChars[v&0x0f]
+ }
+ return string(dst)
+}
diff --git a/architecture/evm/eth_getBlockByNumber_test.go b/architecture/evm/eth_getBlockByNumber_test.go
index 8e5559409..685dffc4a 100644
--- a/architecture/evm/eth_getBlockByNumber_test.go
+++ b/architecture/evm/eth_getBlockByNumber_test.go
@@ -70,6 +70,10 @@ func (t *testNetwork) GetFinality(ctx context.Context, req *common.NormalizedReq
return common.DataFinalityStateFinalized
}
+func (t *testNetwork) Cache() common.CacheDAL {
+ return nil
+}
+
func TestEnforceNonNullTaggedBlocks(t *testing.T) {
t.Run("TaggedBlockWithEnforcementDisabled_ReturnsNull", func(t *testing.T) {
// Create a request with a block tag ("pending") and directive disabled
diff --git a/architecture/evm/multicall3.go b/architecture/evm/multicall3.go
new file mode 100644
index 000000000..39dba73e9
--- /dev/null
+++ b/architecture/evm/multicall3.go
@@ -0,0 +1,451 @@
+// Package evm includes Multicall3 helpers for aggregating eth_call batches.
+// Multicall3 aggregate3((address,bool,bytes)[]) expects ABI-encoded calls and
+// returns a dynamic array of (bool success, bytes returnData) with offsets
+// relative to the array head. This file encodes calldata and decodes results.
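+//
+// Illustrative calldata layout for aggregate3 with N calls (one word = 32 bytes),
+// as produced by encodeAggregate3Calls below:
+//   selector (4 bytes) | 0x20 (offset to array) | N (array length)
+//   | offsets[0..N-1] (relative to the start of the offset table, right after the length word)
+//   | per element: target address | allowFailure | 0x60 (offset to bytes) | byte length | padded call data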
+package evm
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/util"
+ "golang.org/x/crypto/sha3"
+)
+
+// multicall3RequestCounter provides unique IDs for multicall3 requests to prevent collisions
+var multicall3RequestCounter uint64
+
+const multicall3Address = "0xcA11bde05977b3631167028862bE2a173976CA11"
+
+var ErrMulticall3BatchNotEligible = errors.New("multicall3 batch not eligible")
+
+// safeUint64ToInt converts uint64 to int with overflow protection.
+// Returns an error if the value would overflow on the current platform.
+func safeUint64ToInt(v uint64) (int, error) {
+ if v > uint64(math.MaxInt) {
+ return 0, fmt.Errorf("integer overflow: %d exceeds max int", v)
+ }
+ return int(v), nil
+}
+
+const (
+ abiWordSize = 32
+ aggregate3ElementHeadLen = 3 * abiWordSize // address + allowFailure + data offset
+ evmAddressLength = 20
+)
+
+type Multicall3Call struct {
+ Request *common.NormalizedRequest
+ Target []byte
+ CallData []byte
+}
+
+type Multicall3Result struct {
+ Success bool
+ ReturnData []byte
+}
+
+func NewMulticall3Call(req *common.NormalizedRequest, targetHex, dataHex string) (Multicall3Call, error) {
+ if req == nil {
+ return Multicall3Call{}, ErrMulticall3BatchNotEligible
+ }
+ targetBytes, err := common.HexToBytes(targetHex)
+ if err != nil || len(targetBytes) != evmAddressLength {
+ return Multicall3Call{}, ErrMulticall3BatchNotEligible
+ }
+ callData, err := common.HexToBytes(dataHex)
+ if err != nil {
+ return Multicall3Call{}, ErrMulticall3BatchNotEligible
+ }
+ return Multicall3Call{
+ Request: req,
+ Target: targetBytes,
+ CallData: callData,
+ }, nil
+}
+
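+// NormalizeBlockParam canonicalizes an eth_call block parameter: nil becomes "latest",
+// block hashes are rendered as 0x-prefixed hex, hex block numbers (e.g. "0x10") are
+// converted to decimal strings ("16"), and any other value is returned as parsed by
+// util.ParseBlockParameter.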
+func NormalizeBlockParam(param interface{}) (string, error) {
+ if param == nil {
+ return "latest", nil
+ }
+
+ blockNumberStr, blockHash, err := util.ParseBlockParameter(param)
+ if err != nil {
+ return "", err
+ }
+ if blockHash != nil {
+ return fmt.Sprintf("0x%x", blockHash), nil
+ }
+ if blockNumberStr == "" {
+ return "", errors.New("block parameter is empty")
+ }
+ if strings.HasPrefix(blockNumberStr, "0x") {
+ bn, err := common.HexToInt64(blockNumberStr)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%d", bn), nil
+ }
+ return blockNumberStr, nil
+}
+
+func BuildMulticall3Request(requests []*common.NormalizedRequest, blockParam interface{}) (*common.NormalizedRequest, []Multicall3Call, error) {
+ if len(requests) < 1 {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+
+ if blockParam == nil {
+ blockParam = "latest"
+ }
+
+ calls := make([]Multicall3Call, 0, len(requests))
+ for _, req := range requests {
+ if req == nil {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+
+ jrq, err := req.JsonRpcRequest()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ jrq.RLock()
+ method := jrq.Method
+ params := jrq.Params
+ jrq.RUnlock()
+
+ if !strings.EqualFold(method, "eth_call") {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+ if len(params) < 1 || len(params) > 2 {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+
+ callObj, ok := params[0].(map[string]interface{})
+ if !ok {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+
+ targetHex, ok := callObj["to"].(string)
+ if !ok || targetHex == "" {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+
+ dataHex := "0x"
+ if dataValue, ok := callObj["data"]; ok {
+ dataStr, ok := dataValue.(string)
+ if !ok {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+ dataHex = dataStr
+ } else if inputValue, ok := callObj["input"]; ok {
+ inputStr, ok := inputValue.(string)
+ if !ok {
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+ dataHex = inputStr
+ }
+
+ for key := range callObj {
+ switch key {
+ case "to", "data", "input":
+ continue
+ default:
+ return nil, nil, ErrMulticall3BatchNotEligible
+ }
+ }
+
+ call, err := NewMulticall3Call(req, targetHex, dataHex)
+ if err != nil {
+ return nil, nil, err
+ }
+ calls = append(calls, call)
+ }
+
+ encodedCalls, err := encodeAggregate3Calls(calls)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ callObj := map[string]interface{}{
+ "to": multicall3Address,
+ "data": "0x" + hex.EncodeToString(encodedCalls),
+ }
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{callObj, blockParam})
+ // Use atomic counter combined with timestamp for guaranteed unique IDs
+ counter := atomic.AddUint64(&multicall3RequestCounter, 1)
+ jrq.ID = fmt.Sprintf("multicall3-%d-%d", time.Now().UnixNano(), counter)
+
+ nrq := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ nrq.CopyHttpContextFrom(requests[0])
+ if dirs := requests[0].Directives(); dirs != nil {
+ nrq.SetDirectives(dirs.Clone())
+ }
+
+ return nrq, calls, nil
+}
+
+func DecodeMulticall3Aggregate3Result(data []byte) ([]Multicall3Result, error) {
+ if len(data) < 32 {
+ return nil, errors.New("multicall3 result too short")
+ }
+
+ offset, err := readUint256(data[:32])
+ if err != nil {
+ return nil, err
+ }
+ base, err := safeUint64ToInt(offset)
+ if err != nil {
+ return nil, fmt.Errorf("multicall3 result offset overflow: %w", err)
+ }
+ if base < 0 || base+32 > len(data) {
+ return nil, errors.New("multicall3 result offset out of bounds")
+ }
+
+ count, err := readUint256(data[base : base+32])
+ if err != nil {
+ return nil, err
+ }
+ if count == 0 {
+ return []Multicall3Result{}, nil
+ }
+
+ countInt, err := safeUint64ToInt(count)
+ if err != nil {
+ return nil, fmt.Errorf("multicall3 result count overflow: %w", err)
+ }
+
+ offsetsStart := base + 32
+ // Check bounds before multiplication to prevent integer overflow
+ // Each element needs 32 bytes in the offset table
+ maxElements := (len(data) - offsetsStart) / 32
+ if countInt > maxElements {
+ return nil, errors.New("multicall3 result count exceeds available data")
+ }
+ offsetsEnd := offsetsStart + countInt*32
+ if offsetsEnd > len(data) {
+ return nil, errors.New("multicall3 result offsets out of bounds")
+ }
+
+ results := make([]Multicall3Result, countInt)
+ for i := 0; i < countInt; i++ {
+ offsetStart := offsetsStart + i*32
+ offsetVal, err := readUint256(data[offsetStart : offsetStart+32])
+ if err != nil {
+ return nil, err
+ }
+ // Element offsets are relative to where the offset table starts (after length word)
+ offsetValInt, err := safeUint64ToInt(offsetVal)
+ if err != nil {
+ return nil, fmt.Errorf("multicall3 result element offset overflow: %w", err)
+ }
+ elemStart := offsetsStart + offsetValInt
+ if elemStart < offsetsStart || elemStart+64 > len(data) {
+ return nil, errors.New("multicall3 result element out of bounds")
+ }
+
+ success, err := readBool(data[elemStart : elemStart+32])
+ if err != nil {
+ return nil, err
+ }
+
+ dataOffset, err := readUint256(data[elemStart+32 : elemStart+64])
+ if err != nil {
+ return nil, err
+ }
+ dataOffsetInt, err := safeUint64ToInt(dataOffset)
+ if err != nil {
+ return nil, fmt.Errorf("multicall3 result data offset overflow: %w", err)
+ }
+ bytesStart := elemStart + dataOffsetInt
+ if bytesStart < elemStart || bytesStart+32 > len(data) {
+ return nil, errors.New("multicall3 result bytes offset out of bounds")
+ }
+
+ dataLen, err := readUint256(data[bytesStart : bytesStart+32])
+ if err != nil {
+ return nil, err
+ }
+ dataLenInt, err := safeUint64ToInt(dataLen)
+ if err != nil {
+ return nil, fmt.Errorf("multicall3 result data length overflow: %w", err)
+ }
+ dataStart := bytesStart + 32
+ dataEnd := dataStart + dataLenInt
+ if dataStart < bytesStart || dataEnd > len(data) {
+ return nil, errors.New("multicall3 result bytes length out of bounds")
+ }
+
+ returnData := append([]byte(nil), data[dataStart:dataEnd]...)
+ results[i] = Multicall3Result{
+ Success: success,
+ ReturnData: returnData,
+ }
+ }
+
+ return results, nil
+}
+
+// ShouldFallbackMulticall3 determines if an error should trigger fallback to individual requests.
+// Returns true only when the multicall3 contract is unavailable (unsupported endpoint) or when
+// there are specific execution errors indicating the contract doesn't exist on this network.
+// Other execution errors (like reverts) should NOT trigger fallback as they would also fail individually.
+func ShouldFallbackMulticall3(err error) bool {
+ if err == nil {
+ return false
+ }
+ // Always fallback if endpoint is unsupported (e.g., network doesn't support eth_call)
+ if common.HasErrorCode(err, common.ErrCodeEndpointUnsupported) {
+ return true
+ }
+ // For execution errors, only fallback if it indicates multicall3 contract unavailability
+ if common.HasErrorCode(err, common.ErrCodeEndpointExecutionException) {
+ errStr := strings.ToLower(err.Error())
+ // Check for indicators that the multicall3 contract doesn't exist.
+ // Different providers use different error messages, so we match multiple patterns.
+ // NOTE: We intentionally do NOT include "execution reverted" as that pattern is too
+ // broad and would match legitimate contract reverts. Legitimate reverts should NOT
+ // trigger fallback - they would also revert when called individually.
+ contractUnavailablePatterns := []string{
+ "contract not found",
+ "no code at address",
+ "code is empty",
+ "not a contract",
+ "invalid opcode", // can indicate missing contract
+ "missing trie node", // pre-deployment block query
+ "does not exist",
+ "account not found",
+ }
+ for _, pattern := range contractUnavailablePatterns {
+ if strings.Contains(errStr, pattern) {
+ return true
+ }
+ }
+ // Other execution errors (like authentication, rate limits, etc.) should not fallback
+ return false
+ }
+ return false
+}
+
+func encodeAggregate3Calls(calls []Multicall3Call) ([]byte, error) {
+ arrayData, err := encodeAggregate3Array(calls)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make([]byte, 0, 4+abiWordSize+len(arrayData))
+ out = append(out, multicall3Aggregate3Selector...)
+ out = append(out, encodeUint64(abiWordSize)...)
+ out = append(out, arrayData...)
+ return out, nil
+}
+
+func encodeAggregate3Array(calls []Multicall3Call) ([]byte, error) {
+ // ABI array: length (1 word) + offsets (1 word each) + element data.
+ // Offsets are relative to start of array data (right after length word),
+ // so the first element starts at offset = N*32 (after the N offset words).
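+	// For example, with 2 calls the offset table is 2*32 = 64 bytes, so
+	// offsets[0] = 64 and offsets[1] = 64 + len(encoded element 0).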
+ offsetTableSize := abiWordSize * len(calls)
+ elements := make([][]byte, len(calls))
+ offsets := make([]uint64, len(calls))
+ // offsetTableSize is derived from len(calls) which is bounded by int,
+ // so this conversion to uint64 is safe (always non-negative).
+ cur := uint64(offsetTableSize) // #nosec G115
+
+ for i, call := range calls {
+ elem := encodeAggregate3Element(call)
+ elements[i] = elem
+ offsets[i] = cur
+ cur += uint64(len(elem))
+ }
+
+ // cur accumulates sizes of in-memory slices, so it must fit in int.
+ // Add explicit check for safety.
+ capacity, err := safeUint64ToInt(cur)
+ if err != nil {
+ return nil, fmt.Errorf("multicall3 encoded data too large: %w", err)
+ }
+ out := make([]byte, 0, capacity)
+ out = append(out, encodeUint64(uint64(len(calls)))...)
+ for _, off := range offsets {
+ out = append(out, encodeUint64(off)...)
+ }
+ for _, elem := range elements {
+ out = append(out, elem...)
+ }
+ return out, nil
+}
+
+func encodeAggregate3Element(call Multicall3Call) []byte {
+ head := make([]byte, 0, aggregate3ElementHeadLen)
+ head = append(head, encodeAddress(call.Target)...)
+ head = append(head, encodeBool(true)...)
+ head = append(head, encodeUint64(aggregate3ElementHeadLen)...)
+ tail := encodeBytes(call.CallData)
+ return append(head, tail...)
+}
+
+func encodeAddress(addr []byte) []byte {
+ out := make([]byte, 32)
+ copy(out[32-len(addr):], addr)
+ return out
+}
+
+func encodeBool(value bool) []byte {
+ out := make([]byte, 32)
+ if value {
+ out[31] = 1
+ }
+ return out
+}
+
+func encodeUint64(value uint64) []byte {
+ out := make([]byte, 32)
+ binary.BigEndian.PutUint64(out[24:], value)
+ return out
+}
+
+func encodeBytes(data []byte) []byte {
+ out := make([]byte, 0, abiWordSize+len(data)+abiWordSize)
+ out = append(out, encodeUint64(uint64(len(data)))...)
+ out = append(out, data...)
+ pad := (abiWordSize - (len(data) % abiWordSize)) % abiWordSize
+ if pad > 0 {
+ out = append(out, make([]byte, pad)...)
+ }
+ return out
+}
+
+func readUint256(data []byte) (uint64, error) {
+ if len(data) != 32 {
+ return 0, errors.New("invalid uint256 length")
+ }
+ val := new(big.Int).SetBytes(data)
+ if !val.IsUint64() {
+ return 0, errors.New("uint256 overflows uint64")
+ }
+ return val.Uint64(), nil
+}
+
+func readBool(data []byte) (bool, error) {
+ val, err := readUint256(data)
+ if err != nil {
+ return false, err
+ }
+ return val != 0, nil
+}
+
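+// multicall3Aggregate3Selector is the 4-byte selector for
+// aggregate3((address,bool,bytes)[]), derived at init time from the keccak256 hash of
+// the canonical signature (this should evaluate to 0x82ad56cb).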
+var multicall3Aggregate3Selector = func() []byte {
+ hasher := sha3.NewLegacyKeccak256()
+ hasher.Write([]byte("aggregate3((address,bool,bytes)[])"))
+ sum := hasher.Sum(nil)
+ return sum[:4]
+}()
diff --git a/architecture/evm/multicall3_batcher.go b/architecture/evm/multicall3_batcher.go
new file mode 100644
index 000000000..7eff15640
--- /dev/null
+++ b/architecture/evm/multicall3_batcher.go
@@ -0,0 +1,1253 @@
+package evm
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "runtime/debug"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/telemetry"
+ "github.com/rs/zerolog"
+)
+
+// stopAndDrainTimer safely stops a timer and drains its channel if needed.
+// This pattern is needed because timer.Stop() returns false if the timer already fired,
+// and in that case the value already sitting in the channel must be drained so that a
+// later receive does not observe a stale tick.
+func stopAndDrainTimer(timer *time.Timer) {
+ if !timer.Stop() {
+ select {
+ case <-timer.C:
+ default:
+ }
+ }
+}
+
+// Forwarder is the interface for forwarding requests through the network layer.
+type Forwarder interface {
+ Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error)
+ // SetCache writes a response to the cache for a request.
+ // Returns nil if caching is disabled or not available.
+ SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error
+}
+
+// Batcher aggregates eth_call requests into Multicall3 batches.
+type Batcher struct {
+ cfg *common.Multicall3AggregationConfig
+ forwarder Forwarder
+ logger *zerolog.Logger
+ batches map[string]*Batch // keyed by BatchingKey.String()
+ mu sync.RWMutex
+ queueSize int64 // counter for backpressure
+ shutdown chan struct{}
+ shutdownOnce sync.Once
+ wg sync.WaitGroup
+
+ // runtimeBypass holds contracts detected at runtime that revert when called via Multicall3
+ // but succeed when called individually. This in-memory cache resets on process restart.
+ // For persistent bypass configuration, use the BypassContracts config field.
+ // Note: This map can grow without bound; in practice this is limited by the number of
+ // unique contracts that fail via multicall3 but succeed individually (typically few).
+ // Protected by runtimeBypassMu.
+ runtimeBypass map[string]bool
+ runtimeBypassMu sync.RWMutex
+}
+
+// NewBatcher creates a new Multicall3 batcher.
+// Returns nil if cfg is nil or disabled - callers should check the return value.
+// The logger parameter is optional (can be nil) - if nil, debug logging is disabled.
+// Panics if forwarder is nil (programming error - caller must provide a valid forwarder).
+func NewBatcher(cfg *common.Multicall3AggregationConfig, forwarder Forwarder, logger *zerolog.Logger) *Batcher {
+ if cfg == nil || !cfg.Enabled {
+ return nil
+ }
+ if forwarder == nil {
+ panic("NewBatcher: forwarder cannot be nil")
+ }
+ b := &Batcher{
+ cfg: cfg,
+ forwarder: forwarder,
+ logger: logger,
+ batches: make(map[string]*Batch),
+ shutdown: make(chan struct{}),
+ runtimeBypass: make(map[string]bool),
+ }
+ return b
+}
+
+// logBypass logs a debug message when a request bypasses batching.
+// Does nothing if logger is nil.
+func (b *Batcher) logBypass(key BatchingKey, reason string) {
+ if b.logger == nil {
+ return
+ }
+ b.logger.Debug().
+ Str("reason", reason).
+ Str("projectId", key.ProjectId).
+ Str("networkId", key.NetworkId).
+ Str("blockRef", key.BlockRef).
+ Msg("request bypassing multicall3 batching")
+}
+
+// isRuntimeBypassed checks if a contract address is in the runtime bypass cache.
+// The address should be lowercase hex without 0x prefix.
+func (b *Batcher) isRuntimeBypassed(addrHex string) bool {
+ b.runtimeBypassMu.RLock()
+ defer b.runtimeBypassMu.RUnlock()
+ return b.runtimeBypass[addrHex]
+}
+
+// addRuntimeBypass adds a contract address to the runtime bypass cache.
+// The address should be lowercase hex without 0x prefix.
+func (b *Batcher) addRuntimeBypass(addrHex string, projectId, networkId string) {
+ b.runtimeBypassMu.Lock()
+ defer b.runtimeBypassMu.Unlock()
+ if !b.runtimeBypass[addrHex] {
+ b.runtimeBypass[addrHex] = true
+ telemetry.MetricMulticall3RuntimeBypassTotal.WithLabelValues(projectId, networkId).Inc()
+ if b.logger != nil {
+ b.logger.Info().
+ Str("contract", "0x"+addrHex).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Msg("auto-detected contract that reverts via multicall3, added to runtime bypass")
+ }
+ }
+}
+
+// IsRuntimeBypassed checks if a contract address should bypass batching due to runtime detection.
+// This is a public method for external callers (e.g., tests, diagnostics) to query the runtime
+// bypass cache. The internal Enqueue method uses isRuntimeBypassed for actual bypass checks.
+// The address can be provided with or without 0x prefix, and is case-insensitive.
+func (b *Batcher) IsRuntimeBypassed(targetHex string) bool {
+ if targetHex == "" {
+ return false
+ }
+ // Normalize: lowercase and remove 0x/0X prefix
+ normalized := strings.ToLower(targetHex)
+ normalized = strings.TrimPrefix(normalized, "0x")
+ return b.isRuntimeBypassed(normalized)
+}
+
+// Enqueue adds a request to a batch. Returns:
+// - entry: the batch entry (nil if bypass)
+// - bypass: true if request should be forwarded individually
+// - error: any error during processing
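+//
+// Callers that receive a non-nil entry are expected to wait on entry.ResultCh (and
+// their own context) for the batched result, as projectPreForward_eth_call does.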
+func (b *Batcher) Enqueue(ctx context.Context, key BatchingKey, req *common.NormalizedRequest) (*BatchEntry, bool, error) {
+ // Validate batching key
+ if err := key.Validate(); err != nil {
+ b.logBypass(key, fmt.Sprintf("invalid_key: %v", err))
+ return nil, true, err
+ }
+
+ // Extract call info
+ target, callData, _, err := ExtractCallInfo(req)
+ if err != nil {
+ b.logBypass(key, fmt.Sprintf("extract_call_info_error: %v", err))
+ return nil, true, err
+ }
+
+ // Validate target address length (must be 20 bytes for EVM)
+ if len(target) != 20 {
+ err := fmt.Errorf("invalid target address length: got %d, expected 20", len(target))
+ b.logBypass(key, fmt.Sprintf("invalid_target: %v", err))
+ return nil, true, err
+ }
+
+ // Check runtime bypass cache (contracts detected as reverting via multicall3)
+ targetHex := strings.ToLower(hex.EncodeToString(target))
+ if b.isRuntimeBypassed(targetHex) {
+ b.logBypass(key, "runtime_bypass_detected")
+ return nil, true, nil
+ }
+
+ // Derive call key for deduplication
+ callKey, err := DeriveCallKey(req)
+ if err != nil {
+ b.logBypass(key, fmt.Sprintf("derive_call_key_error: %v", err))
+ return nil, true, err
+ }
+
+ // Get deadline from context (if any)
+ // We don't create a synthetic deadline for no-timeout requests to avoid
+ // causing unnecessary timeouts on the upstream multicall call.
+ now := time.Now()
+ deadline, hasDeadline := ctx.Deadline()
+
+ // Check if deadline is too tight (only if there's a deadline)
+ minWait := time.Duration(b.cfg.MinWaitMs) * time.Millisecond
+ if hasDeadline && deadline.Before(now.Add(minWait)) {
+ telemetry.MetricMulticall3QueueOverflowTotal.WithLabelValues(key.ProjectId, key.NetworkId, "deadline_too_tight").Inc()
+ b.logBypass(key, "deadline_too_tight")
+ return nil, true, nil
+ }
+
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Check caps
+ if b.queueSize >= int64(b.cfg.MaxQueueSize) {
+ telemetry.MetricMulticall3QueueOverflowTotal.WithLabelValues(key.ProjectId, key.NetworkId, "queue_full").Inc()
+ b.logBypass(key, "queue_full")
+ return nil, true, nil
+ }
+ if len(b.batches) >= b.cfg.MaxPendingBatches {
+ // Check if this is a new batch key
+ if _, exists := b.batches[key.String()]; !exists {
+ telemetry.MetricMulticall3QueueOverflowTotal.WithLabelValues(key.ProjectId, key.NetworkId, "max_batches").Inc()
+ b.logBypass(key, "max_pending_batches")
+ return nil, true, nil
+ }
+ }
+
+ // Get or create batch
+ keyStr := key.String()
+ batch, exists := b.batches[keyStr]
+ if !exists {
+ // If OnlyIfPending is true, bypass batching when no batch is pending
+ if b.cfg.OnlyIfPending {
+ b.logBypass(key, "only_if_pending_no_batch")
+ return nil, true, nil
+ }
+ flushTime := now.Add(time.Duration(b.cfg.WindowMs) * time.Millisecond)
+ batch = NewBatch(key, flushTime)
+ b.batches[keyStr] = batch
+
+ // Start flush timer
+ b.wg.Add(1)
+ go b.scheduleFlush(keyStr, batch)
+ }
+
+ // Check if batch is flushing - create new batch if so
+ batch.mu.Lock()
+ if batch.Flushing {
+ batch.mu.Unlock()
+ // If OnlyIfPending is true, bypass batching since the current batch is flushing
+ // and we'd need to create a new one
+ if b.cfg.OnlyIfPending {
+ b.logBypass(key, "only_if_pending_batch_flushing")
+ return nil, true, nil
+ }
+ // Create new batch for this key
+ flushTime := now.Add(time.Duration(b.cfg.WindowMs) * time.Millisecond)
+ batch = NewBatch(key, flushTime)
+ b.batches[keyStr] = batch
+
+ b.wg.Add(1)
+ go b.scheduleFlush(keyStr, batch)
+
+ batch.mu.Lock()
+ }
+
+ // Check if batch is at capacity (unique calls, not entries)
+ uniqueCalls := len(batch.CallKeys)
+ if _, isDupe := batch.CallKeys[callKey]; !isDupe {
+ if uniqueCalls >= b.cfg.MaxCalls {
+ batch.mu.Unlock()
+ b.logBypass(key, "batch_full")
+ return nil, true, nil
+ }
+ }
+
+ // Check calldata size cap
+ currentSize := 0
+ for _, entries := range batch.CallKeys {
+ if len(entries) > 0 {
+ currentSize += len(entries[0].CallData)
+ }
+ }
+ if _, isDupe := batch.CallKeys[callKey]; !isDupe {
+ if currentSize+len(callData) > b.cfg.MaxCalldataBytes {
+ batch.mu.Unlock()
+ b.logBypass(key, "calldata_too_large")
+ return nil, true, nil
+ }
+ }
+
+ // Create entry
+ entry := &BatchEntry{
+ Ctx: ctx,
+ Request: req,
+ CallKey: callKey,
+ Target: target,
+ CallData: callData,
+ ResultCh: make(chan BatchResult, 1),
+ CreatedAt: now,
+ Deadline: deadline,
+ }
+
+ // Add to batch
+ batch.Entries = append(batch.Entries, entry)
+ batch.CallKeys[callKey] = append(batch.CallKeys[callKey], entry)
+
+ // Update flush time based on deadline (deadline-aware)
+ // Only update if the request has a deadline - requests without deadlines
+ // should not cause early flushes.
+ if hasDeadline {
+ safetyMargin := time.Duration(b.cfg.SafetyMarginMs) * time.Millisecond
+ proposedFlush := deadline.Add(-safetyMargin)
+ if proposedFlush.Before(batch.FlushTime) {
+ batch.FlushTime = proposedFlush
+ // Clamp to minimum wait
+ minFlush := now.Add(minWait)
+ if batch.FlushTime.Before(minFlush) {
+ batch.FlushTime = minFlush
+ }
+ // Notify the flush goroutine that FlushTime was shortened
+ select {
+ case batch.notifyCh <- struct{}{}:
+ default:
+ // Already has a pending notification
+ }
+ }
+ }
+
+ batch.mu.Unlock()
+ b.queueSize++
+ telemetry.MetricMulticall3QueueLen.WithLabelValues(key.ProjectId, key.NetworkId).Inc()
+
+ return entry, false, nil
+}
+
+// scheduleFlush waits until flush time and then flushes the batch.
+func (b *Batcher) scheduleFlush(keyStr string, batch *Batch) {
+ defer b.wg.Done()
+ defer func() {
+ if r := recover(); r != nil {
+ // Record metric regardless of logger availability
+ telemetry.MetricMulticall3PanicTotal.WithLabelValues(batch.Key.ProjectId, batch.Key.NetworkId, "scheduleFlush").Inc()
+
+ // Log panic with stack trace if logger available
+ if b.logger != nil {
+ b.logger.Error().
+ Str("panic", fmt.Sprintf("%v", r)).
+ Str("stack", string(debug.Stack())).
+ Str("batchKey", keyStr).
+ Msg("panic in scheduleFlush goroutine")
+ }
+ // Deliver error to all entries in the batch
+ batch.mu.Lock()
+ entries := batch.Entries
+ batch.mu.Unlock()
+ panicErr := common.NewErrJsonRpcExceptionInternal(
+ 0,
+ common.JsonRpcErrorServerSideException,
+ fmt.Sprintf("internal error: panic in batch scheduler: %v", r),
+ nil,
+ nil,
+ )
+ b.deliverError(entries, panicErr, batch.Key.ProjectId, batch.Key.NetworkId)
+ }
+ }()
+
+ for {
+ batch.mu.Lock()
+ flushTime := batch.FlushTime
+ batch.mu.Unlock()
+
+ waitDuration := time.Until(flushTime)
+ if waitDuration <= 0 {
+ b.flush(keyStr, batch)
+ return
+ }
+
+ timer := time.NewTimer(waitDuration)
+ select {
+ case <-timer.C:
+ b.flush(keyStr, batch)
+ return
+ case <-batch.notifyCh:
+ // FlushTime was shortened, stop current timer and recalculate
+ stopAndDrainTimer(timer)
+ continue
+ case <-b.shutdown:
+ stopAndDrainTimer(timer)
+ // On shutdown, flush the batch with error to avoid orphaned entries
+ b.flushWithShutdownError(keyStr, batch)
+ return
+ }
+ }
+}
+
+// flush processes a batch and delivers results.
+func (b *Batcher) flush(keyStr string, batch *Batch) {
+ batch.mu.Lock()
+ if batch.Flushing {
+ batch.mu.Unlock()
+ return
+ }
+ batch.Flushing = true
+ entries := batch.Entries
+ callKeys := batch.CallKeys
+ batch.mu.Unlock()
+
+ // Remove from active batches
+ b.mu.Lock()
+ if b.batches[keyStr] == batch {
+ delete(b.batches, keyStr)
+ }
+ // Defensive: ensure queueSize doesn't go negative
+ entriesLen := int64(len(entries))
+ if b.queueSize >= entriesLen {
+ b.queueSize -= entriesLen
+ } else {
+ b.queueSize = 0
+ }
+ b.mu.Unlock()
+
+ // Decrement queue length metric
+ telemetry.MetricMulticall3QueueLen.WithLabelValues(batch.Key.ProjectId, batch.Key.NetworkId).Sub(float64(len(entries)))
+
+ if len(entries) == 0 {
+ return
+ }
+
+ // Capture flush time for wait time calculations
+ flushTime := time.Now()
+
+ // Build ordered unique calls list by iterating entries slice (which preserves insertion order)
+ // and deduplicating based on CallKey. This ensures deterministic ordering.
+ type uniqueCall struct {
+ callKey string
+ entry *BatchEntry // first entry for this callKey
+ }
+ seenCallKeys := make(map[string]bool)
+ uniqueCalls := make([]uniqueCall, 0, len(callKeys))
+
+ for _, entry := range entries {
+ if !seenCallKeys[entry.CallKey] {
+ seenCallKeys[entry.CallKey] = true
+ uniqueCalls = append(uniqueCalls, uniqueCall{
+ callKey: entry.CallKey,
+ entry: entry,
+ })
+ }
+ }
+
+ // Emit batching metrics
+ projectId := batch.Key.ProjectId
+ networkId := batch.Key.NetworkId
+
+ // Record batch size (unique calls)
+ telemetry.MetricMulticall3BatchSize.WithLabelValues(projectId, networkId).Observe(float64(len(uniqueCalls)))
+
+ // Record wait time for each entry
+ for _, entry := range entries {
+ waitMs := float64(flushTime.Sub(entry.CreatedAt).Milliseconds())
+ telemetry.MetricMulticall3BatchWaitMs.WithLabelValues(projectId, networkId).Observe(waitMs)
+ }
+
+ // Record dedupe count if there were duplicates
+ totalEntries := len(entries)
+ uniqueCount := len(uniqueCalls)
+ if totalEntries > uniqueCount {
+ dedupeCount := totalEntries - uniqueCount
+ telemetry.MetricMulticall3DedupeTotal.WithLabelValues(projectId, networkId).Add(float64(dedupeCount))
+ }
+
+ // Build requests for BuildMulticall3Request
+ requests := make([]*common.NormalizedRequest, len(uniqueCalls))
+ for i, uc := range uniqueCalls {
+ requests[i] = uc.entry.Request
+ }
+
+ // Build the multicall3 request
+ blockParam, err := blockParamForMulticall(batch.Key.BlockRef)
+ if err != nil {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, networkId, "invalid_block_param").Inc()
+ b.deliverError(entries, err, projectId, networkId)
+ return
+ }
+ mcReq, _, err := BuildMulticall3Request(requests, blockParam)
+ if err != nil {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, networkId, "build_failed").Inc()
+ b.deliverError(entries, err, projectId, networkId)
+ return
+ }
+
+ // Mark request as composite type for metrics/tracing
+ mcReq.SetCompositeType(common.CompositeTypeMulticall3)
+
+ // Create a context with the earliest deadline from all entries.
+ // We don't use a single entry's context to avoid canceling the whole batch
+ // if one entry's context is canceled.
+ var earliestDeadline time.Time
+ for _, entry := range entries {
+ if entry.Deadline.IsZero() {
+ continue
+ }
+ if earliestDeadline.IsZero() || entry.Deadline.Before(earliestDeadline) {
+ earliestDeadline = entry.Deadline
+ }
+ }
+ var ctx context.Context
+ var cancel context.CancelFunc
+ if !earliestDeadline.IsZero() {
+ ctx, cancel = context.WithDeadline(context.Background(), earliestDeadline)
+ defer cancel()
+ } else {
+ ctx = context.Background()
+ }
+
+ // Forward the multicall request
+ mcResp, err := b.forwarder.Forward(ctx, mcReq)
+ if err != nil {
+ // Check if we should fallback to individual requests
+ if ShouldFallbackMulticall3(err) {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, networkId, "forward_error").Inc()
+ b.fallbackIndividual(entries, projectId, networkId)
+ return
+ }
+ // Wrap context errors with batching context for better debugging (after fallback check)
+ if ctx.Err() != nil {
+ err = fmt.Errorf("multicall3 batch forward failed (batch size: %d): %w", len(uniqueCalls), err)
+ }
+ b.deliverError(entries, err, projectId, networkId)
+ return
+ }
+
+ // Decode the multicall response
+ results, err := b.decodeMulticallResponse(mcResp)
+ if err != nil {
+ // Release the multicall response before fallback/error
+ mcResp.Release()
+ // Check if we should fallback to individual requests
+ if ShouldFallbackMulticall3(err) {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, networkId, "decode_error").Inc()
+ b.fallbackIndividual(entries, projectId, networkId)
+ return
+ }
+ b.deliverError(entries, err, projectId, networkId)
+ return
+ }
+
+ // Verify result count matches unique calls
+ if len(results) != len(uniqueCalls) {
+ mcResp.Release()
+ b.deliverError(entries, fmt.Errorf("multicall3 result count mismatch: got %d, expected %d", len(results), len(uniqueCalls)), projectId, networkId)
+ return
+ }
+
+ // Check if per-call caching is enabled (defaults to true)
+ cachePerCall := b.cfg.CachePerCall == nil || *b.cfg.CachePerCall
+
+ // Map results to entries, fanning out deduplicated results
+ for i, uc := range uniqueCalls {
+ result := results[i]
+ entriesForCall := callKeys[uc.callKey]
+
+ if result.Success {
+ // Build success response for each entry
+ resultHex := "0x" + hex.EncodeToString(result.ReturnData)
+
+ // For per-call caching, we only need to cache once per unique call
+ // Use the first entry's request for the cache write
+ var cachedOnce bool
+
+ for _, entry := range entriesForCall {
+ jrr, err := common.NewJsonRpcResponse(entry.Request.ID(), resultHex, nil)
+ if err != nil {
+ if b.logger != nil {
+ b.logger.Warn().
+ Err(err).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Str("callKey", uc.callKey).
+ Msg("multicall3 response construction failed for entry")
+ }
+ b.sendResult(entry, BatchResult{Error: err}, projectId, networkId)
+ continue
+ }
+ resp := common.NewNormalizedResponse().WithRequest(entry.Request).WithJsonRpcResponse(jrr)
+ // Propagate upstream metadata from multicall response
+ resp.SetUpstream(mcResp.Upstream())
+ resp.SetFromCache(mcResp.FromCache())
+
+ // Write to cache once per unique call (not once per duplicate entry)
+ if cachePerCall && !cachedOnce {
+ // Use background context for cache write to avoid request deadline issues
+ if err := b.forwarder.SetCache(context.Background(), entry.Request, resp); err != nil {
+ // Cache write failures are non-critical but we track them for observability
+ telemetry.MetricMulticall3CacheWriteErrorsTotal.WithLabelValues(projectId, networkId).Inc()
+ if b.logger != nil {
+ b.logger.Warn().
+ Err(err).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Str("callKey", uc.callKey).
+ Msg("multicall3 per-call cache write failed")
+ }
+ }
+ cachedOnce = true
+ }
+
+ b.sendResult(entry, BatchResult{Response: resp}, projectId, networkId)
+ }
+ } else {
+ // Call reverted in multicall3 - check if we should try auto-detection
+ if b.cfg.AutoDetectBypass && len(entriesForCall) > 0 {
+ // Try forwarding the first entry individually to see if it succeeds
+ firstEntry := entriesForCall[0]
+ targetHex := strings.ToLower(hex.EncodeToString(firstEntry.Target))
+
+ // Skip retry if already in runtime bypass (shouldn't happen, but defensive)
+ if !b.isRuntimeBypassed(targetHex) {
+ telemetry.MetricMulticall3AutoDetectRetryTotal.WithLabelValues(projectId, networkId, "attempt").Inc()
+
+ // Use the entry's context for the retry, but with a bounded fallback if it's already cancelled
+ retryCtx := firstEntry.Ctx
+ var retryCancel context.CancelFunc
+ select {
+ case <-retryCtx.Done():
+ // Original context cancelled - use bounded timeout for auto-detection
+ retryCtx, retryCancel = context.WithTimeout(context.Background(), 30*time.Second)
+ if b.logger != nil {
+ b.logger.Debug().
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Str("contract", "0x"+targetHex).
+ Err(firstEntry.Ctx.Err()).
+ Msg("multicall3 auto-detect retry using fallback context (original cancelled)")
+ }
+ default:
+ }
+
+ retryResp, retryErr := b.forwarder.Forward(retryCtx, firstEntry.Request)
+ if retryCancel != nil {
+ retryCancel()
+ }
+ if retryErr == nil && retryResp != nil {
+ // Individual call succeeded! This contract needs bypass
+ b.addRuntimeBypass(targetHex, projectId, networkId)
+ telemetry.MetricMulticall3AutoDetectRetryTotal.WithLabelValues(projectId, networkId, "detected").Inc()
+
+ // Extract the result from the retry response to create per-entry responses
+ retryJrr, err := retryResp.JsonRpcResponse()
+ if err != nil {
+ if b.logger != nil {
+ b.logger.Warn().
+ Err(err).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Str("contract", "0x"+targetHex).
+ Msg("multicall3 auto-detect failed to extract retry response")
+ }
+ // Fallback: propagate error to all entries
+ for _, entry := range entriesForCall {
+ b.sendResult(entry, BatchResult{Error: err}, projectId, networkId)
+ }
+ retryResp.Release()
+ continue
+ }
+
+ // Get the result value to clone for each entry
+ resultValue := retryJrr.GetResultString()
+
+ // Deliver fresh response to each entry with correct request ID
+ for _, entry := range entriesForCall {
+ jrr, err := common.NewJsonRpcResponse(entry.Request.ID(), resultValue, nil)
+ if err != nil {
+ if b.logger != nil {
+ b.logger.Warn().
+ Err(err).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Str("contract", "0x"+targetHex).
+ Msg("multicall3 auto-detect response construction failed")
+ }
+ b.sendResult(entry, BatchResult{Error: err}, projectId, networkId)
+ continue
+ }
+ resp := common.NewNormalizedResponse().WithRequest(entry.Request).WithJsonRpcResponse(jrr)
+ resp.SetUpstream(retryResp.Upstream())
+ resp.SetFromCache(retryResp.FromCache())
+ b.sendResult(entry, BatchResult{Response: resp}, projectId, networkId)
+ }
+
+ // Release the original retry response after all entries are processed
+ retryResp.Release()
+ continue // Move to next unique call
+ }
+ // Individual call also failed - not a bypass candidate
+ telemetry.MetricMulticall3AutoDetectRetryTotal.WithLabelValues(projectId, networkId, "same_error").Inc()
+ if b.logger != nil {
+ b.logger.Debug().
+ Err(retryErr).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Str("contract", "0x"+targetHex).
+ Msg("multicall3 auto-detect retry also failed, not adding to bypass")
+ }
+ if retryResp != nil {
+ retryResp.Release()
+ }
+ }
+ }
+
+ // Build error for reverted call with proper JSON-RPC format
+ dataHex := "0x" + hex.EncodeToString(result.ReturnData)
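+ // Surface the raw revert return data in the error's "data" field so callers can decode Error(string) reasons or custom errors.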
+ revertErr := common.NewErrEndpointExecutionException(
+ common.NewErrJsonRpcExceptionInternal(
+ int(common.JsonRpcErrorEvmReverted), // original code
+ common.JsonRpcErrorEvmReverted, // normalized code
+ "execution reverted",
+ nil,
+ map[string]interface{}{
+ "data": dataHex,
+ "multicall3": true,
+ "stage": "per-call",
+ },
+ ),
+ )
+ for _, entry := range entriesForCall {
+ b.sendResult(entry, BatchResult{Error: revertErr}, projectId, networkId)
+ }
+ }
+ }
+
+ // Release the multicall response after all results have been mapped
+ mcResp.Release()
+}
+
+// decodeMulticallResponse extracts and decodes the multicall3 result from a response.
+func (b *Batcher) decodeMulticallResponse(resp *common.NormalizedResponse) ([]Multicall3Result, error) {
+ if resp == nil {
+ return nil, fmt.Errorf("nil response")
+ }
+
+ jrr, err := resp.JsonRpcResponse()
+ if err != nil {
+ return nil, err
+ }
+ if jrr == nil {
+ return nil, fmt.Errorf("nil json-rpc response")
+ }
+
+ // Check for JSON-RPC error
+ if jrr.Error != nil {
+ return nil, common.NewErrEndpointExecutionException(jrr.Error)
+ }
+
+ // Get result as hex string (JSON encoded, so has quotes)
+ resultStr := jrr.GetResultString()
+ if resultStr == "" || resultStr == "null" {
+ return nil, fmt.Errorf("empty result")
+ }
+
+ // Parse the JSON string to get the hex value
+ var hexStr string
+ if err := common.SonicCfg.UnmarshalFromString(resultStr, &hexStr); err != nil {
+ return nil, fmt.Errorf("failed to parse result: %w", err)
+ }
+
+ // Decode the hex bytes
+ resultBytes, err := common.HexToBytes(hexStr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode hex result: %w", err)
+ }
+
+ // Decode the multicall3 result
+ return DecodeMulticall3Aggregate3Result(resultBytes)
+}
+
+// sendResult safely sends a result to an entry, skipping if context is cancelled.
+// Returns true if sent, false if skipped due to cancelled context.
+// Records a metric when context is cancelled to track abandoned requests.
+func (b *Batcher) sendResult(entry *BatchEntry, result BatchResult, projectId, networkId string) bool {
+ // Check if the entry's context is cancelled - no point sending if caller has given up
+ select {
+ case <-entry.Ctx.Done():
+ telemetry.MetricMulticall3AbandonedTotal.WithLabelValues(projectId, networkId).Inc()
+ if b.logger != nil {
+ b.logger.Warn().
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Err(entry.Ctx.Err()).
+ Msg("multicall3 batch result not delivered: caller context cancelled")
+ }
+ // Release response if present to avoid memory leak
+ if result.Response != nil {
+ result.Response.Release()
+ }
+ return false // Caller abandoned request, skip sending
+ default:
+ }
+ // ResultCh is buffered size 1, so this won't block
+ entry.ResultCh <- result
+ return true
+}
+
+// deliverError sends an error to all entries in a batch.
+// Skips entries whose context has been cancelled.
+func (b *Batcher) deliverError(entries []*BatchEntry, err error, projectId, networkId string) {
+ result := BatchResult{Error: err}
+ for _, entry := range entries {
+ b.sendResult(entry, result, projectId, networkId)
+ }
+}
+
+// fallbackIndividual forwards each entry individually when multicall3 fails.
+// Uses parallel goroutines for concurrent forwarding with panic recovery.
+// Records metrics for each fallback request outcome.
+func (b *Batcher) fallbackIndividual(entries []*BatchEntry, projectId, networkId string) {
+ var wg sync.WaitGroup
+ for _, entry := range entries {
+ wg.Add(1)
+ go func(e *BatchEntry) {
+ defer wg.Done()
+ defer func() {
+ if r := recover(); r != nil {
+ // Log panic with stack trace
+ if b.logger != nil {
+ b.logger.Error().
+ Str("panic", fmt.Sprintf("%v", r)).
+ Str("stack", string(debug.Stack())).
+ Str("projectId", projectId).
+ Str("networkId", networkId).
+ Msg("panic in fallback forward goroutine")
+ }
+ // Send error to entry
+ err := fmt.Errorf("panic in fallback forward: %v", r)
+ b.sendResult(e, BatchResult{Error: err}, projectId, networkId)
+ telemetry.MetricMulticall3FallbackRequestsTotal.WithLabelValues(projectId, networkId, "panic").Inc()
+ }
+ }()
+ // Skip if context is already cancelled
+ select {
+ case <-e.Ctx.Done():
+ b.sendResult(e, BatchResult{Error: e.Ctx.Err()}, projectId, networkId)
+ telemetry.MetricMulticall3FallbackRequestsTotal.WithLabelValues(projectId, networkId, "cancelled").Inc()
+ return
+ default:
+ }
+ resp, err := b.forwarder.Forward(e.Ctx, e.Request)
+ b.sendResult(e, BatchResult{Response: resp, Error: err}, projectId, networkId)
+ if err != nil {
+ telemetry.MetricMulticall3FallbackRequestsTotal.WithLabelValues(projectId, networkId, "error").Inc()
+ } else {
+ telemetry.MetricMulticall3FallbackRequestsTotal.WithLabelValues(projectId, networkId, "success").Inc()
+ }
+ }(entry)
+ }
+ wg.Wait()
+}
+
+// Shutdown stops the batcher and waits for pending operations.
+// Safe to call multiple times.
+func (b *Batcher) Shutdown() {
+ b.shutdownOnce.Do(func() {
+ close(b.shutdown)
+ })
+ b.wg.Wait()
+}
+
+// flushWithShutdownError delivers shutdown errors to all pending entries.
+func (b *Batcher) flushWithShutdownError(keyStr string, batch *Batch) {
+ batch.mu.Lock()
+ if batch.Flushing {
+ batch.mu.Unlock()
+ return
+ }
+ batch.Flushing = true
+ entries := batch.Entries
+ batch.mu.Unlock()
+
+ // Remove from active batches
+ b.mu.Lock()
+ if b.batches[keyStr] == batch {
+ delete(b.batches, keyStr)
+ }
+ entriesLen := int64(len(entries))
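+ // Decrement the shared queue size, clamping at zero so a bookkeeping mismatch can never drive it negative.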
+ if b.queueSize >= entriesLen {
+ b.queueSize -= entriesLen
+ } else {
+ b.queueSize = 0
+ }
+ b.mu.Unlock()
+
+ // Decrement queue length metric
+ telemetry.MetricMulticall3QueueLen.WithLabelValues(batch.Key.ProjectId, batch.Key.NetworkId).Sub(float64(len(entries)))
+
+ // Deliver shutdown error to all entries
+ shutdownErr := common.NewErrJsonRpcExceptionInternal(
+ 0,
+ common.JsonRpcErrorServerSideException,
+ "batcher shutting down",
+ nil,
+ nil,
+ )
+ b.deliverError(entries, shutdownErr, batch.Key.ProjectId, batch.Key.NetworkId)
+}
+
+// DirectivesKeyVersion should be bumped when the set of directives
+// included in the key changes. This prevents cross-node key mismatches.
+const DirectivesKeyVersion = 1
+
+// BatchingKey uniquely identifies a batch for grouping eth_call requests.
+type BatchingKey struct {
+ ProjectId string
+ NetworkId string
+ BlockRef string
+ DirectivesKey string
+ UserId string // empty if cross-user batching is allowed
+}
+
+// Validate checks that the BatchingKey has required fields set.
+// Returns an error if any required field is empty.
+func (k BatchingKey) Validate() error {
+ if k.ProjectId == "" {
+ return fmt.Errorf("BatchingKey.ProjectId is required")
+ }
+ if k.NetworkId == "" {
+ return fmt.Errorf("BatchingKey.NetworkId is required")
+ }
+ if k.BlockRef == "" {
+ return fmt.Errorf("BatchingKey.BlockRef is required")
+ }
+ return nil
+}
+
+func (k BatchingKey) String() string {
+ // Use null byte separator to prevent key collisions from field values containing the separator
+ return fmt.Sprintf("%s\x00%s\x00%s\x00%s\x00%s", k.ProjectId, k.NetworkId, k.BlockRef, k.DirectivesKey, k.UserId)
+}
+
+// DeriveDirectivesKey creates a stable, versioned key from relevant directives.
+// Only includes directives that affect batching behavior.
+func DeriveDirectivesKey(dirs *common.RequestDirectives) string {
+ if dirs == nil {
+ return fmt.Sprintf("v%d:", DirectivesKeyVersion)
+ }
+
+ parts := make([]string, 0, 5)
+ if dirs.UseUpstream != "" {
+ parts = append(parts, fmt.Sprintf("use-upstream=%s", dirs.UseUpstream))
+ }
+ if dirs.SkipInterpolation {
+ parts = append(parts, "skip-interpolation=true")
+ }
+ if dirs.RetryEmpty {
+ parts = append(parts, "retry-empty=true")
+ }
+ if dirs.RetryPending {
+ parts = append(parts, "retry-pending=true")
+ }
+ if dirs.SkipCacheRead {
+ parts = append(parts, "skip-cache-read=true")
+ }
+
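+ // Sort so the derived key is stable regardless of the order in which the directive checks above append their parts.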
+ sort.Strings(parts)
+ return fmt.Sprintf("v%d:%s", DirectivesKeyVersion, strings.Join(parts, ","))
+}
+
+// DeriveCallKey creates a unique key for deduplication within a batch.
+// For eth_call, uses target + calldata + blockRef to create a deterministic key
+// that doesn't depend on JSON map key ordering.
+func DeriveCallKey(req *common.NormalizedRequest) (string, error) {
+ if req == nil {
+ return "", fmt.Errorf("request is nil")
+ }
+
+ // Extract the call components deterministically
+ target, callData, blockRef, err := ExtractCallInfo(req)
+ if err != nil {
+ return "", err
+ }
+
+ // Create key from the extracted components (deterministic order)
+ return fmt.Sprintf("eth_call:%x:%x:%s", target, callData, blockRef), nil
+}
+
+// BatchEntry represents a request waiting in a batch.
+type BatchEntry struct {
+ Ctx context.Context // Original request context (for individual fallback)
+ Request *common.NormalizedRequest // The original eth_call request
+ CallKey string // Deduplication key (target + calldata + blockRef)
+ Target []byte // Contract address (20 bytes)
+ CallData []byte // Encoded function call data
+ ResultCh chan BatchResult // Channel to receive the result (buffered, size 1)
+ CreatedAt time.Time // When the entry was created (for wait time metrics)
+ Deadline time.Time // Deadline from context (for deadline-aware flushing)
+}
+
+// BatchResult is the outcome delivered to a waiting request.
+type BatchResult struct {
+ Response *common.NormalizedResponse
+ Error error
+}
+
+// Batch holds pending requests for a single batching key.
+// All entries in a batch share the same project, network, block reference, directives, and user ID.
+type Batch struct {
+ Key BatchingKey // Composite key identifying this batch
+ Entries []*BatchEntry // All entries (may include duplicates)
+ CallKeys map[string][]*BatchEntry // Map from call key to entries (for deduplication)
+ FlushTime time.Time // When this batch should be flushed (deadline-aware)
+ Flushing bool // True once flush has started (prevents double-flush)
+ notifyCh chan struct{} // Signals flush time was shortened (buffered, size 1)
+ mu sync.Mutex // Protects all fields
+}
+
+func NewBatch(key BatchingKey, flushTime time.Time) *Batch {
+ return &Batch{
+ Key: key,
+ Entries: make([]*BatchEntry, 0, 16),
+ CallKeys: make(map[string][]*BatchEntry),
+ FlushTime: flushTime,
+ notifyCh: make(chan struct{}, 1),
+ }
+}
+
+// ineligibleCallFields are fields that make an eth_call ineligible for batching.
+// Multicall3 aggregate3 only supports target + calldata, not gas/value/etc.
+var ineligibleCallFields = []string{
+ "from", "gas", "gasPrice", "maxFeePerGas", "maxPriorityFeePerGas", "value",
+}
+
+var allowedCallFields = map[string]bool{
+ "to": true,
+ "data": true,
+ "input": true,
+}
+
+// allowedBlockTags are block tags that can be batched by default.
+var allowedBlockTags = map[string]bool{
+ "latest": true,
+ "finalized": true,
+ "safe": true,
+ "earliest": true,
+}
+
+// IsEligibleForBatching checks if a request can be batched via Multicall3.
+// Returns (eligible, reason) where reason explains why not eligible.
+func IsEligibleForBatching(req *common.NormalizedRequest, cfg *common.Multicall3AggregationConfig) (bool, string) {
+ if req == nil {
+ return false, "request is nil"
+ }
+ if cfg == nil || !cfg.Enabled {
+ return false, "batching disabled"
+ }
+
+ jrq, err := req.JsonRpcRequest()
+ if err != nil {
+ return false, fmt.Sprintf("json-rpc error: %v", err)
+ }
+
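+ // Copy the method and params under the read lock so the validation below runs without holding it.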
+ jrq.RLock()
+ method := strings.ToLower(jrq.Method)
+ params := jrq.Params
+ jrq.RUnlock()
+
+ // Must be eth_call
+ if method != "eth_call" {
+ return false, "not eth_call"
+ }
+
+ // eth_call takes 1-3 params (call object, optional block, optional state override); at minimum the call object must be present
+ if len(params) < 1 {
+ return false, fmt.Sprintf("invalid param count: %d", len(params))
+ }
+
+ // Check for state override (3rd param) - not supported with multicall3
+ if len(params) > 2 {
+ return false, "has state override"
+ }
+
+ // Parse call object
+ callObj, ok := params[0].(map[string]interface{})
+ if !ok {
+ return false, "invalid call object type"
+ }
+
+ // Must have 'to' address
+ toVal, hasTo := callObj["to"]
+ if !hasTo {
+ return false, "missing to address"
+ }
+ toStr, ok := toVal.(string)
+ if !ok || toStr == "" {
+ return false, "invalid to address"
+ }
+
+ // Check if contract should bypass multicall3 batching
+ // (e.g., contracts that check msg.sender code size like Chronicle Oracle)
+ if cfg.ShouldBypassContractHex(toStr) {
+ return false, "contract in bypass list"
+ }
+
+ // Check for ineligible fields
+ for _, field := range ineligibleCallFields {
+ if _, has := callObj[field]; has {
+ return false, fmt.Sprintf("has %s field", field)
+ }
+ }
+
+ // Reject unsupported call object fields early to avoid batcher failures.
+ for field := range callObj {
+ if !allowedCallFields[field] {
+ return false, fmt.Sprintf("unsupported call field: %s", field)
+ }
+ }
+
+ // Recursion guard: don't batch calls to multicall3 contract
+ if strings.EqualFold(toStr, multicall3Address) {
+ return false, "already multicall"
+ }
+
+ // Check block tag
+ blockTag := "latest"
+ if len(params) >= 2 && params[1] != nil {
+ // Check for EIP-1898 block params with requireCanonical: false
+ // These cannot be safely batched because the flag would be lost when
+ // rebuilding the block param as {blockHash: "0x..."}
+ if blockObj, ok := params[1].(map[string]interface{}); ok {
+ if reqCanonical, hasReqCanonical := blockObj["requireCanonical"]; hasReqCanonical {
+ if reqCanonicalBool, ok := reqCanonical.(bool); ok && !reqCanonicalBool {
+ return false, "has requireCanonical:false"
+ }
+ }
+ }
+
+ normalized, err := NormalizeBlockParam(params[1])
+ if err != nil {
+ return false, fmt.Sprintf("invalid block param: %v", err)
+ }
+ blockTag = strings.ToLower(normalized)
+ }
+
+ // Check if pending tag is allowed
+ if blockTag == "pending" && !cfg.AllowPendingTagBatching {
+ return false, "pending tag not allowed"
+ }
+
+ // Check if block tag is eligible for batching:
+ // - Known named tags (latest, finalized, safe, earliest) are always allowed
+ // - pending is allowed if AllowPendingTagBatching is true (checked above)
+ // - Numeric block numbers (decimal strings after normalization) are allowed
+ // - Block hashes (0x + 64 hex chars) are allowed
+ if !isBlockRefEligibleForBatching(blockTag) {
+ return false, fmt.Sprintf("block tag not allowed: %s", blockTag)
+ }
+
+ return true, ""
+}
+
+// isBlockRefEligibleForBatching checks if a normalized block reference is eligible for batching.
+// It allows: known block tags, numeric block numbers, and block hashes.
+func isBlockRefEligibleForBatching(blockRef string) bool {
+ // Check known block tags (including pending, which is handled separately)
+ if allowedBlockTags[blockRef] || blockRef == "pending" {
+ return true
+ }
+
+ // Check if it's a numeric block number (decimal string after normalization)
+ if len(blockRef) > 0 && blockRef[0] >= '0' && blockRef[0] <= '9' {
+ return true
+ }
+
+ // Check if it's a block hash (0x + 64 hex chars = 66 chars total for 32 bytes)
+ if strings.HasPrefix(blockRef, "0x") && len(blockRef) == 66 {
+ return true
+ }
+
+ return false
+}
+
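+// blockParamForMulticall converts a normalized block reference back into the JSON-RPC block parameter
+// for the aggregated multicall3 request: block hashes are wrapped in EIP-1898 form, decimal block
+// numbers become hex, and named tags pass through unchanged.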
+func blockParamForMulticall(blockRef string) (interface{}, error) {
+ if blockRef == "" {
+ return "latest", nil
+ }
+ if strings.HasPrefix(blockRef, "0x") {
+ // Check if this is a block hash (66 chars = 0x + 64 hex chars = 32 bytes)
+ // Block hashes need to be wrapped in EIP-1898 format for correct interpretation
+ if len(blockRef) == 66 {
+ return map[string]interface{}{"blockHash": blockRef}, nil
+ }
+ // Regular hex block number - pass through
+ return blockRef, nil
+ }
+ if isDecimalBlockRef(blockRef) {
+ blockNum, err := strconv.ParseInt(blockRef, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return fmt.Sprintf("0x%x", blockNum), nil
+ }
+ return blockRef, nil
+}
+
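+// isDecimalBlockRef reports whether blockRef consists solely of decimal digits.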
+func isDecimalBlockRef(blockRef string) bool {
+ if blockRef == "" {
+ return false
+ }
+ for i := 0; i < len(blockRef); i++ {
+ if blockRef[i] < '0' || blockRef[i] > '9' {
+ return false
+ }
+ }
+ return true
+}
+
+// ExtractCallInfo extracts target and calldata from an eligible eth_call request.
+// PRECONDITION: req must have passed IsEligibleForBatching - this function assumes
+// the request structure has been validated.
+func ExtractCallInfo(req *common.NormalizedRequest) (target []byte, callData []byte, blockRef string, err error) {
+ jrq, err := req.JsonRpcRequest()
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ jrq.RLock()
+ params := jrq.Params
+ jrq.RUnlock()
+
+ callObj, ok := params[0].(map[string]interface{})
+ if !ok {
+ return nil, nil, "", fmt.Errorf("invalid call object type")
+ }
+
+ toVal, ok := callObj["to"]
+ if !ok {
+ return nil, nil, "", fmt.Errorf("missing to address")
+ }
+ toStr, ok := toVal.(string)
+ if !ok {
+ return nil, nil, "", fmt.Errorf("invalid to address type")
+ }
+
+ target, err = common.HexToBytes(toStr)
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ dataHex := "0x"
+ if dataVal, ok := callObj["data"]; ok {
+ if dataStr, ok := dataVal.(string); ok {
+ dataHex = dataStr
+ }
+ } else if inputVal, ok := callObj["input"]; ok {
+ if inputStr, ok := inputVal.(string); ok {
+ dataHex = inputStr
+ }
+ }
+
+ callData, err = common.HexToBytes(dataHex)
+ if err != nil {
+ return nil, nil, "", err
+ }
+
+ blockRef = "latest"
+ if len(params) >= 2 && params[1] != nil {
+ blockRef, err = NormalizeBlockParam(params[1])
+ if err != nil {
+ return nil, nil, "", err
+ }
+ }
+
+ return target, callData, blockRef, nil
+}
diff --git a/architecture/evm/multicall3_batcher_test.go b/architecture/evm/multicall3_batcher_test.go
new file mode 100644
index 000000000..b9b8728e6
--- /dev/null
+++ b/architecture/evm/multicall3_batcher_test.go
@@ -0,0 +1,3144 @@
+package evm
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBatchingKey(t *testing.T) {
+ key1 := BatchingKey{
+ ProjectId: "proj1",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: "use-upstream=alchemy",
+ UserId: "",
+ }
+ key2 := BatchingKey{
+ ProjectId: "proj1",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: "use-upstream=alchemy",
+ UserId: "",
+ }
+ key3 := BatchingKey{
+ ProjectId: "proj1",
+ NetworkId: "evm:1",
+ BlockRef: "12345",
+ DirectivesKey: "use-upstream=alchemy",
+ UserId: "",
+ }
+
+ require.Equal(t, key1.String(), key2.String())
+ require.NotEqual(t, key1.String(), key3.String())
+}
+
+func TestDirectivesKeyDerivation(t *testing.T) {
+ dirs := &common.RequestDirectives{}
+ dirs.UseUpstream = "alchemy"
+ dirs.SkipCacheRead = true
+ dirs.RetryEmpty = true
+
+ key := DeriveDirectivesKey(dirs)
+ require.Contains(t, key, "use-upstream=alchemy")
+ require.Contains(t, key, "skip-cache-read=true")
+ require.Contains(t, key, "retry-empty=true")
+}
+
+func TestCallKeyDerivation(t *testing.T) {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcdef",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ key, err := DeriveCallKey(req)
+ require.NoError(t, err)
+ require.NotEmpty(t, key)
+
+ // Same request should produce same key
+ key2, err := DeriveCallKey(req)
+ require.NoError(t, err)
+ require.Equal(t, key, key2)
+}
+
+func TestDirectivesKeyDerivation_Nil(t *testing.T) {
+ key := DeriveDirectivesKey(nil)
+ require.Equal(t, "v1:", key)
+}
+
+func TestDirectivesKeyDerivation_VersionPrefix(t *testing.T) {
+ dirs := &common.RequestDirectives{}
+ dirs.UseUpstream = "alchemy"
+
+ key := DeriveDirectivesKey(dirs)
+ require.True(t, strings.HasPrefix(key, "v1:"), "key should have version prefix")
+}
+
+func TestCallKeyDerivation_NilRequest(t *testing.T) {
+ key, err := DeriveCallKey(nil)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "request is nil")
+ require.Empty(t, key)
+}
+
+func TestIsEligibleForBatching(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AllowPendingTagBatching: false,
+ }
+ cfg.SetDefaults()
+
+ tests := []struct {
+ name string
+ method string
+ params []interface{}
+ eligible bool
+ reason string
+ }{
+ {
+ name: "eligible basic eth_call",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "latest",
+ },
+ eligible: true,
+ },
+ {
+ name: "eligible with finalized tag",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "finalized",
+ },
+ eligible: true,
+ },
+ {
+ name: "ineligible - pending tag",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "pending",
+ },
+ eligible: false,
+ reason: "pending tag not allowed",
+ },
+ {
+ name: "ineligible - has from field",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcd",
+ "from": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ },
+ "latest",
+ },
+ eligible: false,
+ reason: "has from field",
+ },
+ {
+ name: "ineligible - has value field",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcd",
+ "value": "0x1",
+ },
+ "latest",
+ },
+ eligible: false,
+ reason: "has value field",
+ },
+ {
+ name: "ineligible - unsupported call field",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcd",
+ "accessList": []interface{}{},
+ },
+ "latest",
+ },
+ eligible: false,
+ reason: "unsupported call field",
+ },
+ {
+ name: "ineligible - has state override (3rd param)",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "latest",
+ map[string]interface{}{}, // state override
+ },
+ eligible: false,
+ reason: "has state override",
+ },
+ {
+ name: "ineligible - not eth_call",
+ method: "eth_getBalance",
+ params: []interface{}{"0x1234567890123456789012345678901234567890", "latest"},
+ eligible: false,
+ reason: "not eth_call",
+ },
+ {
+ name: "ineligible - already multicall (recursion guard)",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0xcA11bde05977b3631167028862bE2a173976CA11", // multicall3 address
+ "data": "0x82ad56cb", // aggregate3 selector
+ },
+ "latest",
+ },
+ eligible: false,
+ reason: "already multicall",
+ },
+ {
+ name: "eligible with safe tag",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "safe",
+ },
+ eligible: true,
+ },
+ {
+ name: "eligible with earliest tag",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "earliest",
+ },
+ eligible: true,
+ },
+ {
+ name: "eligible with numeric block number (hex)",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "0x1234",
+ },
+ eligible: true,
+ },
+ {
+ name: "eligible with block hash",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", // 32-byte hash
+ },
+ eligible: true,
+ },
+ {
+ name: "ineligible - unknown block tag",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ "unknown_tag",
+ },
+ eligible: false,
+ reason: "block tag not allowed",
+ },
+ {
+ name: "ineligible - EIP-1898 block param with requireCanonical:false",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ map[string]interface{}{
+ "blockHash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "requireCanonical": false,
+ },
+ },
+ eligible: false,
+ reason: "has requireCanonical:false",
+ },
+ {
+ name: "eligible - EIP-1898 block param with requireCanonical:true",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ map[string]interface{}{
+ "blockHash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "requireCanonical": true,
+ },
+ },
+ eligible: true,
+ },
+ {
+ name: "eligible - EIP-1898 block param without requireCanonical (default true)",
+ method: "eth_call",
+ params: []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0xabcd"},
+ map[string]interface{}{
+ "blockHash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ },
+ },
+ eligible: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ jrq := common.NewJsonRpcRequest(tt.method, tt.params)
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ eligible, reason := IsEligibleForBatching(req, cfg)
+ require.Equal(t, tt.eligible, eligible, "reason: %s", reason)
+ if !tt.eligible {
+ require.Contains(t, reason, tt.reason)
+ }
+ })
+ }
+}
+
+func TestExtractCallInfo(t *testing.T) {
+ tests := []struct {
+ name string
+ params []interface{}
+ expectedTarget string
+ expectedData string
+ expectedBlock string
+ expectError bool
+ }{
+ {
+ name: "basic extraction with data field",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcdef",
+ },
+ "latest",
+ },
+ expectedTarget: "0x1234567890123456789012345678901234567890",
+ expectedData: "0xabcdef",
+ expectedBlock: "latest",
+ },
+ {
+ name: "extraction with input field instead of data",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "input": "0x12345678",
+ },
+ "finalized",
+ },
+ expectedTarget: "0x1234567890123456789012345678901234567890",
+ expectedData: "0x12345678",
+ expectedBlock: "finalized",
+ },
+ {
+ name: "extraction with empty data",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ },
+ "latest",
+ },
+ expectedTarget: "0x1234567890123456789012345678901234567890",
+ expectedData: "0x",
+ expectedBlock: "latest",
+ },
+ {
+ name: "extraction with no block param (defaults to latest)",
+ params: []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xaa",
+ },
+ },
+ expectedTarget: "0x1234567890123456789012345678901234567890",
+ expectedData: "0xaa",
+ expectedBlock: "latest",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ jrq := common.NewJsonRpcRequest("eth_call", tt.params)
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ target, data, blockRef, err := ExtractCallInfo(req)
+ if tt.expectError {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ require.Equal(t, tt.expectedTarget, "0x"+hex.EncodeToString(target))
+ require.Equal(t, tt.expectedData, "0x"+hex.EncodeToString(data))
+ require.Equal(t, tt.expectedBlock, blockRef)
+ })
+ }
+}
+
+func TestIsEligibleForBatching_AllowPendingTag(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AllowPendingTagBatching: true,
+ }
+ cfg.SetDefaults()
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcd",
+ },
+ "pending",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ eligible, reason := IsEligibleForBatching(req, cfg)
+ require.True(t, eligible, "pending should be allowed when AllowPendingTagBatching is true: %s", reason)
+}
+
+func TestIsEligibleForBatching_BypassContracts(t *testing.T) {
+ // Chronicle Oracle feed address (example contract that checks msg.sender code size)
+ chronicleOracleAddr := "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"
+ otherContract := "0x1234567890123456789012345678901234567890"
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{chronicleOracleAddr},
+ }
+ cfg.SetDefaults()
+
+ tests := []struct {
+ name string
+ to string
+ eligible bool
+ reason string
+ }{
+ {
+ name: "bypass contract - exact match",
+ to: chronicleOracleAddr,
+ eligible: false,
+ reason: "contract in bypass list",
+ },
+ {
+ name: "bypass contract - lowercase",
+ to: "0x057f30e63a69175c69a4af5656b8c9ee647de3d0",
+ eligible: false,
+ reason: "contract in bypass list",
+ },
+ {
+ name: "bypass contract - uppercase",
+ to: "0x057F30E63A69175C69A4AF5656B8C9EE647DE3D0",
+ eligible: false,
+ reason: "contract in bypass list",
+ },
+ {
+ name: "non-bypass contract allowed",
+ to: otherContract,
+ eligible: true,
+ reason: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": tt.to,
+ "data": "0xfeaf968c", // latestRoundData() selector
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ eligible, reason := IsEligibleForBatching(req, cfg)
+ require.Equal(t, tt.eligible, eligible, "eligibility mismatch for %s", tt.name)
+ if tt.reason != "" {
+ require.Contains(t, reason, tt.reason)
+ }
+ })
+ }
+}
+
+func TestIsEligibleForBatching_BypassContractsEmpty(t *testing.T) {
+ // When BypassContracts is empty, all contracts should be eligible
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{},
+ }
+ cfg.SetDefaults()
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "data": "0xfeaf968c",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ eligible, reason := IsEligibleForBatching(req, cfg)
+ require.True(t, eligible, "should be eligible when bypass list is empty: %s", reason)
+}
+
+func TestIsEligibleForBatching_MultipleBypassContracts(t *testing.T) {
+ // Test with multiple bypass contracts
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0", // Chronicle Oracle
+ "0xABCDEF0123456789ABCDEF0123456789ABCDEF01", // Another contract
+ },
+ }
+ cfg.SetDefaults()
+
+ tests := []struct {
+ name string
+ to string
+ eligible bool
+ }{
+ {
+ name: "first bypass contract",
+ to: "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ eligible: false,
+ },
+ {
+ name: "second bypass contract",
+ to: "0xABCDEF0123456789ABCDEF0123456789ABCDEF01",
+ eligible: false,
+ },
+ {
+ name: "non-bypass contract",
+ to: "0x1111111111111111111111111111111111111111",
+ eligible: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": tt.to,
+ "data": "0xabcd",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ eligible, _ := IsEligibleForBatching(req, cfg)
+ require.Equal(t, tt.eligible, eligible)
+ })
+ }
+}
+
+func TestBatcherEnqueueAndFlush(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 2,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ ctx := context.Background()
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ // Create test requests
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcdef01",
+ },
+ "latest",
+ })
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x2234567890123456789012345678901234567890",
+ "data": "0xabcdef02",
+ },
+ "latest",
+ })
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Enqueue first request
+ entry1, bypass1, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ require.False(t, bypass1)
+ require.NotNil(t, entry1)
+
+ // Enqueue second request
+ entry2, bypass2, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+ require.False(t, bypass2)
+ require.NotNil(t, entry2)
+
+ // Check batch exists
+ batcher.mu.RLock()
+ batch, exists := batcher.batches[key.String()]
+ batcher.mu.RUnlock()
+ require.True(t, exists)
+ require.Len(t, batch.Entries, 2)
+
+ // Cleanup
+ batcher.Shutdown()
+}
+
+func TestBatcherDeduplication(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ ctx := context.Background()
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ // Two identical requests - using the same jrq to ensure call key consistency
+ // (JSON serialization of map[string]interface{} can have non-deterministic key order)
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1234567890123456789012345678901234567890",
+ "data": "0xabcdef01",
+ },
+ "latest",
+ })
+ jrq2 := common.NewJsonRpcRequest("eth_call", jrq1.Params) // Use same params object
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ entry1, _, _ := batcher.Enqueue(ctx, key, req1)
+ entry2, _, _ := batcher.Enqueue(ctx, key, req2)
+
+ // Both should share the same callKey slot
+ require.Equal(t, entry1.CallKey, entry2.CallKey)
+
+ batcher.mu.RLock()
+ batch := batcher.batches[key.String()]
+ batcher.mu.RUnlock()
+
+ // Two entries but deduplicated
+ require.Len(t, batch.Entries, 2)
+ require.Len(t, batch.CallKeys[entry1.CallKey], 2)
+
+ batcher.Shutdown()
+}
+
+func TestBatcherCapsEnforcement(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 2, // Very low limit
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ ctx := context.Background()
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Add requests up to cap
+ for i := 0; i < 2; i++ {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": fmt.Sprintf("0x%040d", i),
+ "data": "0xabcdef",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+ }
+
+ // Next request should trigger bypass (caps reached)
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x9999999999999999999999999999999999999999",
+ "data": "0xabcdef",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.True(t, bypass, "should bypass when caps reached")
+
+ batcher.Shutdown()
+}
+
+// mockForwarder implements Forwarder for testing
+type mockForwarder struct {
+ response *common.NormalizedResponse
+ err error
+ called int
+ cacheWrites int
+ mu sync.Mutex
+}
+
+func (m *mockForwarder) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.called++
+ return m.response, m.err
+}
+
+func (m *mockForwarder) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.cacheWrites++
+ return nil
+}
+
+func TestBatcherFlushAndResultMapping(t *testing.T) {
+ // Create valid multicall3 result with 2 calls
+ // Each call returns success=true with some data
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xde, 0xad, 0xbe, 0xef}},
+ {Success: true, ReturnData: []byte{0xca, 0xfe, 0xba, 0xbe}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarder{response: mockResp}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10, // Short window for test
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false), // disable caching for test
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Add two requests
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1111111111111111111111111111111111111111", "data": "0x01"},
+ "latest",
+ })
+ jrq1.ID = "req1"
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x2222222222222222222222222222222222222222", "data": "0x02"},
+ "latest",
+ })
+ jrq2.ID = "req2"
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry1, _, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ entry2, _, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+
+ // Wait for results
+ result1 := <-entry1.ResultCh
+ result2 := <-entry2.ResultCh
+
+ require.NoError(t, result1.Error)
+ require.NoError(t, result2.Error)
+ require.NotNil(t, result1.Response)
+ require.NotNil(t, result2.Response)
+
+ // Verify forwarder was called exactly once
+ forwarder.mu.Lock()
+ require.Equal(t, 1, forwarder.called)
+ forwarder.mu.Unlock()
+
+ // Verify the responses contain the expected data
+ jrr1, err := result1.Response.JsonRpcResponse()
+ require.NoError(t, err)
+ require.Equal(t, "\"0xdeadbeef\"", jrr1.GetResultString())
+
+ jrr2, err := result2.Response.JsonRpcResponse()
+ require.NoError(t, err)
+ require.Equal(t, "\"0xcafebabe\"", jrr2.GetResultString())
+
+ batcher.Shutdown()
+}
+
+func TestBatcherFlush_UsesHexBlockParam(t *testing.T) {
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0x01}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
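+ // Capture the block parameter that the batcher passes to the upstream multicall3 request so the test can assert on it.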
+ blockParamCh := make(chan interface{}, 1)
+ forwarder := &mockForwarderFunc{
+ forwardFunc: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ jrq, err := req.JsonRpcRequest()
+ if err == nil {
+ jrq.RLock()
+ params := jrq.Params
+ jrq.RUnlock()
+ if len(params) > 1 {
+ blockParamCh <- params[1]
+ }
+ }
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ if err != nil {
+ return nil, err
+ }
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx := context.Background()
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01",
+ },
+ "0x10",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, _, blockRef, err := ExtractCallInfo(req)
+ require.NoError(t, err)
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: blockRef,
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ entry, _, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+
+ result := <-entry.ResultCh
+ require.NoError(t, result.Error)
+
+ select {
+ case blockParam := <-blockParamCh:
+ paramStr, ok := blockParam.(string)
+ require.True(t, ok)
+ require.Equal(t, "0x10", paramStr)
+ case <-time.After(2 * time.Second):
+ require.Fail(t, "timed out waiting for block param")
+ }
+
+ batcher.Shutdown()
+}
+
+func TestBatcherFlushDeduplication(t *testing.T) {
+ // Create result with 1 call (deduplication means only 1 unique call is made)
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xab, 0xcd}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarder{response: mockResp}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Add two IDENTICAL requests (same target and calldata)
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1111111111111111111111111111111111111111", "data": "0x01"},
+ "latest",
+ })
+ jrq1.ID = "req1"
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1111111111111111111111111111111111111111", "data": "0x01"},
+ "latest",
+ })
+ jrq2.ID = "req2"
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry1, _, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ entry2, _, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+
+ // Both should share the same call key
+ require.Equal(t, entry1.CallKey, entry2.CallKey)
+
+ // Wait for results
+ result1 := <-entry1.ResultCh
+ result2 := <-entry2.ResultCh
+
+ require.NoError(t, result1.Error)
+ require.NoError(t, result2.Error)
+
+ // Both should get the same result (fanned out)
+ jrr1, err := result1.Response.JsonRpcResponse()
+ require.NoError(t, err)
+ jrr2, err := result2.Response.JsonRpcResponse()
+ require.NoError(t, err)
+ require.Equal(t, jrr1.GetResultString(), jrr2.GetResultString())
+
+ // Forwarder should only be called once
+ forwarder.mu.Lock()
+ require.Equal(t, 1, forwarder.called)
+ forwarder.mu.Unlock()
+
+ batcher.Shutdown()
+}
+
+func TestBatcherFlushRevertHandling(t *testing.T) {
+ // Create result where second call reverts
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xde, 0xad, 0xbe, 0xef}},
+ {Success: false, ReturnData: []byte{0x08, 0xc3, 0x79, 0xa0}}, // Error(string) selector
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarder{response: mockResp}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Add two requests
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1111111111111111111111111111111111111111", "data": "0x01"},
+ "latest",
+ })
+ jrq1.ID = "req1"
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x2222222222222222222222222222222222222222", "data": "0x02"},
+ "latest",
+ })
+ jrq2.ID = "req2"
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry1, _, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ entry2, _, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+
+ // Wait for results
+ result1 := <-entry1.ResultCh
+ result2 := <-entry2.ResultCh
+
+ // First call should succeed
+ require.NoError(t, result1.Error)
+ require.NotNil(t, result1.Response)
+
+ // Second call should fail with revert error
+ require.Error(t, result2.Error)
+ require.Contains(t, result2.Error.Error(), "execution reverted")
+
+ batcher.Shutdown()
+}
+
+func TestBatcherFlushFallbackOnMulticall3Unavailable(t *testing.T) {
+ // Track individual calls made during fallback
+ var individualCalls []*common.NormalizedRequest
+ var mu sync.Mutex
+
+ forwarder := &mockForwarderFunc{
+ forwardFunc: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ mu.Lock()
+ defer mu.Unlock()
+
+ // Check if this is a multicall3 request (to multicall3 address)
+ jrq, _ := req.JsonRpcRequest()
+ if jrq != nil && len(jrq.Params) > 0 {
+ if callObj, ok := jrq.Params[0].(map[string]interface{}); ok {
+ if toAddr, ok := callObj["to"].(string); ok {
+ if strings.EqualFold(toAddr, "0xcA11bde05977b3631167028862bE2a173976CA11") {
+ // This is a multicall3 request - return "contract not found" error
+ return nil, common.NewErrEndpointExecutionException(fmt.Errorf("contract not found"))
+ }
+ }
+ }
+ }
+
+ // Individual call - track and return success
+ individualCalls = append(individualCalls, req)
+ jrr, _ := common.NewJsonRpcResponse(req.ID(), "0xdeadbeef", nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Add two requests
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1111111111111111111111111111111111111111", "data": "0x01"},
+ "latest",
+ })
+ jrq1.ID = "req1"
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x2222222222222222222222222222222222222222", "data": "0x02"},
+ "latest",
+ })
+ jrq2.ID = "req2"
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry1, _, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ entry2, _, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+
+ // Wait for results
+ result1 := <-entry1.ResultCh
+ result2 := <-entry2.ResultCh
+
+ // Both should succeed via fallback
+ require.NoError(t, result1.Error)
+ require.NoError(t, result2.Error)
+
+ // Verify individual fallback calls were made
+ mu.Lock()
+ require.Equal(t, 2, len(individualCalls))
+ mu.Unlock()
+
+ batcher.Shutdown()
+}
+
+// mockForwarderFunc allows custom forward behavior for testing
+type mockForwarderFunc struct {
+ forwardFunc func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error)
+}
+
+func (m *mockForwarderFunc) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return m.forwardFunc(ctx, req)
+}
+
+func (m *mockForwarderFunc) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ return nil
+}
+
+func TestBatcherCancellation(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 100,
+ MinWaitMs: 50,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ key := BatchingKey{
+ ProjectId: "test",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ }
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+ require.NotNil(t, entry)
+
+ // Cancel before flush
+ cancel()
+
+ // Batcher should shutdown gracefully
+ batcher.Shutdown()
+}
+
+func TestBatcherDeadlineAwareness(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 100,
+ MinWaitMs: 10,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ key := BatchingKey{
+ ProjectId: "test",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ }
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1234567890123456789012345678901234567890", "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ // Context with tight deadline - should bypass
+ tightCtx, cancel1 := context.WithDeadline(context.Background(), time.Now().Add(5*time.Millisecond))
+ defer cancel1()
+
+ _, bypass, err := batcher.Enqueue(tightCtx, key, req)
+ require.NoError(t, err)
+ require.True(t, bypass, "should bypass with tight deadline")
+
+ // Context with reasonable deadline - should batch
+ normalCtx, cancel2 := context.WithDeadline(context.Background(), time.Now().Add(200*time.Millisecond))
+ defer cancel2()
+
+ _, bypass, err = batcher.Enqueue(normalCtx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass, "should batch with normal deadline")
+
+ batcher.Shutdown()
+}
+
+func TestBatcherConcurrentFlush(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ // Mock forwarder that returns success for all batches
+ var callCount int
+ var mu sync.Mutex
+ forwarder := &mockForwarderFunc{
+ forwardFunc: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ mu.Lock()
+ callCount++
+ mu.Unlock()
+
+ // Return multicall3 results with 3 successful calls
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa}},
+ {Success: true, ReturnData: []byte{0xbb}},
+ {Success: true, ReturnData: []byte{0xcc}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+ jrr, _ := common.NewJsonRpcResponse(nil, resultHex, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ }
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ }
+
+ // Add first batch
+ for i := 0; i < 3; i++ {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": fmt.Sprintf("0x%040d", i), "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ batcher.Enqueue(ctx, key, req)
+ }
+
+ // Wait for first batch to start flushing
+ time.Sleep(15 * time.Millisecond)
+
+ // Add more requests - should go to new batch
+ for i := 3; i < 6; i++ {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": fmt.Sprintf("0x%040d", i), "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, bypass, _ := batcher.Enqueue(ctx, key, req)
+ _ = bypass // May bypass or create new batch - either is acceptable
+ }
+
+ // Wait for all flushes to complete
+ time.Sleep(20 * time.Millisecond)
+
+ batcher.Shutdown()
+
+ // Verify at least one batch was processed
+ mu.Lock()
+ require.GreaterOrEqual(t, callCount, 1)
+ mu.Unlock()
+}
+
+func TestNewBatcher_NilConfig(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ // Test with nil config
+ batcher := NewBatcher(nil, forwarder, nil)
+ require.Nil(t, batcher, "NewBatcher should return nil for nil config")
+}
+
+func TestNewBatcher_DisabledConfig(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ // Test with disabled config
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: false,
+ }
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.Nil(t, batcher, "NewBatcher should return nil for disabled config")
+}
+
+func TestNewBatcher_EnabledConfig(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ // Test with enabled config
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher, "NewBatcher should return non-nil for enabled config")
+ batcher.Shutdown()
+}
+
+func TestNewBatcher_NilForwarder_Panics(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ // Test that nil forwarder causes panic
+ require.Panics(t, func() {
+ NewBatcher(cfg, nil, nil)
+ }, "NewBatcher should panic when forwarder is nil")
+}
+
+// mockForwarderWithCacheError is a forwarder that returns errors from SetCache
+type mockForwarderWithCacheError struct {
+ response *common.NormalizedResponse
+ cacheError error
+ mu sync.Mutex
+ called int
+}
+
+func (m *mockForwarderWithCacheError) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.called++
+ return m.response, nil
+}
+
+func (m *mockForwarderWithCacheError) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ return m.cacheError
+}
+
+func TestBatcher_CacheWriteError_DoesNotFailRequest(t *testing.T) {
+ // Create multicall response for 1 call
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xde, 0xad, 0xbe, 0xef}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarderWithCacheError{
+ response: mockResp,
+ cacheError: fmt.Errorf("cache write failed"),
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(true), // Enable per-call caching
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for result - should succeed despite cache write error
+ select {
+ case result := <-entry.ResultCh:
+ require.NoError(t, result.Error, "request should succeed despite cache write error")
+ require.NotNil(t, result.Response)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for batched request")
+ }
+}
+
+func TestBatcher_ContextDeadlineError_WrappedWithBatchContext(t *testing.T) {
+ // Create a forwarder that simulates internal timeout (not waiting on ctx)
+ forwarder := &mockForwarderFunc{
+ forwardFunc: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ // Return context.DeadlineExceeded immediately (simulating an internal upstream timeout)
+ // so the entry's context is still valid when the error is delivered
+ return nil, context.DeadlineExceeded
+ },
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ SafetyMarginMs: 2,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Create context with deadline longer than batch window so entry is still valid at delivery
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(500*time.Millisecond))
+ defer cancel()
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for result
+ select {
+ case result := <-entry.ResultCh:
+ require.Error(t, result.Error)
+ // The error wrapping happens when ctx.Err() != nil at forward time
+ // Since we return DeadlineExceeded but ctx isn't actually expired yet,
+ // the wrapping won't happen. This tests the error delivery path.
+ require.Contains(t, result.Error.Error(), "deadline exceeded")
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for batched request")
+ }
+}
+
+func TestBatcher_MaxCalldataBytes_Bypass(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 100,
+ MinWaitMs: 5,
+ MaxCalls: 100, // High limit
+ MaxCalldataBytes: 100, // Very low limit for testing
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ ctx := context.Background()
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // First request with small calldata - should be batched
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304", // 4 bytes
+ },
+ "latest",
+ })
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ entry1, bypass1, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ require.False(t, bypass1, "first request should be batched")
+ require.NotNil(t, entry1)
+
+ // Second request with large calldata - should bypass due to MaxCalldataBytes
+ largeData := "0x" + strings.Repeat("aa", 200) // 200 bytes
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x2222222222222222222222222222222222222222",
+ "data": largeData,
+ },
+ "latest",
+ })
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ _, bypass2, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+ require.True(t, bypass2, "second request should bypass due to MaxCalldataBytes")
+}
+
+func TestBatcher_OnlyIfPending_NoBatch(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 100,
+ MinWaitMs: 5,
+ MaxCalls: 100,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ OnlyIfPending: true, // Only batch if there's already a pending batch
+ }
+ cfg.SetDefaults()
+
+ ctx := context.Background()
+ forwarder := &mockForwarder{} // Not used in this test but required
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // First request should bypass - no pending batch exists
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ _, bypass1, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ require.True(t, bypass1, "first request should bypass when OnlyIfPending is true and no batch exists")
+}
+
+func TestBatcher_OnlyIfPending_WithExistingBatch(t *testing.T) {
+ // Create valid multicall3 result with 2 calls
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa}},
+ {Success: true, ReturnData: []byte{0xbb}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarder{response: mockResp}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 100,
+ MinWaitMs: 5,
+ MaxCalls: 100,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ OnlyIfPending: false, // Start with false to create a batch
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ ctx := context.Background()
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // First request creates a batch
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01",
+ },
+ "latest",
+ })
+ jrq1.ID = "req1"
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ entry1, bypass1, err := batcher.Enqueue(ctx, key, req1)
+ require.NoError(t, err)
+ require.False(t, bypass1, "first request should create batch")
+ require.NotNil(t, entry1)
+
+ // Second request should join the existing batch
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x2222222222222222222222222222222222222222",
+ "data": "0x02",
+ },
+ "latest",
+ })
+ jrq2.ID = "req2"
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry2, bypass2, err := batcher.Enqueue(ctx, key, req2)
+ require.NoError(t, err)
+ require.False(t, bypass2, "second request should join existing batch")
+ require.NotNil(t, entry2)
+
+ // Wait for results
+ result1 := <-entry1.ResultCh
+ result2 := <-entry2.ResultCh
+
+ require.NoError(t, result1.Error)
+ require.NoError(t, result2.Error)
+}
+
+func TestBatcher_DuplicateCallsShareResult(t *testing.T) {
+ // Create result with 1 unique call (all three requests share the same target+data)
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xde, 0xad, 0xbe, 0xef}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarder{response: mockResp}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 100,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Three identical requests - should all share the same result
+ entries := make([]*BatchEntry, 3)
+ for i := 0; i < 3; i++ {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x12345678",
+ },
+ "latest",
+ })
+ jrq.ID = fmt.Sprintf("req%d", i)
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+ entries[i] = entry
+ }
+
+ // All entries should have the same callKey (deduplication)
+ require.Equal(t, entries[0].CallKey, entries[1].CallKey)
+ require.Equal(t, entries[1].CallKey, entries[2].CallKey)
+
+ // Wait for all results
+ for i, entry := range entries {
+ result := <-entry.ResultCh
+ require.NoError(t, result.Error, "entry %d should succeed", i)
+ require.NotNil(t, result.Response)
+
+ jrrResult, err := result.Response.JsonRpcResponse()
+ require.NoError(t, err)
+ require.Equal(t, "\"0xdeadbeef\"", jrrResult.GetResultString())
+ }
+
+ // Forwarder should only be called once (all requests batched into single multicall)
+ forwarder.mu.Lock()
+ require.Equal(t, 1, forwarder.called)
+ forwarder.mu.Unlock()
+}
+
+// TestBatcher_ShutdownDuringActiveFlush verifies that shutdown during an active
+// flush delivers shutdown errors to pending entries and cleans up properly.
+func TestBatcher_ShutdownDuringActiveFlush(t *testing.T) {
+ // Create a forwarder that blocks to simulate a long-running flush
+ flushStarted := make(chan struct{})
+ flushBlock := make(chan struct{})
+
+ forwarder := &mockForwarderFunc{
+ forwardFunc: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ close(flushStarted) // Signal that flush has started
+ <-flushBlock // Block until test unblocks
+ return nil, fmt.Errorf("should not reach here")
+ },
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 1,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Enqueue a request
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for flush to start (forwarder called)
+ select {
+ case <-flushStarted:
+ // Good - flush has started
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout waiting for flush to start")
+ }
+
+ // Now enqueue another request for a DIFFERENT batch key
+ // This creates a new batch that hasn't started flushing yet
+ key2 := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "0x12345", // Different block ref = different batch
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x2222222222222222222222222222222222222222",
+ "data": "0x05060708",
+ },
+ "0x12345",
+ })
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry2, bypass2, err2 := batcher.Enqueue(ctx, key2, req2)
+ require.NoError(t, err2)
+ require.False(t, bypass2)
+
+ // Call shutdown while first flush is blocked
+ // This should trigger flushWithShutdownError for the second batch
+ go func() {
+ time.Sleep(10 * time.Millisecond) // Give shutdown a head start
+ close(flushBlock) // Unblock the first flush
+ }()
+
+ batcher.Shutdown()
+
+ // The second entry should receive a shutdown error
+ select {
+ case result := <-entry2.ResultCh:
+ require.Error(t, result.Error)
+ require.Contains(t, result.Error.Error(), "shutting down")
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for shutdown error on entry2")
+ }
+
+ // First entry gets error because forwarder returns error after unblock
+ select {
+ case result := <-entry.ResultCh:
+ // Either an error from forwarder or from shutdown is acceptable
+ require.Error(t, result.Error)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result on entry1")
+ }
+}
+
+// TestBatcher_DoubleFlushPrevention verifies that concurrent flush calls
+// on the same batch don't result in double-processing (race condition test).
+func TestBatcher_DoubleFlushPrevention(t *testing.T) {
+ var forwardCallCount int64
+ var mu sync.Mutex
+
+ // Create a forwarder that counts calls
+ forwarder := &mockForwarderFunc{
+ forwardFunc: func(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ mu.Lock()
+ forwardCallCount++
+ mu.Unlock()
+
+ // Return multicall3 results with 1 successful call
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa, 0xbb}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+ jrr, _ := common.NewJsonRpcResponse(nil, resultHex, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 1000, // Long window so we control when flush happens
+ MinWaitMs: 500,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Enqueue a request
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Get the batch directly from the batcher's internal map
+ keyStr := key.String()
+ batcher.mu.Lock()
+ batch := batcher.batches[keyStr]
+ batcher.mu.Unlock()
+ require.NotNil(t, batch, "batch should exist")
+
+ // Simulate concurrent flush calls (race condition scenario)
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ batcher.flush(keyStr, batch)
+ }()
+ }
+ wg.Wait()
+
+ // Verify forwarder was only called once (double-flush prevented)
+ mu.Lock()
+ finalCallCount := forwardCallCount
+ mu.Unlock()
+ require.Equal(t, int64(1), finalCallCount, "forwarder should only be called once despite concurrent flush attempts")
+
+ // Entry should receive exactly one result
+ select {
+ case result := <-entry.ResultCh:
+ require.NoError(t, result.Error)
+ require.NotNil(t, result.Response)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result")
+ }
+}
+
+// mockPanicForwarder panics when Forward is called to test panic recovery
+type mockPanicForwarder struct {
+ panicMessage string
+}
+
+func (m *mockPanicForwarder) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ panic(m.panicMessage)
+}
+
+func (m *mockPanicForwarder) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ return nil
+}
+
+func TestBatcher_ScheduleFlush_PanicRecovery(t *testing.T) {
+ forwarder := &mockPanicForwarder{panicMessage: "test panic in forwarder"}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Enqueue a request
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for the batch to flush (will panic and recover)
+ select {
+ case result := <-entry.ResultCh:
+ // Should receive an error due to the panic
+ require.Error(t, result.Error)
+ require.Contains(t, result.Error.Error(), "panic")
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result - panic recovery may have failed")
+ }
+}
+
+// mockFallbackThenPanicForwarder returns a fallback-triggering error on the first call,
+// then panics on subsequent (individual) calls to exercise fallback panic recovery.
+type mockFallbackThenPanicForwarder struct {
+ callCount int
+ panicMessage string
+ mu sync.Mutex
+}
+
+func (m *mockFallbackThenPanicForwarder) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ m.mu.Lock()
+ m.callCount++
+ count := m.callCount
+ m.mu.Unlock()
+
+ if count == 1 {
+ // First call is the multicall - return error that triggers fallback
+ return nil, common.NewErrEndpointExecutionException(
+ fmt.Errorf("contract not found"),
+ )
+ }
+ // Subsequent calls (individual fallback) - panic
+ panic(m.panicMessage)
+}
+
+func (m *mockFallbackThenPanicForwarder) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ return nil
+}
+
+func TestBatcher_FallbackIndividual_PanicRecovery(t *testing.T) {
+ forwarder := &mockFallbackThenPanicForwarder{panicMessage: "test panic in fallback"}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 10,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Enqueue a request
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x1111111111111111111111111111111111111111",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for the batch to flush (multicall fails with "contract not found",
+ // triggers fallback, fallback panics and recovers)
+ select {
+ case result := <-entry.ResultCh:
+ // Should receive an error due to the panic in fallback
+ require.Error(t, result.Error)
+ require.Contains(t, result.Error.Error(), "panic in fallback forward")
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result - fallback panic recovery may have failed")
+ }
+}
+
+func TestBatcher_MaxQueueSize_Enforcement(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 1000, // Long window to prevent auto-flush
+ MinWaitMs: 5,
+ MaxCalls: 100,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 3, // Small queue for testing
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Fill up the queue
+ for i := 0; i < 3; i++ {
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": fmt.Sprintf("0x%040d", i+1),
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass, "request %d should be enqueued", i)
+ }
+
+ // Next request should bypass due to full queue
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x0000000000000000000000000000000000000099",
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.True(t, bypass, "4th request should bypass due to full queue")
+}
+
+func TestBatchingKey_Validate(t *testing.T) {
+ tests := []struct {
+ name string
+ key BatchingKey
+ wantErr bool
+ errMsg string
+ }{
+ {
+ name: "valid key",
+ key: BatchingKey{
+ ProjectId: "proj1",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: "v1:",
+ },
+ wantErr: false,
+ },
+ {
+ name: "missing project id",
+ key: BatchingKey{
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ },
+ wantErr: true,
+ errMsg: "ProjectId is required",
+ },
+ {
+ name: "missing network id",
+ key: BatchingKey{
+ ProjectId: "proj1",
+ BlockRef: "latest",
+ },
+ wantErr: true,
+ errMsg: "NetworkId is required",
+ },
+ {
+ name: "missing block ref",
+ key: BatchingKey{
+ ProjectId: "proj1",
+ NetworkId: "evm:1",
+ },
+ wantErr: true,
+ errMsg: "BlockRef is required",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.key.Validate()
+ if tt.wantErr {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tt.errMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestBatcher_InvalidTargetLength_Bypass(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // Request with invalid target address (21 bytes instead of 20)
+ // 42 hex chars = 21 bytes
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{
+ "to": "0x00112233445566778899aabbccddeeff00112233ab", // 21 bytes (42 hex chars)
+ "data": "0x01020304",
+ },
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+ _, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.Error(t, err)
+ require.True(t, bypass, "request with invalid target should bypass")
+ require.Contains(t, err.Error(), "invalid target address length")
+}
+
+func TestDecodeMulticallResponse_NilResponse(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Test with nil response
+ results, err := batcher.decodeMulticallResponse(nil)
+ require.Error(t, err)
+ require.Nil(t, results)
+ require.Contains(t, err.Error(), "nil response")
+}
+
+func TestDecodeMulticallResponse_NilJsonRpc(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Test with response that has no JsonRpcResponse set
+ resp := common.NewNormalizedResponse()
+ results, err := batcher.decodeMulticallResponse(resp)
+ require.Error(t, err)
+ require.Nil(t, results)
+ // Error comes from JsonRpcResponse() which returns an error when no body is available
+ require.Contains(t, err.Error(), "no body available to parse JsonRpcResponse")
+}
+
+func TestDecodeMulticallResponse_JsonRpcError(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Create a response with JSON-RPC error
+ jrr := &common.JsonRpcResponse{
+ Error: common.NewErrJsonRpcExceptionExternal(
+ -32000,
+ "execution error",
+ "",
+ ),
+ }
+ resp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ results, err := batcher.decodeMulticallResponse(resp)
+ require.Error(t, err)
+ require.Nil(t, results)
+ require.Contains(t, err.Error(), "execution error")
+}
+
+func TestDecodeMulticallResponse_EmptyResult(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Create a response with empty/null result
+ jrr, err := common.NewJsonRpcResponse(nil, nil, nil)
+ require.NoError(t, err)
+ resp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ results, err := batcher.decodeMulticallResponse(resp)
+ require.Error(t, err)
+ require.Nil(t, results)
+ require.Contains(t, err.Error(), "empty result")
+}
+
+func TestBlockParamForMulticall_BlockHashEIP1898(t *testing.T) {
+ tests := []struct {
+ name string
+ blockRef string
+ expected interface{}
+ wantErr bool
+ }{
+ {
+ name: "block hash wraps to EIP-1898",
+ blockRef: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ expected: map[string]interface{}{"blockHash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"},
+ wantErr: false,
+ },
+ {
+ name: "hex block number stays as-is",
+ blockRef: "0x10",
+ expected: "0x10",
+ wantErr: false,
+ },
+ {
+ name: "decimal block number converts to hex",
+ blockRef: "16",
+ expected: "0x10",
+ wantErr: false,
+ },
+ {
+ name: "named block tag stays as-is",
+ blockRef: "latest",
+ expected: "latest",
+ wantErr: false,
+ },
+ {
+ name: "finalized stays as-is",
+ blockRef: "finalized",
+ expected: "finalized",
+ wantErr: false,
+ },
+ {
+ name: "safe stays as-is",
+ blockRef: "safe",
+ expected: "safe",
+ wantErr: false,
+ },
+ {
+ name: "empty string becomes latest",
+ blockRef: "",
+ expected: "latest",
+ wantErr: false,
+ },
+ {
+ name: "short hex (not block hash) stays as-is",
+ blockRef: "0xabc123",
+ expected: "0xabc123",
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result, err := blockParamForMulticall(tt.blockRef)
+ if tt.wantErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.expected, result)
+ }
+ })
+ }
+}
+
+func TestSendResult_ContextCancelled_ReleasesResponse(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Create a cancelled context
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ entry := &BatchEntry{
+ Ctx: ctx,
+ ResultCh: make(chan BatchResult, 1),
+ }
+
+ // Create a mock response to track release
+ jrr, err := common.NewJsonRpcResponse(nil, "0xdeadbeef", nil)
+ require.NoError(t, err)
+ resp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ // sendResult should return false because context is cancelled
+ sent := batcher.sendResult(entry, BatchResult{Response: resp}, "test-project", "evm:1")
+ require.False(t, sent, "sendResult should return false for cancelled context")
+
+ // ResultCh should be empty since the result was not sent
+ select {
+ case <-entry.ResultCh:
+ t.Fatal("result should not have been sent to channel")
+ default:
+ // Expected - channel is empty
+ }
+}
+
+func TestBatcher_SendResult_SuccessfulDelivery(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Create a valid (non-cancelled) context
+ ctx := context.Background()
+
+ entry := &BatchEntry{
+ Ctx: ctx,
+ ResultCh: make(chan BatchResult, 1),
+ }
+
+ // Create a mock response
+ jrr, err := common.NewJsonRpcResponse(nil, "0xdeadbeef", nil)
+ require.NoError(t, err)
+ resp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ // sendResult should return true for successful delivery
+ sent := batcher.sendResult(entry, BatchResult{Response: resp}, "test-project", "evm:1")
+ require.True(t, sent, "sendResult should return true for successful delivery")
+
+ // ResultCh should have the result
+ select {
+ case result := <-entry.ResultCh:
+ require.NotNil(t, result.Response)
+ require.NoError(t, result.Error)
+ default:
+ t.Fatal("result should have been sent to channel")
+ }
+}
+
+func TestBatcher_DeliverError_SkipsCancelledContexts(t *testing.T) {
+ forwarder := &mockForwarder{}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Create one cancelled and one active context
+ cancelledCtx, cancel := context.WithCancel(context.Background())
+ cancel()
+ activeCtx := context.Background()
+
+ entries := []*BatchEntry{
+ {Ctx: cancelledCtx, ResultCh: make(chan BatchResult, 1)},
+ {Ctx: activeCtx, ResultCh: make(chan BatchResult, 1)},
+ }
+
+ testErr := fmt.Errorf("test error")
+ batcher.deliverError(entries, testErr, "test-project", "evm:1")
+
+ // Cancelled context entry should not receive the error
+ select {
+ case <-entries[0].ResultCh:
+ t.Fatal("cancelled entry should not receive result")
+ default:
+ // Expected
+ }
+
+ // Active context entry should receive the error
+ select {
+ case result := <-entries[1].ResultCh:
+ require.Error(t, result.Error)
+ require.Equal(t, testErr, result.Error)
+ default:
+ t.Fatal("active entry should receive error")
+ }
+}
+
+func TestBatcher_MultipleDeadlinesPickEarliest(t *testing.T) {
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa}},
+ {Success: true, ReturnData: []byte{0xbb}},
+ }
+ encodedResult := encodeAggregate3Results(results)
+ resultHex := "0x" + hex.EncodeToString(encodedResult)
+
+ jrr, err := common.NewJsonRpcResponse(nil, resultHex, nil)
+ require.NoError(t, err)
+ mockResp := common.NewNormalizedResponse().WithJsonRpcResponse(jrr)
+
+ forwarder := &mockForwarder{response: mockResp}
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 100,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ // First request with a later deadline
+ laterDeadline := time.Now().Add(200 * time.Millisecond)
+ ctx1, cancel1 := context.WithDeadline(context.Background(), laterDeadline)
+ defer cancel1()
+
+ jrq1 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x1111111111111111111111111111111111111111", "data": "0x01"},
+ "latest",
+ })
+ req1 := common.NewNormalizedRequestFromJsonRpcRequest(jrq1)
+
+ entry1, bypass1, err := batcher.Enqueue(ctx1, key, req1)
+ require.NoError(t, err)
+ require.False(t, bypass1)
+
+ // Get initial flush time
+ batcher.mu.Lock()
+ batch := batcher.batches[key.String()]
+ batcher.mu.Unlock()
+ require.NotNil(t, batch)
+ batch.mu.Lock()
+ initialFlushTime := batch.FlushTime
+ batch.mu.Unlock()
+
+ // Second request with an earlier deadline (should update flush time)
+ earlierDeadline := time.Now().Add(50 * time.Millisecond)
+ ctx2, cancel2 := context.WithDeadline(context.Background(), earlierDeadline)
+ defer cancel2()
+
+ jrq2 := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": "0x2222222222222222222222222222222222222222", "data": "0x02"},
+ "latest",
+ })
+ req2 := common.NewNormalizedRequestFromJsonRpcRequest(jrq2)
+
+ entry2, bypass2, err := batcher.Enqueue(ctx2, key, req2)
+ require.NoError(t, err)
+ require.False(t, bypass2)
+
+ // Check that flush time was updated to the earlier deadline
+ batch.mu.Lock()
+ updatedFlushTime := batch.FlushTime
+ batch.mu.Unlock()
+
+ require.True(t, updatedFlushTime.Before(initialFlushTime), "flush time should be updated to earlier deadline")
+
+ // Wait for both results
+ select {
+ case result1 := <-entry1.ResultCh:
+ require.NoError(t, result1.Error)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for entry1 result")
+ }
+
+ select {
+ case result2 := <-entry2.ResultCh:
+ require.NoError(t, result2.Error)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for entry2 result")
+ }
+}
+
+// dynamicMockForwarder allows returning different responses based on call count
+type dynamicMockForwarder struct {
+ mu sync.Mutex
+ calls []*common.NormalizedRequest
+ responses []*common.NormalizedResponse
+ errors []error
+}
+
+func (m *dynamicMockForwarder) Forward(ctx context.Context, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ callIndex := len(m.calls)
+ m.calls = append(m.calls, req)
+
+ if callIndex < len(m.responses) {
+ return m.responses[callIndex], m.errors[callIndex]
+ }
+
+ // Default: return last response/error
+ if len(m.responses) > 0 {
+ return m.responses[len(m.responses)-1], m.errors[len(m.errors)-1]
+ }
+ return nil, fmt.Errorf("no response configured")
+}
+
+func (m *dynamicMockForwarder) SetCache(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) error {
+ return nil
+}
+
+func (m *dynamicMockForwarder) CallCount() int {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return len(m.calls)
+}
+
+func TestBatcher_RuntimeBypass_Methods(t *testing.T) {
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AutoDetectBypass: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Test address normalization
+ addr1 := "0x1111111111111111111111111111111111111111"
+ addr1Normalized := "1111111111111111111111111111111111111111"
+
+ // Initially not bypassed
+ require.False(t, batcher.isRuntimeBypassed(addr1Normalized))
+ require.False(t, batcher.IsRuntimeBypassed(addr1))
+
+ // Add to runtime bypass
+ batcher.addRuntimeBypass(addr1Normalized, "test-project", "evm:1")
+
+ // Now should be bypassed
+ require.True(t, batcher.isRuntimeBypassed(addr1Normalized))
+ require.True(t, batcher.IsRuntimeBypassed(addr1))
+
+ // Test case insensitivity
+ require.True(t, batcher.IsRuntimeBypassed("0x1111111111111111111111111111111111111111"))
+ require.True(t, batcher.IsRuntimeBypassed("0X1111111111111111111111111111111111111111"))
+
+ // Different address should not be bypassed
+ require.False(t, batcher.IsRuntimeBypassed("0x2222222222222222222222222222222222222222"))
+}
+
+func TestBatcher_AutoDetectBypass_DetectsRevertingContract(t *testing.T) {
+ // Create a multicall response where one call reverts
+ revertedResults := []Multicall3Result{
+ {Success: false, ReturnData: []byte{0x08, 0xc3, 0x79, 0xa0}}, // execution reverted
+ }
+ encodedRevert := encodeAggregate3Results(revertedResults)
+ revertResultHex := "0x" + hex.EncodeToString(encodedRevert)
+
+ multicallResp, err := common.NewJsonRpcResponse(nil, revertResultHex, nil)
+ require.NoError(t, err)
+ mockMulticallResp := common.NewNormalizedResponse().WithJsonRpcResponse(multicallResp)
+
+ // Create success response for individual call
+ individualResp, err := common.NewJsonRpcResponse(nil, "0xdeadbeef", nil)
+ require.NoError(t, err)
+ mockIndividualResp := common.NewNormalizedResponse().WithJsonRpcResponse(individualResp)
+
+ forwarder := &dynamicMockForwarder{
+ responses: []*common.NormalizedResponse{mockMulticallResp, mockIndividualResp},
+ errors: []error{nil, nil},
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AutoDetectBypass: true, // Enable auto-detect
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ targetAddr := "0x1111111111111111111111111111111111111111"
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": targetAddr, "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ // Verify not runtime bypassed initially
+ require.False(t, batcher.IsRuntimeBypassed(targetAddr))
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for result
+ select {
+ case result := <-entry.ResultCh:
+ // Should succeed because the individual retry succeeded
+ require.NoError(t, result.Error)
+ require.NotNil(t, result.Response)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result")
+ }
+
+ // Verify contract was added to runtime bypass
+ require.True(t, batcher.IsRuntimeBypassed(targetAddr))
+
+ // Verify two calls were made (multicall + individual retry)
+ require.Equal(t, 2, forwarder.CallCount())
+}
+
+func TestBatcher_AutoDetectBypass_Disabled(t *testing.T) {
+ // Create a multicall response where one call reverts
+ revertedResults := []Multicall3Result{
+ {Success: false, ReturnData: []byte{0x08, 0xc3, 0x79, 0xa0}}, // execution reverted
+ }
+ encodedRevert := encodeAggregate3Results(revertedResults)
+ revertResultHex := "0x" + hex.EncodeToString(encodedRevert)
+
+ multicallResp, err := common.NewJsonRpcResponse(nil, revertResultHex, nil)
+ require.NoError(t, err)
+ mockMulticallResp := common.NewNormalizedResponse().WithJsonRpcResponse(multicallResp)
+
+ forwarder := &dynamicMockForwarder{
+ responses: []*common.NormalizedResponse{mockMulticallResp},
+ errors: []error{nil},
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AutoDetectBypass: false, // Disabled
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ targetAddr := "0x1111111111111111111111111111111111111111"
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": targetAddr, "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for result
+ select {
+ case result := <-entry.ResultCh:
+ // Should fail with execution reverted error (no retry since AutoDetectBypass is false)
+ require.Error(t, result.Error)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result")
+ }
+
+ // Verify contract was NOT added to runtime bypass
+ require.False(t, batcher.IsRuntimeBypassed(targetAddr))
+
+ // Verify only one call was made (no retry)
+ require.Equal(t, 1, forwarder.CallCount())
+}
+
+func TestBatcher_AutoDetectBypass_SameErrorNoBypass(t *testing.T) {
+ // Create a multicall response where one call reverts
+ revertedResults := []Multicall3Result{
+ {Success: false, ReturnData: []byte{0x08, 0xc3, 0x79, 0xa0}}, // execution reverted
+ }
+ encodedRevert := encodeAggregate3Results(revertedResults)
+ revertResultHex := "0x" + hex.EncodeToString(encodedRevert)
+
+ multicallResp, err := common.NewJsonRpcResponse(nil, revertResultHex, nil)
+ require.NoError(t, err)
+ mockMulticallResp := common.NewNormalizedResponse().WithJsonRpcResponse(multicallResp)
+
+ // Individual retry also fails with an error
+ individualErr := fmt.Errorf("execution reverted")
+
+ forwarder := &dynamicMockForwarder{
+ responses: []*common.NormalizedResponse{mockMulticallResp, nil},
+ errors: []error{nil, individualErr},
+ }
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AutoDetectBypass: true, // Enabled
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ targetAddr := "0x1111111111111111111111111111111111111111"
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": targetAddr, "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.False(t, bypass)
+
+ // Wait for result
+ select {
+ case result := <-entry.ResultCh:
+ // Should fail (both multicall and individual call failed)
+ require.Error(t, result.Error)
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for result")
+ }
+
+ // Verify contract was NOT added to runtime bypass (it fails both ways)
+ require.False(t, batcher.IsRuntimeBypassed(targetAddr))
+
+ // Verify two calls were made (multicall + individual retry)
+ require.Equal(t, 2, forwarder.CallCount())
+}
+
+func TestBatcher_RuntimeBypass_SkipsEnqueue(t *testing.T) {
+ // Test that once a contract is in runtime bypass, Enqueue returns bypass=true
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AutoDetectBypass: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ CachePerCall: util.BoolPtr(false),
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Pre-populate runtime bypass
+ targetAddr := "0x1111111111111111111111111111111111111111"
+ targetAddrNormalized := "1111111111111111111111111111111111111111"
+ batcher.addRuntimeBypass(targetAddrNormalized, "test-project", "evm:1")
+
+ ctx := context.Background()
+ key := BatchingKey{
+ ProjectId: "test-project",
+ NetworkId: "evm:1",
+ BlockRef: "latest",
+ DirectivesKey: DeriveDirectivesKey(nil),
+ }
+
+ jrq := common.NewJsonRpcRequest("eth_call", []interface{}{
+ map[string]interface{}{"to": targetAddr, "data": "0x01"},
+ "latest",
+ })
+ req := common.NewNormalizedRequestFromJsonRpcRequest(jrq)
+
+ // Enqueue should return bypass=true
+ entry, bypass, err := batcher.Enqueue(ctx, key, req)
+ require.NoError(t, err)
+ require.True(t, bypass, "should bypass for runtime-bypassed contract")
+ require.Nil(t, entry, "entry should be nil when bypassing")
+}
+
+func TestBatcher_RuntimeBypass_ConcurrentAccess(t *testing.T) {
+ // Test that concurrent reads and writes to the runtime bypass cache are safe
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ AutoDetectBypass: true,
+ WindowMs: 50,
+ MinWaitMs: 5,
+ SafetyMarginMs: 5,
+ MaxCalls: 10,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+ batcher := NewBatcher(cfg, forwarder, nil)
+ require.NotNil(t, batcher)
+ defer batcher.Shutdown()
+
+ // Concurrent writes to the same address
+ var wg sync.WaitGroup
+ addr := "1111111111111111111111111111111111111111"
+
+ // Start 50 goroutines that add the same address
+ for i := 0; i < 50; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ batcher.addRuntimeBypass(addr, "test-project", "evm:1")
+ }()
+ }
+
+ // Start 50 goroutines that read the address
+ for i := 0; i < 50; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _ = batcher.isRuntimeBypassed(addr)
+ }()
+ }
+
+ // Start 50 goroutines that add different addresses
+ for i := 0; i < 50; i++ {
+ wg.Add(1)
+ go func(idx int) {
+ defer wg.Done()
+ uniqueAddr := fmt.Sprintf("%040d", idx)
+ batcher.addRuntimeBypass(uniqueAddr, "test-project", "evm:1")
+ }(i)
+ }
+
+ wg.Wait()
+
+ // Verify the shared address is bypassed
+ require.True(t, batcher.isRuntimeBypassed(addr))
+
+ // Verify all unique addresses are bypassed
+ for i := 0; i < 50; i++ {
+ uniqueAddr := fmt.Sprintf("%040d", i)
+ require.True(t, batcher.isRuntimeBypassed(uniqueAddr), "unique address %d should be bypassed", i)
+ }
+}
diff --git a/architecture/evm/multicall3_manager.go b/architecture/evm/multicall3_manager.go
new file mode 100644
index 000000000..44a49b05f
--- /dev/null
+++ b/architecture/evm/multicall3_manager.go
@@ -0,0 +1,77 @@
+package evm
+
+import (
+ "sync"
+
+ "github.com/erpc/erpc/common"
+ "github.com/rs/zerolog"
+)
+
+// BatcherManager manages per-project+network Multicall3 batchers.
+// It provides thread-safe access to batchers keyed by projectId and networkId.
+// Each batcher handles batching for a specific project and network combination
+// to ensure proper isolation between projects.
+type BatcherManager struct {
+ batchers map[string]*Batcher // Key: projectId + "\x00" + networkId
+ mu sync.RWMutex
+}
+
+// NewBatcherManager creates a new batcher manager.
+func NewBatcherManager() *BatcherManager {
+ return &BatcherManager{
+ batchers: make(map[string]*Batcher),
+ }
+}
+
+// GetOrCreate returns the batcher for a project+network, creating one if needed.
+// The key combines projectId and networkId to ensure project isolation.
+// Returns nil if batching is disabled (cfg is nil or cfg.Enabled is false).
+// The logger parameter is optional; when nil, debug logging is disabled.
+func (m *BatcherManager) GetOrCreate(projectId, networkId string, cfg *common.Multicall3AggregationConfig, forwarder Forwarder, logger *zerolog.Logger) *Batcher {
+ // Use null byte separator to prevent key collisions from field values containing common separators
+ key := projectId + "\x00" + networkId
+
+ m.mu.RLock()
+ if b, ok := m.batchers[key]; ok {
+ m.mu.RUnlock()
+ return b
+ }
+ m.mu.RUnlock()
+
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ // Double-check after acquiring write lock
+ if b, ok := m.batchers[key]; ok {
+ return b
+ }
+
+ batcher := NewBatcher(cfg, forwarder, logger)
+ if batcher == nil {
+ // Don't store nil batchers - batching is disabled for this config
+ return nil
+ }
+ m.batchers[key] = batcher
+ return batcher
+}
+
+// Get returns the batcher for a project+network, or nil if none exists.
+func (m *BatcherManager) Get(projectId, networkId string) *Batcher {
+ key := projectId + "\x00" + networkId
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ return m.batchers[key]
+}
+
+// Shutdown stops all batchers.
+func (m *BatcherManager) Shutdown() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ for _, b := range m.batchers {
+ if b != nil {
+ b.Shutdown()
+ }
+ }
+ m.batchers = make(map[string]*Batcher)
+}
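+
+// Illustrative usage (a sketch, not a prescribed integration): a caller that
+// already holds a Forwarder and a Multicall3AggregationConfig can lazily obtain
+// the per-project+network batcher and enqueue eth_call requests through it.
+// The variable names below (cfg, fwd, req, etc.) are assumptions for this
+// example only; GetOrCreate returns nil when batching is disabled, in which
+// case the request should be forwarded directly.
+//
+//	mgr := NewBatcherManager()
+//	defer mgr.Shutdown()
+//
+//	b := mgr.GetOrCreate("my-project", "evm:1", cfg, fwd, nil)
+//	if b == nil {
+//	    // Batching disabled for this config: forward the request as-is.
+//	    return fwd.Forward(ctx, req)
+//	}
+//	key := BatchingKey{
+//	    ProjectId:     "my-project",
+//	    NetworkId:     "evm:1",
+//	    BlockRef:      "latest",
+//	    DirectivesKey: DeriveDirectivesKey(nil),
+//	}
+//	entry, bypass, err := b.Enqueue(ctx, key, req)
+//	if err != nil || bypass {
+//	    // Oversized, runtime-bypassed, or full-queue requests fall back to a direct forward.
+//	    return fwd.Forward(ctx, req)
+//	}
+//	result := <-entry.ResultCh
+//	return result.Response, result.Error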
diff --git a/architecture/evm/multicall3_manager_test.go b/architecture/evm/multicall3_manager_test.go
new file mode 100644
index 000000000..be01b2110
--- /dev/null
+++ b/architecture/evm/multicall3_manager_test.go
@@ -0,0 +1,143 @@
+package evm
+
+import (
+ "sync"
+ "testing"
+
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/util"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBatcherManagerGetOrCreate(t *testing.T) {
+ mgr := NewBatcherManager()
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 25,
+ MinWaitMs: 2,
+ MaxCalls: 20,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 100,
+ MaxPendingBatches: 20,
+ AllowCrossUserBatching: util.BoolPtr(true),
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+
+ // Get batcher for project+network
+ batcher1 := mgr.GetOrCreate("project1", "evm:1", cfg, forwarder, nil)
+ require.NotNil(t, batcher1)
+
+ // Same project+network should return same batcher
+ batcher2 := mgr.GetOrCreate("project1", "evm:1", cfg, forwarder, nil)
+ require.Same(t, batcher1, batcher2)
+
+ // Different network (same project) should return different batcher
+ batcher3 := mgr.GetOrCreate("project1", "evm:137", cfg, forwarder, nil)
+ require.NotSame(t, batcher1, batcher3)
+
+ // Different project (same network) should return different batcher
+ batcher4 := mgr.GetOrCreate("project2", "evm:1", cfg, forwarder, nil)
+ require.NotSame(t, batcher1, batcher4)
+
+ mgr.Shutdown()
+}
+
+func TestBatcherManagerConcurrency(t *testing.T) {
+ mgr := NewBatcherManager()
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 25,
+ MinWaitMs: 2,
+ MaxCalls: 20,
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+
+ var wg sync.WaitGroup
+ batchers := make([]*Batcher, 100)
+
+ // Concurrent access
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(idx int) {
+ defer wg.Done()
+ batchers[idx] = mgr.GetOrCreate("project1", "evm:1", cfg, forwarder, nil)
+ }(i)
+ }
+ wg.Wait()
+
+ // All should be the same batcher
+ for i := 1; i < 100; i++ {
+ require.Same(t, batchers[0], batchers[i])
+ }
+
+ mgr.Shutdown()
+}
+
+func TestBatcherManagerGet(t *testing.T) {
+ mgr := NewBatcherManager()
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 25,
+ MinWaitMs: 2,
+ MaxCalls: 20,
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+
+ // Get before create should return nil
+ batcher := mgr.Get("project1", "evm:1")
+ require.Nil(t, batcher)
+
+ // Create batcher
+ created := mgr.GetOrCreate("project1", "evm:1", cfg, forwarder, nil)
+ require.NotNil(t, created)
+
+ // Get after create should return the same batcher
+ retrieved := mgr.Get("project1", "evm:1")
+ require.Same(t, created, retrieved)
+
+ // Get for different network should return nil
+ other := mgr.Get("project1", "evm:137")
+ require.Nil(t, other)
+
+ // Get for different project should return nil
+ otherProject := mgr.Get("project2", "evm:1")
+ require.Nil(t, otherProject)
+
+ mgr.Shutdown()
+}
+
+func TestBatcherManagerShutdown(t *testing.T) {
+ mgr := NewBatcherManager()
+
+ cfg := &common.Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 25,
+ MinWaitMs: 2,
+ MaxCalls: 20,
+ }
+ cfg.SetDefaults()
+
+ forwarder := &mockForwarder{}
+
+ // Create multiple batchers across projects and networks
+ mgr.GetOrCreate("project1", "evm:1", cfg, forwarder, nil)
+ mgr.GetOrCreate("project1", "evm:137", cfg, forwarder, nil)
+ mgr.GetOrCreate("project2", "evm:1", cfg, forwarder, nil)
+
+ // Shutdown should clean up all batchers
+ mgr.Shutdown()
+
+ // After shutdown, batchers map should be empty
+ require.Nil(t, mgr.Get("project1", "evm:1"))
+ require.Nil(t, mgr.Get("project1", "evm:137"))
+ require.Nil(t, mgr.Get("project2", "evm:1"))
+}
diff --git a/architecture/evm/multicall3_test.go b/architecture/evm/multicall3_test.go
new file mode 100644
index 000000000..e1875d969
--- /dev/null
+++ b/architecture/evm/multicall3_test.go
@@ -0,0 +1,582 @@
+package evm
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/erpc/erpc/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSafeUint64ToInt(t *testing.T) {
+ tests := []struct {
+ name string
+ input uint64
+ want int
+ wantErr bool
+ }{
+ {
+ name: "zero",
+ input: 0,
+ want: 0,
+ wantErr: false,
+ },
+ {
+ name: "small positive",
+ input: 100,
+ want: 100,
+ wantErr: false,
+ },
+ {
+ name: "max int boundary",
+ input: uint64(math.MaxInt),
+ want: math.MaxInt,
+ wantErr: false,
+ },
+ {
+ name: "overflow - max int plus one",
+ input: uint64(math.MaxInt) + 1,
+ want: 0,
+ wantErr: true,
+ },
+ {
+ name: "overflow - max uint64",
+ input: math.MaxUint64,
+ want: 0,
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := safeUint64ToInt(tt.input)
+ if tt.wantErr {
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "overflow")
+ return
+ }
+ require.NoError(t, err)
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func TestNormalizeBlockParam(t *testing.T) {
+ cases := []struct {
+ name string
+ param interface{}
+ want string
+ wantErr bool
+ }{
+ {
+ name: "nil",
+ param: nil,
+ want: "latest",
+ },
+ {
+ name: "hex number",
+ param: "0x10",
+ want: "16",
+ },
+ {
+ name: "tag",
+ param: "latest",
+ want: "latest",
+ },
+ {
+ name: "block hash",
+ param: "0x" + strings.Repeat("ab", 32),
+ want: "0x" + strings.Repeat("ab", 32),
+ },
+ {
+ name: "block hash object",
+ param: map[string]interface{}{"blockHash": "0x" + strings.Repeat("cd", 32)},
+ want: "0x" + strings.Repeat("cd", 32),
+ },
+ {
+ name: "block number object",
+ param: map[string]interface{}{"blockNumber": "0x2"},
+ want: "2",
+ },
+ {
+ name: "block tag object",
+ param: map[string]interface{}{"blockTag": "pending"},
+ want: "pending",
+ },
+ {
+ name: "empty string",
+ param: "",
+ wantErr: true,
+ },
+ {
+ name: "invalid type",
+ param: []int{1},
+ wantErr: true,
+ },
+ {
+ name: "invalid hex",
+ param: "0xzz",
+ wantErr: true,
+ },
+ {
+ name: "invalid block hash",
+ param: map[string]interface{}{"blockHash": "0x" + strings.Repeat("ab", 33)},
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range cases {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := NormalizeBlockParam(tt.param)
+ if tt.wantErr {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func TestBuildMulticall3Request_Success(t *testing.T) {
+ call1 := map[string]interface{}{
+ "to": hexAddr(1),
+ "data": hexData(1),
+ }
+ call2 := map[string]interface{}{
+ "to": hexAddr(2),
+ "input": hexData(32),
+ }
+
+ req1 := newEthCallRequest(t, 1, call1, "latest")
+ req2 := newEthCallRequest(t, "req-2", call2, "latest")
+ req1.SetDirectives(&common.RequestDirectives{SkipCacheRead: true})
+ req1.SetUser(&common.User{Id: "user-1"})
+
+ mcReq, calls, err := BuildMulticall3Request([]*common.NormalizedRequest{req1, req2}, nil)
+ require.NoError(t, err)
+ require.Len(t, calls, 2)
+ require.NotNil(t, mcReq)
+
+ mcID, ok := mcReq.ID().(string)
+ require.True(t, ok)
+ assert.True(t, strings.HasPrefix(mcID, "multicall3-"))
+
+ require.NotNil(t, mcReq.User())
+ assert.Equal(t, "user-1", mcReq.User().Id)
+ require.NotNil(t, mcReq.Directives())
+ assert.True(t, mcReq.Directives().SkipCacheRead)
+
+ jrq, err := mcReq.JsonRpcRequest()
+ require.NoError(t, err)
+ require.NotNil(t, jrq)
+ assert.Equal(t, "eth_call", jrq.Method)
+ require.Len(t, jrq.Params, 2)
+
+ callObj, ok := jrq.Params[0].(map[string]interface{})
+ require.True(t, ok)
+ assert.Equal(t, multicall3Address, callObj["to"])
+
+ encodedCalls, err := encodeAggregate3Calls(calls)
+ require.NoError(t, err)
+ dataHex, ok := callObj["data"].(string)
+ require.True(t, ok)
+ assert.Equal(t, "0x"+fmt.Sprintf("%x", encodedCalls), dataHex)
+ assert.Equal(t, "latest", jrq.Params[1])
+
+ assert.Equal(t, 20, len(calls[0].Target))
+ assert.Equal(t, 1, len(calls[0].CallData))
+ assert.Equal(t, 32, len(calls[1].CallData))
+}
+
+func TestBuildMulticall3Request_LargeBatchOrder(t *testing.T) {
+ requests := make([]*common.NormalizedRequest, 0, 12)
+ for i := 0; i < 12; i++ {
+ callObj := map[string]interface{}{
+ "to": hexAddr(i + 1),
+ "data": hexData(i + 1),
+ }
+ requests = append(requests, newEthCallRequest(t, i+1, callObj, "latest"))
+ }
+
+ _, calls, err := BuildMulticall3Request(requests, "latest")
+ require.NoError(t, err)
+ require.Len(t, calls, len(requests))
+
+ for i, call := range calls {
+ assert.Same(t, requests[i], call.Request)
+ wantTarget, err := common.HexToBytes(hexAddr(i + 1))
+ require.NoError(t, err)
+ assert.Equal(t, wantTarget, call.Target)
+ wantData, err := common.HexToBytes(hexData(i + 1))
+ require.NoError(t, err)
+ assert.Equal(t, wantData, call.CallData)
+ }
+}
+
+func TestBuildMulticall3Request_Errors(t *testing.T) {
+ validCall := map[string]interface{}{
+ "to": hexAddr(3),
+ "data": "0x",
+ }
+
+ cases := []struct {
+ name string
+ requests []*common.NormalizedRequest
+ eligibleErr bool
+ unexpectedWrap bool
+ }{
+ {
+ name: "no requests",
+ requests: []*common.NormalizedRequest{},
+ eligibleErr: true,
+ },
+ {
+ name: "nil request",
+ requests: []*common.NormalizedRequest{nil},
+ eligibleErr: true,
+ },
+ {
+ name: "invalid json",
+ requests: []*common.NormalizedRequest{common.NewNormalizedRequest([]byte("{"))},
+ unexpectedWrap: true,
+ },
+ {
+ name: "wrong method",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_getBalance", []interface{}{hexAddr(1)}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "missing params",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "too many params",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{validCall, "latest", "extra"}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "call obj not map",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{123}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "missing to",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{map[string]interface{}{"data": "0x"}}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "data not string",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{map[string]interface{}{"to": hexAddr(1), "data": 1}}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "input not string",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{map[string]interface{}{"to": hexAddr(1), "input": 1}}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "extra key",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{map[string]interface{}{"to": hexAddr(1), "data": "0x", "value": "0x1"}}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "invalid to length",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{map[string]interface{}{"to": "0x1234", "data": "0x"}}, 1)},
+ eligibleErr: true,
+ },
+ {
+ name: "invalid data hex",
+ requests: []*common.NormalizedRequest{newJsonRpcRequest(t, "eth_call", []interface{}{map[string]interface{}{"to": hexAddr(1), "data": "0xzz"}}, 1)},
+ eligibleErr: true,
+ },
+ }
+
+ for _, tt := range cases {
+ t.Run(tt.name, func(t *testing.T) {
+ _, _, err := BuildMulticall3Request(tt.requests, "latest")
+ require.Error(t, err)
+ if tt.eligibleErr {
+ assert.ErrorIs(t, err, ErrMulticall3BatchNotEligible)
+ }
+ if tt.unexpectedWrap {
+ assert.False(t, errors.Is(err, ErrMulticall3BatchNotEligible))
+ }
+ })
+ }
+}
+
+func TestNewMulticall3Call(t *testing.T) {
+ req := newEthCallRequest(t, 1, map[string]interface{}{"to": hexAddr(1)}, "latest")
+
+ call, err := NewMulticall3Call(req, hexAddr(1), "0x")
+ require.NoError(t, err)
+ assert.Equal(t, req, call.Request)
+ assert.Len(t, call.Target, 20)
+ assert.Empty(t, call.CallData)
+
+ _, err = NewMulticall3Call(nil, hexAddr(1), "0x")
+ assert.ErrorIs(t, err, ErrMulticall3BatchNotEligible)
+
+ _, err = NewMulticall3Call(req, "0x1234", "0x")
+ assert.ErrorIs(t, err, ErrMulticall3BatchNotEligible)
+
+ _, err = NewMulticall3Call(req, hexAddr(1), "0xzz")
+ assert.ErrorIs(t, err, ErrMulticall3BatchNotEligible)
+}
+
+func TestDecodeMulticall3Aggregate3Result(t *testing.T) {
+ results := []Multicall3Result{
+ {Success: true, ReturnData: []byte{0x01, 0x02}},
+ {Success: false, ReturnData: nil},
+ }
+
+ encoded := encodeAggregate3Results(results)
+ decoded, err := DecodeMulticall3Aggregate3Result(encoded)
+ require.NoError(t, err)
+ assert.Equal(t, results, decoded)
+
+ emptyEncoded := encodeAggregate3Results(nil)
+ emptyDecoded, err := DecodeMulticall3Aggregate3Result(emptyEncoded)
+ require.NoError(t, err)
+ assert.Empty(t, emptyDecoded)
+}
+
+func TestDecodeMulticall3Aggregate3Result_Errors(t *testing.T) {
+ cases := []struct {
+ name string
+ data []byte
+ }{
+ {
+ name: "too short",
+ data: []byte{0x01},
+ },
+ {
+ name: "offset out of bounds",
+ data: encodeUint64(64),
+ },
+ {
+ name: "offsets out of bounds",
+ data: append(encodeUint64(32), encodeUint64(2)...),
+ },
+ {
+ name: "count exceeds available data",
+			// Declare an element count far larger than the remaining payload so the
+			// decoder must detect that count*32 bytes of offsets are not available
+ data: func() []byte {
+ d := make([]byte, 96)
+ copy(d[0:32], encodeUint64(32)) // offset to array
+ copy(d[32:64], encodeUint64(0x7FFFFFFF)) // huge count that exceeds data
+ return d
+ }(),
+ },
+ {
+ name: "element out of bounds",
+ data: buildAggregate3ResultWithOffset(96, nil, nil),
+ },
+ {
+ name: "bytes offset out of bounds",
+ data: buildAggregate3ResultWithElement(64, encodeBool(true), encodeUint64(256)),
+ },
+ {
+ name: "bytes length out of bounds",
+ data: buildAggregate3ResultBytesLengthOutOfBounds(),
+ },
+ }
+
+ for _, tt := range cases {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := DecodeMulticall3Aggregate3Result(tt.data)
+ require.Error(t, err)
+ })
+ }
+
+ _, err := readUint256([]byte{0x01})
+ require.Error(t, err)
+
+ overflow := make([]byte, 32)
+ overflow[0] = 1
+ _, err = readUint256(overflow)
+ require.Error(t, err)
+
+ _, err = readBool([]byte{0x01})
+ require.Error(t, err)
+}
+
+func TestShouldFallbackMulticall3(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ want bool
+ }{
+ {
+ name: "nil error",
+ err: nil,
+ want: false,
+ },
+ {
+ name: "unsupported endpoint",
+ err: common.NewErrEndpointUnsupported(errors.New("boom")),
+ want: true,
+ },
+ {
+ name: "execution exception with contract not found",
+ err: common.NewErrEndpointExecutionException(errors.New("contract not found")),
+ want: true,
+ },
+ {
+ name: "execution exception with no code at address",
+ err: common.NewErrEndpointExecutionException(errors.New("no code at address 0x123")),
+ want: true,
+ },
+ {
+ name: "execution exception with execution reverted - no fallback",
+ err: common.NewErrEndpointExecutionException(errors.New("execution reverted")),
+ want: false, // Generic reverts should NOT trigger fallback - they would also revert individually
+ },
+ {
+ name: "execution exception with code is empty",
+ err: common.NewErrEndpointExecutionException(errors.New("code is empty at 0xCA11bde05977b3631167028862bE2a173976CA11")),
+ want: true,
+ },
+ {
+ name: "execution exception with missing trie node",
+ err: common.NewErrEndpointExecutionException(errors.New("missing trie node abc123")),
+ want: true,
+ },
+ {
+ name: "execution exception with account not found",
+ err: common.NewErrEndpointExecutionException(errors.New("account not found")),
+ want: true,
+ },
+ {
+ name: "execution exception with generic error - no fallback",
+ err: common.NewErrEndpointExecutionException(errors.New("some other error")),
+ want: false,
+ },
+ {
+ name: "execution exception with rate limit - no fallback",
+ err: common.NewErrEndpointExecutionException(errors.New("rate limited")),
+ want: false,
+ },
+ {
+ name: "non-execution error",
+ err: errors.New("nope"),
+ want: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := ShouldFallbackMulticall3(tt.err)
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func encodeAggregate3Results(results []Multicall3Result) []byte {
+ // Offsets are relative to start of array content (after length word),
+ // so the offset table size is just N*32 (not including the length word)
+ offsetTableSize := 32 * len(results)
+ offsets := make([]uint64, len(results))
+ elems := make([][]byte, len(results))
+ cur := uint64(offsetTableSize)
+
+ for i, res := range results {
+ elems[i] = encodeAggregate3ResultElement(res)
+ offsets[i] = cur
+ cur += uint64(len(elems[i]))
+ }
+
+ array := make([]byte, 0, int(cur))
+ array = append(array, encodeUint64(uint64(len(results)))...)
+ for _, off := range offsets {
+ array = append(array, encodeUint64(off)...)
+ }
+ for _, elem := range elems {
+ array = append(array, elem...)
+ }
+
+ out := make([]byte, 0, 32+len(array))
+ out = append(out, encodeUint64(32)...)
+ out = append(out, array...)
+ return out
+}
+
+func encodeAggregate3ResultElement(result Multicall3Result) []byte {
+ head := make([]byte, 0, 64)
+ head = append(head, encodeBool(result.Success)...)
+ head = append(head, encodeUint64(64)...)
+ tail := encodeBytes(result.ReturnData)
+ return append(head, tail...)
+}
+
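+// buildAggregate3ResultWithOffset encodes an aggregate3 result header (array offset, element
+// count, first element offset) with no element payload, to exercise out-of-bounds decode paths.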
+func buildAggregate3ResultWithOffset(offset uint64, count []byte, elemOffset []byte) []byte {
+ data := make([]byte, 96)
+ copy(data, encodeUint64(32))
+ if count != nil {
+ copy(data[32:], count)
+ } else {
+ copy(data[32:], encodeUint64(1))
+ }
+ if elemOffset != nil {
+ copy(data[64:], elemOffset)
+ } else {
+ copy(data[64:], encodeUint64(offset))
+ }
+ return data
+}
+
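+// buildAggregate3ResultWithElement encodes a single-element aggregate3 result whose element
+// head words (success flag, returnData offset) are supplied by the caller.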
+func buildAggregate3ResultWithElement(elemOffset uint64, head ...[]byte) []byte {
+ data := make([]byte, 160)
+ copy(data, encodeUint64(32))
+ copy(data[32:], encodeUint64(1))
+ copy(data[64:], encodeUint64(elemOffset))
+ copy(data[96:], head[0])
+ copy(data[128:], head[1])
+ return data
+}
+
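+// buildAggregate3ResultBytesLengthOutOfBounds encodes a single-element result whose returnData
+// length word points past the end of the payload.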
+func buildAggregate3ResultBytesLengthOutOfBounds() []byte {
+ data := make([]byte, 192)
+ copy(data, encodeUint64(32))
+ copy(data[32:], encodeUint64(1))
+ copy(data[64:], encodeUint64(64))
+ copy(data[96:], encodeBool(true))
+ copy(data[128:], encodeUint64(64))
+ copy(data[160:], encodeUint64(128))
+ return data
+}
+
+func hexAddr(n int) string {
+ return fmt.Sprintf("0x%040x", n)
+}
+
+func hexData(size int) string {
+ return "0x" + strings.Repeat("11", size)
+}
+
+func newEthCallRequest(t *testing.T, id interface{}, callObj map[string]interface{}, blockParam interface{}) *common.NormalizedRequest {
+ t.Helper()
+ params := []interface{}{callObj}
+ if blockParam != nil {
+ params = append(params, blockParam)
+ }
+ jr := common.NewJsonRpcRequest("eth_call", params)
+ if id != nil {
+ require.NoError(t, jr.SetID(id))
+ }
+ return common.NewNormalizedRequestFromJsonRpcRequest(jr)
+}
+
+func newJsonRpcRequest(t *testing.T, method string, params []interface{}, id interface{}) *common.NormalizedRequest {
+ t.Helper()
+ jr := common.NewJsonRpcRequest(method, params)
+ require.NoError(t, jr.SetID(id))
+ return common.NewNormalizedRequestFromJsonRpcRequest(jr)
+}
diff --git a/common/config.go b/common/config.go
index cc27f4991..15e272499 100644
--- a/common/config.go
+++ b/common/config.go
@@ -2,6 +2,8 @@ package common
import (
"bytes"
+ "encoding/hex"
+ "encoding/json"
"fmt"
"maps"
"os"
@@ -926,13 +928,14 @@ func (c *EvmUpstreamConfig) Copy() *EvmUpstreamConfig {
}
type FailsafeConfig struct {
- MatchMethod string `yaml:"matchMethod,omitempty" json:"matchMethod"`
- MatchFinality []DataFinalityState `yaml:"matchFinality,omitempty" json:"matchFinality"`
- Retry *RetryPolicyConfig `yaml:"retry" json:"retry"`
- CircuitBreaker *CircuitBreakerPolicyConfig `yaml:"circuitBreaker" json:"circuitBreaker"`
- Timeout *TimeoutPolicyConfig `yaml:"timeout" json:"timeout"`
- Hedge *HedgePolicyConfig `yaml:"hedge" json:"hedge"`
- Consensus *ConsensusPolicyConfig `yaml:"consensus" json:"consensus"`
+ MatchMethod string `yaml:"matchMethod,omitempty" json:"matchMethod"`
+ MatchFinality []DataFinalityState `yaml:"matchFinality,omitempty" json:"matchFinality"`
+ MatchUpstreamGroup string `yaml:"matchUpstreamGroup,omitempty" json:"matchUpstreamGroup"`
+ Retry *RetryPolicyConfig `yaml:"retry" json:"retry"`
+ CircuitBreaker *CircuitBreakerPolicyConfig `yaml:"circuitBreaker" json:"circuitBreaker"`
+ Timeout *TimeoutPolicyConfig `yaml:"timeout" json:"timeout"`
+ Hedge *HedgePolicyConfig `yaml:"hedge" json:"hedge"`
+ Consensus *ConsensusPolicyConfig `yaml:"consensus" json:"consensus"`
}
func (c *FailsafeConfig) Copy() *FailsafeConfig {
@@ -1548,6 +1551,243 @@ type EvmNetworkConfig struct {
// to work safely with transaction broadcasting.
// Set to false to disable this behavior and return raw upstream errors.
IdempotentTransactionBroadcast *bool `yaml:"idempotentTransactionBroadcast,omitempty" json:"idempotentTransactionBroadcast,omitempty"`
+
+ // Multicall3Aggregation configures aggregating eth_call requests into Multicall3.
+ // Accepts either a boolean (backward compat) or a full config object.
+ // Default: enabled with default settings
+ Multicall3Aggregation *Multicall3AggregationConfig `yaml:"multicall3Aggregation,omitempty" json:"multicall3Aggregation,omitempty"`
+}
+
+// Multicall3AggregationConfig configures network-level batching of eth_call requests
+// into Multicall3 aggregate calls. This batches requests across all entrypoints
+// (HTTP single, HTTP batch, gRPC) rather than just JSON-RPC batch requests.
+type Multicall3AggregationConfig struct {
+ // Enabled enables/disables Multicall3 aggregation. Default: true
+ Enabled bool `yaml:"enabled" json:"enabled"`
+
+ // WindowMs is the maximum time (milliseconds) to wait for a batch to fill.
+ // Default: 25ms
+ WindowMs int `yaml:"windowMs,omitempty" json:"windowMs"`
+
+ // MinWaitMs is the minimum time (milliseconds) to wait for additional requests
+ // to join a batch. Default: 2ms
+ MinWaitMs int `yaml:"minWaitMs,omitempty" json:"minWaitMs"`
+
+ // SafetyMarginMs is subtracted from request deadlines when computing flush time.
+ // Default: min(2, MinWaitMs)
+ SafetyMarginMs int `yaml:"safetyMarginMs,omitempty" json:"safetyMarginMs"`
+
+ // OnlyIfPending: if true, don't add latency unless a batch is already open.
+ // Default: false
+ OnlyIfPending bool `yaml:"onlyIfPending,omitempty" json:"onlyIfPending"`
+
+ // MaxCalls is the maximum number of calls per batch. Default: 20
+ MaxCalls int `yaml:"maxCalls,omitempty" json:"maxCalls"`
+
+ // MaxCalldataBytes is the maximum total calldata size per batch. Default: 64000
+ MaxCalldataBytes int `yaml:"maxCalldataBytes,omitempty" json:"maxCalldataBytes"`
+
+ // MaxQueueSize is the maximum total enqueued requests across all batches.
+ // Default: 1000
+ MaxQueueSize int `yaml:"maxQueueSize,omitempty" json:"maxQueueSize"`
+
+ // MaxPendingBatches is the maximum number of distinct batch keys.
+ // Default: 200
+ MaxPendingBatches int `yaml:"maxPendingBatches,omitempty" json:"maxPendingBatches"`
+
+ // CachePerCall enables per-call cache writes after successful Multicall3.
+ // Default: true
+ CachePerCall *bool `yaml:"cachePerCall,omitempty" json:"cachePerCall"`
+
+ // AllowCrossUserBatching: if true, requests from different users can share a batch.
+ // Default: true
+ AllowCrossUserBatching *bool `yaml:"allowCrossUserBatching,omitempty" json:"allowCrossUserBatching"`
+
+ // AllowPendingTagBatching: if true, allow batching calls with "pending" block tag.
+ // Default: false
+ AllowPendingTagBatching bool `yaml:"allowPendingTagBatching,omitempty" json:"allowPendingTagBatching"`
+
+ // AutoDetectBypass: if true, automatically detect contracts that revert when called
+ // via Multicall3 (e.g., contracts checking msg.sender code size). When a call reverts
+ // in a batch but succeeds individually, the contract is added to a runtime bypass cache.
+ // Default: false
+ AutoDetectBypass bool `yaml:"autoDetectBypass,omitempty" json:"autoDetectBypass"`
+
+ // BypassContracts is a list of contract addresses that should NOT be batched via Multicall3.
+ // Use this for contracts that check if msg.sender has code (e.g., Chronicle Oracle feeds)
+ // and revert when called from a contract. Addresses are case-insensitive.
+ // Example: ["0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"]
+ BypassContracts []string `yaml:"bypassContracts,omitempty" json:"bypassContracts,omitempty"`
+
+ // bypassContractsMap is a pre-computed map for O(1) lookups (lowercase addresses without 0x prefix)
+ bypassContractsMap map[string]bool `yaml:"-" json:"-"`
+}
+
+// SetDefaults applies default values to unset fields
+func (c *Multicall3AggregationConfig) SetDefaults() {
+ if c.WindowMs == 0 {
+ c.WindowMs = 25
+ }
+ if c.MinWaitMs == 0 {
+ c.MinWaitMs = 2
+ }
+ if c.SafetyMarginMs == 0 {
+ c.SafetyMarginMs = min(2, c.MinWaitMs)
+ }
+ if c.MaxCalls == 0 {
+ c.MaxCalls = 20
+ }
+ if c.MaxCalldataBytes == 0 {
+ c.MaxCalldataBytes = 64000
+ }
+ if c.MaxQueueSize == 0 {
+ c.MaxQueueSize = 1000
+ }
+ if c.MaxPendingBatches == 0 {
+ c.MaxPendingBatches = 200
+ }
+ if c.CachePerCall == nil {
+ c.CachePerCall = &TRUE
+ }
+ if c.AllowCrossUserBatching == nil {
+ c.AllowCrossUserBatching = &TRUE
+ }
+ // Initialize bypass contracts map for O(1) lookups
+ c.initBypassContractsMap()
+}
+
+// initBypassContractsMap builds the internal map for fast bypass lookups.
+// Addresses are normalized to lowercase without the 0x/0X prefix.
+func (c *Multicall3AggregationConfig) initBypassContractsMap() {
+ if len(c.BypassContracts) == 0 {
+ c.bypassContractsMap = nil
+ return
+ }
+ c.bypassContractsMap = make(map[string]bool, len(c.BypassContracts))
+ for _, addr := range c.BypassContracts {
+ // Normalize: lowercase and remove 0x/0X prefix
+ normalized := strings.ToLower(addr)
+ normalized = strings.TrimPrefix(normalized, "0x")
+ if normalized != "" {
+ c.bypassContractsMap[normalized] = true
+ }
+ }
+}
+
+// ShouldBypassContract checks if the given contract address should bypass multicall3 batching.
+// The address should be the raw 20-byte address (not hex-encoded).
+func (c *Multicall3AggregationConfig) ShouldBypassContract(target []byte) bool {
+ if c.bypassContractsMap == nil || len(target) == 0 {
+ return false
+ }
+ // Convert target bytes to lowercase hex string (without 0x prefix)
+ targetHex := strings.ToLower(hex.EncodeToString(target))
+ return c.bypassContractsMap[targetHex]
+}
+
+// ShouldBypassContractHex checks if the given hex-encoded contract address should bypass multicall3 batching.
+// The address can be with or without the 0x/0X prefix, and is case-insensitive.
+func (c *Multicall3AggregationConfig) ShouldBypassContractHex(targetHex string) bool {
+ if c.bypassContractsMap == nil || targetHex == "" {
+ return false
+ }
+ // Normalize: lowercase and remove 0x/0X prefix
+ normalized := strings.ToLower(targetHex)
+ normalized = strings.TrimPrefix(normalized, "0x")
+ return c.bypassContractsMap[normalized]
+}
+
+// IsValid checks if the config values are valid
+func (c *Multicall3AggregationConfig) IsValid() error {
+ if c.WindowMs <= 0 {
+ return fmt.Errorf("multicall3Aggregation.windowMs must be > 0")
+ }
+ if c.MinWaitMs < 0 {
+ return fmt.Errorf("multicall3Aggregation.minWaitMs must be >= 0")
+ }
+ if c.MinWaitMs > c.WindowMs {
+ return fmt.Errorf("multicall3Aggregation.minWaitMs must be <= windowMs")
+ }
+ if c.MaxCalls <= 1 {
+ return fmt.Errorf("multicall3Aggregation.maxCalls must be > 1")
+ }
+ if c.MaxCalldataBytes <= 0 {
+ return fmt.Errorf("multicall3Aggregation.maxCalldataBytes must be > 0")
+ }
+ if c.MaxQueueSize <= 0 {
+ return fmt.Errorf("multicall3Aggregation.maxQueueSize must be > 0")
+ }
+ if c.MaxPendingBatches <= 0 {
+ return fmt.Errorf("multicall3Aggregation.maxPendingBatches must be > 0")
+ }
+ // Validate bypass contract addresses
+ for _, addr := range c.BypassContracts {
+ normalized := strings.ToLower(strings.TrimPrefix(strings.TrimPrefix(addr, "0x"), "0X"))
+ if normalized == "" {
+ return fmt.Errorf("multicall3Aggregation.bypassContracts contains empty address")
+ }
+ // Ethereum addresses are 20 bytes = 40 hex characters
+ if len(normalized) != 40 {
+ return fmt.Errorf("multicall3Aggregation.bypassContracts contains invalid address %q (expected 40 hex characters, got %d)", addr, len(normalized))
+ }
+ // Validate hex characters
+		for _, ch := range normalized {
+			if !((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f')) {
+ return fmt.Errorf("multicall3Aggregation.bypassContracts contains invalid address %q (non-hex character)", addr)
+ }
+ }
+ }
+ return nil
+}
+
+// UnmarshalYAML implements backward compatibility for boolean config values
+func (c *Multicall3AggregationConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ // Try bool first (backward compat)
+ var boolVal bool
+ if err := unmarshal(&boolVal); err == nil {
+ c.Enabled = boolVal
+ if boolVal {
+ c.SetDefaults()
+ }
+ return nil
+ }
+
+ // Try full config
+ type rawConfig Multicall3AggregationConfig
+ var raw rawConfig
+ if err := unmarshal(&raw); err != nil {
+ return err
+ }
+ *c = Multicall3AggregationConfig(raw)
+ if c.Enabled {
+ c.SetDefaults()
+ }
+ return nil
+}
+
+// UnmarshalJSON implements backward compatibility for boolean config values (for TypeScript configs)
+func (c *Multicall3AggregationConfig) UnmarshalJSON(data []byte) error {
+ // Try bool first (backward compat)
+ var boolVal bool
+ if err := json.Unmarshal(data, &boolVal); err == nil {
+ c.Enabled = boolVal
+ if boolVal {
+ c.SetDefaults()
+ }
+ return nil
+ }
+
+ // Try full config
+ type rawConfig Multicall3AggregationConfig
+ var raw rawConfig
+ if err := json.Unmarshal(data, &raw); err != nil {
+ return err
+ }
+ *c = Multicall3AggregationConfig(raw)
+ if c.Enabled {
+ c.SetDefaults()
+ }
+ return nil
}
// EvmIntegrityConfig is deprecated. Use DirectiveDefaultsConfig for validation settings.
diff --git a/common/config_test.go b/common/config_test.go
index 361450ee8..a7a0cd3b6 100644
--- a/common/config_test.go
+++ b/common/config_test.go
@@ -7,6 +7,7 @@ import (
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
@@ -838,3 +839,311 @@ projects:
assert.Equal(t, 5, network.Failsafe[1].Retry.MaxAttempts)
})
}
+
+func TestMulticall3AggregationConfigYAML(t *testing.T) {
+ yamlStr := `
+evm:
+ chainId: 1
+ multicall3Aggregation:
+ enabled: true
+ windowMs: 25
+ minWaitMs: 2
+ safetyMarginMs: 2
+ maxCalls: 20
+ maxCalldataBytes: 64000
+ maxQueueSize: 1000
+ maxPendingBatches: 200
+ cachePerCall: true
+ allowCrossUserBatching: true
+ allowPendingTagBatching: false
+`
+ var cfg NetworkConfig
+ err := yaml.Unmarshal([]byte(yamlStr), &cfg)
+ require.NoError(t, err)
+ require.NotNil(t, cfg.Evm)
+ require.NotNil(t, cfg.Evm.Multicall3Aggregation)
+ require.True(t, cfg.Evm.Multicall3Aggregation.Enabled)
+ require.Equal(t, 25, cfg.Evm.Multicall3Aggregation.WindowMs)
+ require.Equal(t, 2, cfg.Evm.Multicall3Aggregation.MinWaitMs)
+ require.Equal(t, 2, cfg.Evm.Multicall3Aggregation.SafetyMarginMs)
+ require.Equal(t, 20, cfg.Evm.Multicall3Aggregation.MaxCalls)
+ require.Equal(t, 64000, cfg.Evm.Multicall3Aggregation.MaxCalldataBytes)
+ require.Equal(t, 1000, cfg.Evm.Multicall3Aggregation.MaxQueueSize)
+ require.Equal(t, 200, cfg.Evm.Multicall3Aggregation.MaxPendingBatches)
+ require.NotNil(t, cfg.Evm.Multicall3Aggregation.CachePerCall)
+ require.True(t, *cfg.Evm.Multicall3Aggregation.CachePerCall)
+ require.NotNil(t, cfg.Evm.Multicall3Aggregation.AllowCrossUserBatching)
+ require.True(t, *cfg.Evm.Multicall3Aggregation.AllowCrossUserBatching)
+ require.False(t, cfg.Evm.Multicall3Aggregation.AllowPendingTagBatching)
+}
+
+func TestMulticall3AggregationConfigBoolBackcompat(t *testing.T) {
+ // Test backward compatibility with bool value
+ yamlStr := `
+evm:
+ chainId: 1
+ multicall3Aggregation: true
+`
+ var cfg NetworkConfig
+ err := yaml.Unmarshal([]byte(yamlStr), &cfg)
+ require.NoError(t, err)
+ require.NotNil(t, cfg.Evm)
+ require.NotNil(t, cfg.Evm.Multicall3Aggregation)
+ require.True(t, cfg.Evm.Multicall3Aggregation.Enabled)
+ // Check defaults are applied when bool is true
+ require.Equal(t, 25, cfg.Evm.Multicall3Aggregation.WindowMs)
+ require.Equal(t, 20, cfg.Evm.Multicall3Aggregation.MaxCalls)
+}
+
+func TestMulticall3AggregationConfigBoolFalse(t *testing.T) {
+ // Test backward compatibility with false value
+ yamlStr := `
+evm:
+ chainId: 1
+ multicall3Aggregation: false
+`
+ var cfg NetworkConfig
+ err := yaml.Unmarshal([]byte(yamlStr), &cfg)
+ require.NoError(t, err)
+ require.NotNil(t, cfg.Evm)
+ require.NotNil(t, cfg.Evm.Multicall3Aggregation)
+ require.False(t, cfg.Evm.Multicall3Aggregation.Enabled)
+}
+
+func TestMulticall3AggregationConfigDefaults(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true}
+ cfg.SetDefaults()
+ require.Equal(t, 25, cfg.WindowMs)
+ require.Equal(t, 2, cfg.MinWaitMs)
+ require.Equal(t, 2, cfg.SafetyMarginMs)
+ require.Equal(t, 20, cfg.MaxCalls)
+ require.Equal(t, 64000, cfg.MaxCalldataBytes)
+ require.Equal(t, 1000, cfg.MaxQueueSize)
+ require.Equal(t, 200, cfg.MaxPendingBatches)
+ require.NotNil(t, cfg.CachePerCall)
+ require.True(t, *cfg.CachePerCall)
+ require.NotNil(t, cfg.AllowCrossUserBatching)
+ require.True(t, *cfg.AllowCrossUserBatching)
+}
+
+func TestMulticall3AggregationConfigIsValid(t *testing.T) {
+ t.Run("valid config", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true}
+ cfg.SetDefaults()
+ err := cfg.IsValid()
+ require.NoError(t, err)
+ })
+
+ t.Run("windowMs must be positive", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true, WindowMs: 0}
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "windowMs must be > 0")
+ })
+
+ t.Run("minWaitMs must not be negative", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true, WindowMs: 25, MinWaitMs: -1}
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "minWaitMs must be >= 0")
+ })
+
+ t.Run("minWaitMs must be <= windowMs", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true, WindowMs: 10, MinWaitMs: 20}
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "minWaitMs must be <= windowMs")
+ })
+
+ t.Run("maxCalls must be > 1", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true, WindowMs: 25, MinWaitMs: 2, MaxCalls: 1}
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "maxCalls must be > 1")
+ })
+
+ t.Run("maxCalldataBytes must be positive", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{Enabled: true, WindowMs: 25, MinWaitMs: 2, MaxCalls: 20, MaxCalldataBytes: 0}
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "maxCalldataBytes must be > 0")
+ })
+
+ t.Run("maxQueueSize must be positive", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 25,
+ MinWaitMs: 2,
+ MaxCalls: 20,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 0,
+ }
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "maxQueueSize must be > 0")
+ })
+
+ t.Run("maxPendingBatches must be positive", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ WindowMs: 25,
+ MinWaitMs: 2,
+ MaxCalls: 20,
+ MaxCalldataBytes: 64000,
+ MaxQueueSize: 1000,
+ MaxPendingBatches: 0,
+ }
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "maxPendingBatches must be > 0")
+ })
+
+ t.Run("bypassContracts valid addresses", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "0xABCDEF0123456789ABCDEF0123456789ABCDEF01",
+ "1111111111111111111111111111111111111111", // without 0x prefix
+ },
+ }
+ cfg.SetDefaults()
+ err := cfg.IsValid()
+ require.NoError(t, err)
+ })
+
+ t.Run("bypassContracts empty address rejected", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "", // empty
+ },
+ }
+ cfg.SetDefaults()
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "contains empty address")
+ })
+
+ t.Run("bypassContracts invalid length rejected", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "0x1234", // too short
+ },
+ }
+ cfg.SetDefaults()
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "expected 40 hex characters")
+ })
+
+ t.Run("bypassContracts non-hex characters rejected", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "0xZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ", // non-hex
+ },
+ }
+ cfg.SetDefaults()
+ err := cfg.IsValid()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "non-hex character")
+ })
+}
+
+func TestMulticall3AggregationConfigBypassContracts(t *testing.T) {
+ t.Run("ShouldBypassContractHex with empty list", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{},
+ }
+ cfg.SetDefaults()
+
+ // Should not bypass any contract when list is empty
+ require.False(t, cfg.ShouldBypassContractHex("0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"))
+ require.False(t, cfg.ShouldBypassContractHex(""))
+ })
+
+ t.Run("ShouldBypassContractHex with configured contracts", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "0xABCDEF0123456789ABCDEF0123456789ABCDEF01",
+ },
+ }
+ cfg.SetDefaults()
+
+ // Exact match
+ require.True(t, cfg.ShouldBypassContractHex("0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"))
+ require.True(t, cfg.ShouldBypassContractHex("0xABCDEF0123456789ABCDEF0123456789ABCDEF01"))
+
+ // Case-insensitive matching
+ require.True(t, cfg.ShouldBypassContractHex("0x057f30e63a69175c69a4af5656b8c9ee647de3d0"))
+ require.True(t, cfg.ShouldBypassContractHex("0x057F30E63A69175C69A4AF5656B8C9EE647DE3D0"))
+
+ // Without 0x prefix
+ require.True(t, cfg.ShouldBypassContractHex("057f30e63A69175C69A4Af5656b8C9EE647De3D0"))
+
+ // Not in list
+ require.False(t, cfg.ShouldBypassContractHex("0x1111111111111111111111111111111111111111"))
+ require.False(t, cfg.ShouldBypassContractHex(""))
+ })
+
+ t.Run("ShouldBypassContract with raw bytes", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ },
+ }
+ cfg.SetDefaults()
+
+ // Matching address bytes
+ matchingAddr, _ := HexToBytes("0x057f30e63A69175C69A4Af5656b8C9EE647De3D0")
+ require.True(t, cfg.ShouldBypassContract(matchingAddr))
+
+ // Non-matching address bytes
+ otherAddr, _ := HexToBytes("0x1111111111111111111111111111111111111111")
+ require.False(t, cfg.ShouldBypassContract(otherAddr))
+
+ // Empty bytes
+ require.False(t, cfg.ShouldBypassContract(nil))
+ require.False(t, cfg.ShouldBypassContract([]byte{}))
+ })
+
+ t.Run("BypassContracts handles invalid entries gracefully", func(t *testing.T) {
+ cfg := &Multicall3AggregationConfig{
+ Enabled: true,
+ BypassContracts: []string{
+ "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0",
+ "", // empty string
+				" ",       // whitespace only; stored as-is and never matches a normalized address
+ "0x", // just prefix
+ "invalid", // non-hex characters
+ },
+ }
+ cfg.SetDefaults()
+
+ // Valid entry should still work
+ require.True(t, cfg.ShouldBypassContractHex("0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"))
+
+ // Empty/invalid entries should be ignored (not panic)
+ require.False(t, cfg.ShouldBypassContractHex(""))
+ })
+
+ t.Run("nil config does not panic", func(t *testing.T) {
+ var cfg *Multicall3AggregationConfig
+ // Should not panic, just return false
+ require.NotPanics(t, func() {
+			// ShouldBypassContractHex has no nil-receiver guard, so only call it on a non-nil config
+ if cfg != nil {
+ cfg.ShouldBypassContractHex("0x1234")
+ }
+ })
+ })
+}
diff --git a/common/defaults.go b/common/defaults.go
index 4e726e1f6..6e22553dc 100644
--- a/common/defaults.go
+++ b/common/defaults.go
@@ -1803,6 +1803,10 @@ func (n *NetworkConfig) SetDefaults(upstreams []*UpstreamConfig, defaults *Netwo
if n.Evm.GetLogsSplitConcurrency == 0 && defaults.Evm.GetLogsSplitConcurrency != 0 {
n.Evm.GetLogsSplitConcurrency = defaults.Evm.GetLogsSplitConcurrency
}
+ if n.Evm.Multicall3Aggregation == nil && defaults.Evm.Multicall3Aggregation != nil {
+ n.Evm.Multicall3Aggregation = &Multicall3AggregationConfig{}
+ *n.Evm.Multicall3Aggregation = *defaults.Evm.Multicall3Aggregation
+ }
} else if n.Evm == nil && defaults.Evm != nil {
n.Evm = &EvmNetworkConfig{}
*n.Evm = *defaults.Evm
diff --git a/common/network.go b/common/network.go
index 44a843c67..660fac5cf 100644
--- a/common/network.go
+++ b/common/network.go
@@ -25,6 +25,7 @@ type Network interface {
GetMethodMetrics(method string) TrackedMetrics
Forward(ctx context.Context, nq *NormalizedRequest) (*NormalizedResponse, error)
GetFinality(ctx context.Context, req *NormalizedRequest, resp *NormalizedResponse) DataFinalityState
+ Cache() CacheDAL
// TODO Move to EvmNetwork interface?
EvmHighestLatestBlockNumber(ctx context.Context) int64
diff --git a/common/request.go b/common/request.go
index 44903d001..f02049409 100644
--- a/common/request.go
+++ b/common/request.go
@@ -18,6 +18,7 @@ const (
CompositeTypeNone = "none"
CompositeTypeLogsSplitOnError = "logs-split-on-error"
CompositeTypeLogsSplitProactive = "logs-split-proactive"
+ CompositeTypeMulticall3 = "multicall3"
)
const RequestContextKey ContextKey = "rq"
diff --git a/docs/design/multicall3-batching.md b/docs/design/multicall3-batching.md
new file mode 100644
index 000000000..7b3786c0b
--- /dev/null
+++ b/docs/design/multicall3-batching.md
@@ -0,0 +1,323 @@
+# Multicall3 Batching (Network-Level)
+
+Status: Implemented
+
+## Context
+The current Multicall3 batching lives in the HTTP batch handler. It only applies
+to JSON-RPC batch requests and is tightly coupled to the HTTP layer, which is
+not ideal for an EVM-only feature. The goal is to move batching to a deeper
+layer so it can batch any incoming `eth_call` (batch or single), while keeping
+network-level behaviors (cache, failover, circuit breakers).
+
+## Goals
+- Batch `eth_call` across all entrypoints (HTTP batch + single requests + gRPC).
+- Preserve network-level behaviors (cache, failover, upstream selection).
+- Preserve per-request rate limits and per-request metrics.
+- Maintain per-call cache writes and reuse existing Multicall3 encode/decode.
+- Keep batching opt-in and configurable per network.
+
+## Non-Goals
+- Batching non-EVM methods.
+- Supporting `eth_call` fields beyond `to` + `data|input`.
+- Enforcing upstream-specific caps in v1 (network-level caps only).
+
+## Placement
+Preferred: a pre-cache hook in `Network.Forward` (or in `PreparedProject.Forward`
+right before `p.doForward`). This keeps batching entrypoint-agnostic while still
+using `Network.Forward` for caching and failover.
+
+Avoid: `PreUpstream` batching, which is too late (upstream-selected) and makes
+cross-request aggregation difficult.
+
+## Eligibility
+A request is eligible if:
+- Method is `eth_call`.
+- Call object contains only `to` and `data|input`.
+- Request is not already a multicall (recursion guard).
+- Target contract is not in the `bypassContracts` list.
+- Calls with any of `from`, `gas`, `gasPrice`, `maxFeePerGas`,
+ `maxPriorityFeePerGas`, `value`, or a state override (third param) are
+ ineligible.
+
+## Bypass Contracts
+
+Some contracts check the caller's code size via `extcodesize(msg.sender)` and
+revert if the caller has code (i.e., is a contract). When routed through
+Multicall3, `msg.sender` becomes the Multicall3 contract address, so these
+calls revert.
+
+Use `bypassContracts` to exclude specific contracts from batching:
+
+```yaml
+evm:
+ multicall3Aggregation:
+ enabled: true
+ bypassContracts:
+ # Chronicle Oracle feeds check msg.sender code size
+ - "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"
+ # Other contracts that revert on contract callers
+ - "0xABCDEF0123456789ABCDEF0123456789ABCDEF01"
+```
+
+Addresses are case-insensitive and can be specified with or without the `0x`
+prefix. Calls to bypassed contracts are forwarded individually.
+
+## Auto-Detect Bypass
+
+Enable `autoDetectBypass` to automatically detect contracts that revert when
+called via Multicall3 but succeed when called individually:
+
+```yaml
+evm:
+ multicall3Aggregation:
+ enabled: true
+ autoDetectBypass: true
+```
+
+When a call reverts within a Multicall3 batch and `autoDetectBypass` is enabled:
+1. The call is retried individually (bypassing Multicall3).
+2. If the individual call succeeds, the contract is added to a runtime bypass
+ cache and future calls skip batching.
+3. If the individual call also fails, the original error is returned (no bypass
+ is added).
+
+This allows automatic discovery of contracts that check `msg.sender` code size
+(e.g., Chronicle Oracle) without requiring manual configuration.
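+
+A minimal sketch of this decision, assuming boolean inputs; the function name and
+signature below are illustrative, not the actual implementation:
+
+```go
+package multicall3sketch
+
+// shouldAddRuntimeBypass decides whether a contract joins the runtime bypass cache.
+// batchErrIsRevert reports that the call reverted inside a Multicall3 batch;
+// individualErr is the outcome of retrying the same call outside the batch (nil = success).
+func shouldAddRuntimeBypass(autoDetectBypass, batchErrIsRevert bool, individualErr error) bool {
+	if !autoDetectBypass || !batchErrIsRevert {
+		return false
+	}
+	// Only a call that fails batched but succeeds individually is a bypass candidate;
+	// if the individual retry also fails, the original error stands and nothing is added.
+	return individualErr == nil
+}
+```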
+
+Metrics:
+- `erpc_multicall3_runtime_bypass_total{project, network}`: Contracts added to
+ runtime bypass.
+- `erpc_multicall3_auto_detect_retry_total{project, network, outcome}`: Retry
+ tracking. Outcome values: `attempt` (retry initiated), `detected` (bypass
+ discovered - individual call succeeded), `same_error` (individual call also
+ failed, not a bypass candidate).
+
+Note: The runtime bypass cache is in-memory and resets on restart. For known
+contracts, use `bypassContracts` for persistent configuration.
+
+## Block Reference Normalization
+
+- Use `NormalizeBlockParam` on the `eth_call` block parameter.
+- `nil` becomes `latest`.
+- Hex block numbers (e.g. `0x10`) are normalized to decimal strings (`16`).
+- Block hash (`0x` + 32 bytes) stays hex.
+- Object params use `blockHash`, `blockNumber`, or `blockTag`.
+- Known tags (`latest`, `finalized`, `safe`, `earliest`, `pending`) are
+ lower-cased for keying to avoid duplicates.
+
+Batching with tags:
+- Default: allow `latest`, `finalized`, `safe`, `earliest`.
+- `pending` is disabled by default (configurable).
+- We do not resolve `latest` to a specific block number; all calls share the
+ same tag, and the execution block is the one seen by the upstream at call time.
+
+## Batching Key (User Isolation + Directive Key)
+Key fields:
+- `projectId`
+- `networkId`
+- `blockRef`
+- `directivesKey`
+- `userId` if `allowCrossUserBatching` is false
+
+Directives key uses a stable, versioned subset:
+- `UseUpstream`
+- `SkipInterpolation`
+- `RetryEmpty`
+- `RetryPending`
+- `SkipCacheRead` (optional; can be per-request, but include for clarity)
+
+Any new directive must be explicitly added to the subset or ignored. The
+directives key version is defined in code (not config) to avoid cross-node
+mismatches; a version bump should be part of a release note.
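+
+A minimal sketch of how such a key could be composed; the type and field names
+below are illustrative, not the actual eRPC types:
+
+```go
+package multicall3sketch
+
+// batchKey groups eth_call requests that are allowed to share one Multicall3 batch.
+type batchKey struct {
+	ProjectID     string
+	NetworkID     string
+	BlockRef      string // normalized block reference, e.g. "latest" or "12345"
+	DirectivesKey string // stable, versioned subset of request directives
+	UserID        string // left empty when allowCrossUserBatching is true
+}
+
+func newBatchKey(projectID, networkID, blockRef, directivesKey, userID string, allowCrossUser bool) batchKey {
+	k := batchKey{ProjectID: projectID, NetworkID: networkID, BlockRef: blockRef, DirectivesKey: directivesKey}
+	if !allowCrossUser {
+		// Only isolate per user when cross-user batching is disabled.
+		k.UserID = userID
+	}
+	return k
+}
+
+// mapKey flattens the key for use as a map index.
+func (k batchKey) mapKey() string {
+	return k.ProjectID + "|" + k.NetworkID + "|" + k.BlockRef + "|" + k.DirectivesKey + "|" + k.UserID
+}
+```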
+
+## Deduplication (Within Batch)
+To avoid TOCTOU (time-of-check/time-of-use) cache misses and duplicated calls
+(a `callKey` derivation sketch follows this list):
+- Maintain a `callKey` map inside each batch.
+- `callKey` uses the same derivation as the cache key (method + params).
+- Directives are already part of the batch key, so differing directives never
+ share a batch or a `callKey`.
+- Multiple identical requests share one multicall slot and fan out results
+ to all waiters.
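+
+A minimal sketch of a `callKey` derivation, assuming plain string inputs; the
+function name and key format are illustrative, not the actual cache-key code:
+
+```go
+package multicall3sketch
+
+import "strings"
+
+// callKey identifies one eth_call within a batch so identical calls share a slot.
+// Hex inputs are lower-cased so casing differences still dedupe together.
+func callKey(target, callData, blockRef string) string {
+	return strings.ToLower(target) + "|" + strings.ToLower(callData) + "|" + blockRef
+}
+```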
+
+## Batching Window and Deadlines
+Configurable timing:
+- `windowMs`: max wait time for a batch.
+- `minWaitMs`: minimum wait to allow other requests to join.
+- `safetyMarginMs`: subtracted from the earliest request deadline.
+- `onlyIfPending`: no extra latency unless a batch is already open.
+
+Deadline-aware flush rules (a Go sketch follows this list):
+- If `deadline <= now + minWaitMs`, bypass batching and forward individually.
+- Otherwise, `flushTime = min(flushTime, deadline - safetyMarginMs)`.
+- Clamp `flushTime` to at least `now + minWaitMs`.
+- If `flushTime <= now`, flush immediately.
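+
+A simplified model of these rules, with hypothetical parameter names (not the
+actual batcher code):
+
+```go
+package multicall3sketch
+
+import "time"
+
+// adjustFlushTime applies the deadline-aware rules above. The second return value
+// is false when the deadline is too tight and the request should bypass batching.
+func adjustFlushTime(now, flushTime, deadline time.Time, minWait, safetyMargin time.Duration) (time.Time, bool) {
+	// Deadline too tight: forward individually instead of batching.
+	if !deadline.After(now.Add(minWait)) {
+		return time.Time{}, false
+	}
+	// Never flush later than the request can tolerate.
+	if d := deadline.Add(-safetyMargin); d.Before(flushTime) {
+		flushTime = d
+	}
+	// Always allow at least minWait for other requests to join.
+	if earliest := now.Add(minWait); flushTime.Before(earliest) {
+		flushTime = earliest
+	}
+	return flushTime, true
+}
+```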
+
+Concurrent flush behavior:
+- If a batch for a key is already flushing, a new request for the same key
+ starts a new batch (next window) rather than joining the in-flight batch.
+
+## Caps and Backpressure
+Network-level caps:
+- `maxCalls`
+- `maxCalldataBytes`
+- `maxQueueSize` (global or per-key)
+- `maxPendingBatches`
+
+Behavior on overflow (an admission-check sketch follows this list):
+- Prefer bypassing batching (forward individually) and increment a metric.
+- Avoid unbounded memory growth.
+- `maxQueueSize` is the total enqueued requests across all batches; `maxPendingBatches`
+ is the number of distinct batch keys. If either limit would be exceeded, bypass
+ batching for that request.
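+
+A minimal sketch of the admission check, assuming simple counters; names are
+illustrative, not the actual implementation:
+
+```go
+package multicall3sketch
+
+// canEnqueue reports whether a request may join (or open) a batch.
+// totalQueued counts enqueued requests across all batches; pendingBatches counts
+// distinct open batch keys; keyAlreadyOpen is true when a batch for this key exists.
+func canEnqueue(totalQueued, pendingBatches, maxQueueSize, maxPendingBatches int, keyAlreadyOpen bool) bool {
+	if totalQueued >= maxQueueSize {
+		return false // bypass batching and forward individually
+	}
+	if !keyAlreadyOpen && pendingBatches >= maxPendingBatches {
+		return false // opening one more batch key would exceed the limit
+	}
+	return true
+}
+```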
+
+## Forwarding, Fallback, and Partial Failures
+1. Acquire per-request project + network rate limits.
+2. Build Multicall3 request, mark as composite
+ (e.g., `CompositeTypeMulticall3`) and set `skipNetworkRateLimit` context
+ (via `withSkipNetworkRateLimit`).
+3. Forward via `Network.Forward`. Fallback forwarding also uses
+ `skipNetworkRateLimit` to avoid double-counting.
+
+Fallback criteria:
+- Multicall request error with `ShouldFallbackMulticall3` (unsupported endpoint,
+ missing contract, known provider patterns).
+- Invalid or unusable response (RPC error, invalid hex, ABI decode failure,
+ length mismatch).
+
+Pattern matching is implementation-defined; see `architecture/evm/multicall3.go`.
+
+Partial failures:
+- If Multicall succeeds but an inner call returns `success=false`, return a
+ per-call `execution reverted` error. Do not fallback.
+- If Multicall succeeds and results count matches, map directly, no fallback.
+
+If fallback fails, propagate a single infrastructure error to all requests.
+
+## Error Propagation Semantics
+Per-call revert:
+- Return a standard `execution reverted` error with `data` field.
+
+Infrastructure failure:
+- All requests receive the same error.
+- Include metadata such as `{ "multicall3": true, "stage": "decode", "reason": "..." }`
+ to distinguish infra failures from per-call failures.
+
+## Cancellation Handling
+- If a request is canceled before flush, remove it from the batch and return
+ a context error for that request only.
+- Cancellation does not affect other requests in the batch.
+- Rate limit permits are not released (standard behavior). This keeps rate
+ limiting conservative and avoids races; a future enhancement could reclaim
+ permits for requests canceled before a flush.
+
+## Upstream Capability Handling
+Some upstreams may not support Multicall3 or have stricter limits.
+v1 decision: rely on `ShouldFallbackMulticall3` to detect unsupported upstreams
+and fallback to individual calls. A future v2 can add explicit upstream
+capability flags or filtering to avoid first-failure latency.
+
+## Cache Behavior
+Pre-batch cache lookup:
+- If `SkipCacheRead` is set, bypass per-request cache lookup.
+- Cached responses are returned immediately and removed from the batch.
+- For deduped requests, a single cached response satisfies all waiters.
+
+Per-call cache writes (sketched after this list):
+- On multicall success and cache eligible, write each call as if it were a
+ standalone `eth_call`.
+- Cache key is identical to a standalone request (method + params), which
+ matches `callKey` derivation for dedupe.
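+
+A minimal sketch of the per-call write loop, assuming a hypothetical cache
+interface rather than the actual eRPC cache API:
+
+```go
+package multicall3sketch
+
+// cacheWriter is a stand-in for the real cache layer.
+type cacheWriter interface {
+	Set(key string, value []byte) error
+}
+
+// decodedCall is one entry mapped back from the aggregate3 result.
+type decodedCall struct {
+	CacheKey   string // same key a standalone eth_call would produce (method + params)
+	Success    bool
+	ReturnData []byte
+}
+
+func writePerCallCache(cache cacheWriter, calls []decodedCall) {
+	for _, c := range calls {
+		if !c.Success {
+			continue // reverted calls are not cached
+		}
+		// Best-effort: a failed cache write must not fail the original request.
+		_ = cache.Set(c.CacheKey, c.ReturnData)
+	}
+}
+```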
+
+## Observability (Metrics + Tracing)
+Metrics:
+- `multicall3_aggregation_total{outcome}`
+- `multicall3_fallback_total{reason}`
+- `multicall3_cache_hits_total`
+- Optional: `multicall3_batch_size`, `multicall3_batch_wait_ms`,
+ `multicall3_queue_len`, `multicall3_queue_overflow_total`
+
+Aggregation outcome values:
+- `success`
+- `all_cached`
+- `fallback`
+- `error`
+- `bypassed` (ineligible, deadline-too-tight, or backpressure)
+
+Tracing:
+- Add spans for `Batcher.Enqueue`, `Batcher.Wait`, `Batcher.Flush`.
+- Link request spans to the multicall span; if batch size is small, also link
+ the multicall span back to request spans. Always attach a shared `batch.id`
+ attribute to both sides for correlation.
+
+## Config Proposal
+Extend `EvmNetworkConfig`:
+
+```yaml
+evm:
+ multicall3Aggregation:
+ enabled: true
+ windowMs: 25
+ minWaitMs: 2
+ safetyMarginMs: 2
+ onlyIfPending: false
+ maxCalls: 20
+ maxCalldataBytes: 64000
+ maxQueueSize: 1000
+ maxPendingBatches: 200
+ cachePerCall: true
+ allowCrossUserBatching: true
+ allowPendingTagBatching: false
+ autoDetectBypass: false
+ bypassContracts:
+ # Contracts that check msg.sender code size (e.g., Chronicle Oracle)
+ - "0x057f30e63A69175C69A4Af5656b8C9EE647De3D0"
+```
+
+Validation and defaults:
+- `windowMs > 0`, `minWaitMs >= 0`, `minWaitMs <= windowMs`
+- `safetyMarginMs` defaults to `min(2, minWaitMs)` when omitted
+ (note: if `minWaitMs=0`, this yields `0`; operators should set a non-zero
+ safety margin if they expect tight deadlines)
+- `maxCalls > 1`, `maxCalldataBytes > 0`, `maxQueueSize > 0`
+- If invalid, disable batching for that network and log a warning.
+
+Maintain backward compatibility with existing `multicall3Aggregation: true|false`.
+
+## Algorithm Sketch
+```
+Enqueue(req):
+ if not eligible or deadline too tight: return notHandled
+ if SkipCacheRead == false:
+ if cache hit: return cached response
+ if batch for key is flushing: create a new batch for key
+ add to batch key
+ if callKey duplicate: attach waiter and wait for result
+ adjust batch flush time (deadline-aware)
+ if caps reached: flush now
+ wait for flush or immediate join result
+
+Flush(batch):
+ build multicall req (mark as composite, skip rate limit)
+ forward via Network.Forward
+ if success: map responses + per-call cache writes
+ else if fallback-eligible: forward individually
+ else: propagate infra error to all
+```
+
+## Testing Plan
+- Unit: eligibility, normalization, keying, caps, deadline-aware flush.
+- Concurrency: batcher with multiple goroutines, cancellations, timeouts.
+- Concurrency: request arrives during flush for same batch key (new batch).
+- Integration: mixed batch/single calls, cache hits, fallback paths.
+- Rate limit: ensure per-request budgets are only consumed once.
+- Recursion: multicall request is never re-batched.
+- Chaos: inject upstream errors, invalid responses, length mismatches.
+
+## Migration Plan
+1. Implement batching in the network layer behind config.
+2. Keep HTTP-layer batching behind a kill switch or remove once stable.
+3. Enable on a subset of networks, monitor metrics, tune caps.
+
+## Open Questions
+- Should `SkipCacheRead` live in the key (simpler, more fragmentation) or be
+ handled per-request (more complex, better batching)?
+- Should batching of `pending` tag be allowed by default?
+- Should upstream-specific caps be introduced in v2?
+- Should permits be reclaimed for requests canceled before flush?
diff --git a/docs/pages/config/projects/networks.mdx b/docs/pages/config/projects/networks.mdx
index 2e40cbe99..86de2cb66 100644
--- a/docs/pages/config/projects/networks.mdx
+++ b/docs/pages/config/projects/networks.mdx
@@ -41,6 +41,48 @@ projects:
# When true (default), duplicate transaction errors are converted to success responses,
# allowing safe use of retry/hedge policies with transaction sending.
idempotentTransactionBroadcast: true
+ # (OPTIONAL) Configure Multicall3 aggregation for eth_call batching.
+ # When enabled (default), multiple eth_call requests targeting the same block are batched
+ # into a single Multicall3 aggregate3 call, reducing RPC calls and improving efficiency.
+ multicall3Aggregation:
+ # Enable or disable Multicall3 aggregation.
+ # DEFAULT: true
+ enabled: true
+ # Time window in milliseconds to collect requests before batching.
+ # DEFAULT: 25
+ windowMs: 25
+ # Minimum wait time in milliseconds before flushing a batch.
+ # DEFAULT: 2
+ minWaitMs: 2
+ # Safety margin in milliseconds subtracted from request deadlines when computing flush time.
+ # DEFAULT: min(2, minWaitMs)
+ safetyMarginMs: 2
+ # If true, only batch if another request is already waiting. Avoids adding latency
+ # when requests arrive sporadically.
+ # DEFAULT: false
+ onlyIfPending: false
+ # Maximum number of calls per batch.
+ # DEFAULT: 20
+ maxCalls: 20
+ # Maximum total calldata size in bytes for a batch.
+ # DEFAULT: 64000
+ maxCalldataBytes: 64000
+ # Maximum number of requests waiting to be batched.
+ # DEFAULT: 1000
+ maxQueueSize: 1000
+ # Maximum number of pending batches being processed.
+ # DEFAULT: 200
+ maxPendingBatches: 200
+ # Enable per-call cache writes after successful Multicall3 response.
+ # DEFAULT: true
+ cachePerCall: true
+ # Allow batching requests from different users together.
+ # Set to false to isolate user requests into separate batches.
+ # DEFAULT: true
+ allowCrossUserBatching: true
+ # Allow batching calls with "pending" block tag.
+ # DEFAULT: false
+ allowPendingTagBatching: false
# (OPTIONAL) A friendly alias for this network. This allows you to reference the network using the alias
# instead of the architecture/chainId format. For example, instead of using /main/evm/1, you can use /main/ethereum.
@@ -174,6 +216,37 @@ export default createConfig({
* allowing safe use of retry/hedge policies with transaction sending.
*/
idempotentTransactionBroadcast: true,
+ /**
+ * (OPTIONAL) Configure Multicall3 aggregation for eth_call batching.
+ * When enabled (default), multiple eth_call requests targeting the same block are batched
+ * into a single Multicall3 aggregate3 call, reducing RPC calls and improving efficiency.
+ */
+ multicall3Aggregation: {
+ // Enable or disable Multicall3 aggregation. DEFAULT: true
+ enabled: true,
+ // Time window in milliseconds to collect requests before batching. DEFAULT: 25
+ windowMs: 25,
+ // Minimum wait time in milliseconds before flushing a batch. DEFAULT: 2
+ minWaitMs: 2,
+ // Safety margin in milliseconds subtracted from request deadlines. DEFAULT: min(2, minWaitMs)
+ safetyMarginMs: 2,
+ // If true, only batch if another request is already waiting. DEFAULT: false
+ onlyIfPending: false,
+ // Maximum number of calls per batch. DEFAULT: 20
+ maxCalls: 20,
+ // Maximum total calldata size in bytes for a batch. DEFAULT: 64000
+ maxCalldataBytes: 64000,
+ // Maximum number of requests waiting to be batched. DEFAULT: 1000
+ maxQueueSize: 1000,
+ // Maximum number of pending batches being processed. DEFAULT: 200
+ maxPendingBatches: 200,
+ // Enable per-call cache writes after successful Multicall3. DEFAULT: true
+ cachePerCall: true,
+ // Allow batching requests from different users together. DEFAULT: true
+ allowCrossUserBatching: true,
+ // Allow batching calls with "pending" block tag. DEFAULT: false
+ allowPendingTagBatching: false,
+ },
},
/**
diff --git a/docs/pages/operation/batch.mdx b/docs/pages/operation/batch.mdx
index a1e6244b9..a4b5c8c45 100644
--- a/docs/pages/operation/batch.mdx
+++ b/docs/pages/operation/batch.mdx
@@ -148,9 +148,73 @@ curl --location 'http://localhost:4000/main' \
]'
```
-#### Roadmap
+## Multicall3 Aggregation
-On some doc pages we like to share our ideas for related future implementations, feel free to open a PR if you're up for a challenge:
+For EVM networks, eRPC can automatically aggregate `eth_call` requests into a single [Multicall3](https://www.multicall3.com/) contract call. This batching operates at the network level across **all entrypoints** (HTTP single requests, HTTP batch requests, and gRPC), not just JSON-RPC batch requests.
-
-- [ ] Auto-batch multiple `eth_call`s for evm upstreams using multicall3 contracts if available on that chain.
+### How it works
+
+Multicall3 aggregation collects `eth_call` requests that share the same block tag and batches them together:
+
+1. **Collection**: When multiple `eth_call` requests arrive within a time window (default: 25ms) targeting the same block tag (e.g., `latest`, `0x123456`), they are grouped together
+2. **Aggregation**: These calls are combined into a single `aggregate3` call to the Multicall3 contract
+3. **Execution**: One upstream request is made instead of many
+4. **Response mapping**: Results are decoded and mapped back to the original requests
+5. **Deduplication**: Identical calls (same target + calldata + block) within a batch are deduplicated and share the response
+
+This is particularly beneficial for applications that query multiple contract states (e.g., ERC20 balances, token metadata), reducing upstream RPC calls significantly even when clients send individual requests.
+
+
+ Multicall3 is deployed at the same address (`0xcA11bde05977b3631167028862bE2a173976CA11`) on most EVM chains.
+ The feature automatically falls back to individual requests if aggregation fails.
+
+
+### Configuration
+
+Multicall3 aggregation is **enabled by default**. To disable it for a specific network:
+
+
+
+```yaml filename="erpc.yaml"
+projects:
+ - id: main
+ networks:
+ - architecture: evm
+ evm:
+ chainId: 1
+ # Disable multicall3 aggregation for this network
+ multicall3Aggregation: false
+```
+
+
+```ts filename="erpc.ts"
+import { createConfig } from "@erpc-cloud/config";
+
+export default createConfig({
+ projects: [
+ {
+ id: "main",
+ networks: [
+ {
+ architecture: "evm",
+ evm: {
+ chainId: 1,
+ // Disable multicall3 aggregation for this network
+ multicall3Aggregation: false,
+ },
+ },
+ ],
+ },
+ ],
+});
+```
+
+
+
+### When to disable
+
+You might want to disable multicall3 aggregation if:
+
+- The chain doesn't have Multicall3 deployed
+- You're experiencing issues with specific contract calls that don't work well with aggregation
+- You need individual error responses for each call (aggregation returns success/failure per call, but error details may differ)
diff --git a/erpc/config_analyzer.go b/erpc/config_analyzer.go
index c0854052c..5df31d08c 100644
--- a/erpc/config_analyzer.go
+++ b/erpc/config_analyzer.go
@@ -325,7 +325,7 @@ func GenerateValidationReport(ctx context.Context, cfg *common.Config) *Validati
}
clReg := clients.NewClientRegistry(&silent, project.Id, prxPool, evm.NewJsonRpcErrorExtractor())
vndReg := thirdparty.NewVendorsRegistry()
- rlr, err := upstream.NewRateLimitersRegistry(cfg.RateLimiters, &silent)
+ rlr, err := upstream.NewRateLimitersRegistry(ctx, cfg.RateLimiters, &silent)
if err != nil {
appendErr(fmt.Sprintf("project=%s failed to create rate limiters registry: %v", project.Id, err))
continue
@@ -806,6 +806,7 @@ func validateUpstreamEndpoints(ctx context.Context, cfg *common.Config, logger z
)
vndReg := thirdparty.NewVendorsRegistry()
rlr, err := upstream.NewRateLimitersRegistry(
+ ctx,
cfg.RateLimiters,
&logger,
)
diff --git a/erpc/erpc.go b/erpc/erpc.go
index d9b29cb04..b28042bc7 100644
--- a/erpc/erpc.go
+++ b/erpc/erpc.go
@@ -33,6 +33,7 @@ func NewERPC(
}
rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ appCtx,
cfg.RateLimiters,
logger,
)
diff --git a/erpc/evm_json_rpc_cache_test.go b/erpc/evm_json_rpc_cache_test.go
index 21bed49e0..7cf8a975d 100644
--- a/erpc/evm_json_rpc_cache_test.go
+++ b/erpc/evm_json_rpc_cache_test.go
@@ -99,7 +99,7 @@ func createCacheTestFixtures(ctx context.Context, upstreamConfigs []upsTestCfg)
for _, cfg := range upstreamConfigs {
mt := health.NewTracker(&logger, "prjA", 100*time.Second)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &logger)
if err != nil {
panic(err)
}
@@ -2030,6 +2030,10 @@ func TestEvmJsonRpcCache_DynamoDB(t *testing.T) {
},
}
+ // Set up gock mocks for upstream state poller HTTP requests
+ util.SetupMocksForEvmStatePoller()
+ defer util.ResetGock()
+
// Create test upstreams with different finalized blocks
mockUpstream := createMockUpstream(t, ctx, 123, "upsA", common.EvmSyncingStateNotSyncing, 10, 15)
@@ -2393,6 +2397,10 @@ func TestEvmJsonRpcCache_Redis(t *testing.T) {
},
}
+ // Set up gock mocks for upstream state poller HTTP requests
+ util.SetupMocksForEvmStatePoller()
+ defer util.ResetGock()
+
// Create test upstream with finalized blocks
mockUpstream := createMockUpstream(t, ctx, 123, "upsA", common.EvmSyncingStateNotSyncing, 10, 15)
@@ -2622,7 +2630,7 @@ func createMockUpstream(t *testing.T, ctx context.Context, chainId int64, upstre
require.NoError(t, err)
mt := health.NewTracker(&logger, "prjA", 100*time.Second)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &logger)
require.NoError(t, err)
mockUpstream, err := upstream.NewUpstream(ctx, "test", &common.UpstreamConfig{
@@ -3244,7 +3252,7 @@ func createCacheTestFixturesWithCompression(ctx context.Context, upstreamConfigs
for _, cfg := range upstreamConfigs {
mt := health.NewTracker(&logger, "prjA", 100*time.Second)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &logger)
if err != nil {
panic(err)
}
diff --git a/erpc/http_batch_eth_call.go b/erpc/http_batch_eth_call.go
new file mode 100644
index 000000000..4dfaeb400
--- /dev/null
+++ b/erpc/http_batch_eth_call.go
@@ -0,0 +1,609 @@
+package erpc
+
+import (
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/erpc/erpc/architecture/evm"
+ "github.com/erpc/erpc/auth"
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/telemetry"
+ "github.com/rs/zerolog"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// maxConcurrentCacheWrites limits concurrent multicall3 per-call cache write goroutines.
+// This value balances memory usage with throughput - too low causes dropped writes under load,
+// too high risks memory pressure. 100 is chosen as a reasonable default for most workloads.
+const maxConcurrentCacheWrites = 100
+
+// cacheWriteSem limits concurrent multicall3 per-call cache write goroutines
+// to prevent unbounded goroutine growth under high load.
+var cacheWriteSem = make(chan struct{}, maxConcurrentCacheWrites)
+
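+// ethCallBatchInfo captures the shared network id and block reference (plus the original block parameter) for a batch of eth_call requests that is eligible for Multicall3 aggregation.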
+type ethCallBatchInfo struct {
+ networkId string
+ blockRef string
+ blockParam interface{}
+}
+
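+// ethCallBatchCandidate tracks one normalized eth_call request along with its span context and logger; index points back to the caller's response slot so results map to the original batch order.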
+type ethCallBatchCandidate struct {
+ index int
+ ctx context.Context
+ req *common.NormalizedRequest
+ logger zerolog.Logger
+}
+
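+// ethCallBatchProbe is a minimal projection of a raw JSON-RPC request, unmarshaled only to check batching eligibility (method, params/block tag, and target network).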
+type ethCallBatchProbe struct {
+ Method string `json:"method"`
+ Params []interface{} `json:"params"`
+ NetworkId string `json:"networkId"`
+}
+
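+// The forward functions and response constructor are package-level variables so tests can stub them (see withBatchStubs in the test helpers).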
+var (
+ forwardBatchNetwork = func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return network.Forward(ctx, req)
+ }
+ forwardBatchProject = func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return project.doForward(ctx, network, req)
+ }
+ newBatchJsonRpcResponse = common.NewJsonRpcResponse
+)
+
+// detectEthCallBatchInfo checks if a batch request is eligible for Multicall3 aggregation.
+// Returns nil (no error) if the batch is not eligible due to:
+// - Fewer than 2 requests in the batch
+// - Any non-eth_call method in the batch
+// - Requests targeting different networks
+// - Requests targeting different block references
+// - Non-EVM architecture
+// Returns an error only for actual parsing/validation failures.
+func detectEthCallBatchInfo(requests []json.RawMessage, architecture, chainId string) (*ethCallBatchInfo, error) {
+ if len(requests) < 2 {
+ return nil, nil
+ }
+ if architecture != "" && architecture != string(common.ArchitectureEvm) {
+ return nil, nil
+ }
+
+ defaultNetworkId := ""
+ if architecture != "" && chainId != "" {
+ defaultNetworkId = fmt.Sprintf("%s:%s", architecture, chainId)
+ }
+
+ var networkId string
+ var blockRef string
+ var blockParam interface{}
+ // Track requireCanonical state across requests:
+ // 0 = not yet set, 1 = true (explicit or default), 2 = explicitly false
+ var requireCanonicalState int
+
+ for _, raw := range requests {
+ var probe ethCallBatchProbe
+ if err := common.SonicCfg.Unmarshal(raw, &probe); err != nil {
+ return nil, err
+ }
+ if strings.ToLower(probe.Method) != "eth_call" {
+ return nil, nil
+ }
+
+ reqNetworkId := defaultNetworkId
+ if reqNetworkId == "" {
+ reqNetworkId = probe.NetworkId
+ }
+ if reqNetworkId == "" || !strings.HasPrefix(reqNetworkId, "evm:") {
+ return nil, nil
+ }
+ if networkId == "" {
+ networkId = reqNetworkId
+ } else if networkId != reqNetworkId {
+ return nil, nil
+ }
+
+ param := interface{}("latest")
+ if len(probe.Params) >= 2 {
+ param = probe.Params[1]
+ }
+ bref, err := evm.NormalizeBlockParam(param)
+ if err != nil {
+ return nil, err
+ }
+ if blockRef == "" {
+ blockRef = bref
+ blockParam = param
+ } else if blockRef != bref {
+ return nil, nil
+ }
+
+ // Check for mixed requireCanonical values in block-hash params (EIP-1898)
+ // We need to ensure all requests in a batch have compatible requireCanonical values,
+ // otherwise the Multicall3 call won't honor individual semantics.
+ // States: 0 = not yet set, 1 = true (explicit or default), 2 = explicitly false
+ // Explicit true and absent (default true) are treated as compatible (both = 1)
+ if blockObj, ok := param.(map[string]interface{}); ok {
+ if _, hasBlockHash := blockObj["blockHash"]; hasBlockHash {
+ currentState := 1 // default: true (EIP-1898 default)
+ if reqCanonical, hasReqCanonical := blockObj["requireCanonical"]; hasReqCanonical {
+ if reqCanonicalBool, ok := reqCanonical.(bool); ok && !reqCanonicalBool {
+ currentState = 2 // explicitly false
+ }
+ }
+ if requireCanonicalState == 0 {
+ requireCanonicalState = currentState
+ } else if requireCanonicalState != currentState {
+ // Mixed requireCanonical values - not eligible for batching
+ return nil, nil
+ }
+ }
+ }
+ }
+
+ if networkId == "" || blockRef == "" {
+ return nil, nil
+ }
+
+ return &ethCallBatchInfo{
+ networkId: networkId,
+ blockRef: blockRef,
+ blockParam: blockParam,
+ }, nil
+}
+
+// forwardEthCallBatchCandidates forwards individual eth_call requests in parallel as a fallback
+// when Multicall3 aggregation fails or is not applicable.
+//
+// Rate limiting note: The network rate limit is skipped (withSkipNetworkRateLimit) because
+// rate limits were already acquired for each request during the aggregation flow preparation.
+// This prevents double-counting rate limits when falling back to individual requests.
+func (s *HttpServer) forwardEthCallBatchCandidates(
+ startedAt *time.Time,
+ project *PreparedProject,
+ network *Network,
+ candidates []ethCallBatchCandidate,
+ responses []interface{},
+) {
+ if project == nil || network == nil {
+ err := common.NewErrInvalidRequest(fmt.Errorf("network not available for batch eth_call fallback"))
+ for _, cand := range candidates {
+ responses[cand.index] = processErrorBody(&cand.logger, startedAt, cand.req, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(cand.ctx, nil, err)
+ }
+ return
+ }
+
+ // Process candidates in parallel for better performance during fallback
+ var wg sync.WaitGroup
+ for _, cand := range candidates {
+ wg.Add(1)
+ go func(c ethCallBatchCandidate) {
+ defer wg.Done()
+ defer func() {
+ if r := recover(); r != nil {
+ panicErr := common.NewErrJsonRpcExceptionInternal(
+ 0,
+ common.JsonRpcErrorServerSideException,
+ fmt.Sprintf("internal error: panic in batch fallback: %v", r),
+ nil,
+ nil,
+ )
+ c.logger.Error().
+ Str("panic", fmt.Sprintf("%v", r)).
+ Str("stack", string(debug.Stack())).
+ Msg("panic in forwardEthCallBatchCandidates goroutine")
+ responses[c.index] = processErrorBody(&c.logger, startedAt, c.req, panicErr, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(c.ctx, nil, panicErr)
+ }
+ }()
+ resp, err := forwardBatchProject(withSkipNetworkRateLimit(c.ctx), project, network, c.req)
+ if err != nil {
+ if resp != nil {
+ resp.Release()
+ }
+ responses[c.index] = processErrorBody(&c.logger, startedAt, c.req, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(c.ctx, nil, err)
+ return
+ }
+
+ responses[c.index] = resp
+ common.EndRequestSpan(c.ctx, resp, nil)
+ }(cand)
+ }
+ wg.Wait()
+}
+
+func (s *HttpServer) handleEthCallBatchAggregation(
+ httpCtx context.Context,
+ startedAt *time.Time,
+ r *http.Request,
+ project *PreparedProject,
+ baseLogger zerolog.Logger,
+ batchInfo *ethCallBatchInfo,
+ requests []json.RawMessage,
+ headers http.Header,
+ queryArgs map[string][]string,
+ responses []interface{},
+) bool {
+ if batchInfo == nil || project == nil {
+ return false
+ }
+
+ network, networkErr := project.GetNetwork(httpCtx, batchInfo.networkId)
+ uaMode := common.UserAgentTrackingModeSimplified
+ if project.Config != nil && project.Config.UserAgentMode != "" {
+ uaMode = project.Config.UserAgentMode
+ }
+
+ candidates := make([]ethCallBatchCandidate, 0, len(requests))
+ for i, rawReq := range requests {
+ nq := common.NewNormalizedRequest(rawReq)
+ rawReq = nil
+ requestCtx := common.StartRequestSpan(httpCtx, nq)
+
+ clientIP := s.resolveRealClientIP(r)
+ nq.SetClientIP(clientIP)
+
+ if err := nq.Validate(); err != nil {
+ responses[i] = processErrorBody(&baseLogger, startedAt, nq, err, &common.TRUE)
+ common.EndRequestSpan(requestCtx, nil, err)
+ continue
+ }
+
+ method, methodErr := nq.Method()
+ if methodErr != nil {
+ responses[i] = processErrorBody(&baseLogger, startedAt, nq, methodErr, &common.TRUE)
+ common.EndRequestSpan(requestCtx, nil, methodErr)
+ continue
+ }
+ rlg := baseLogger.With().Str("method", method).Logger()
+
+ ap, err := auth.NewPayloadFromHttp(method, r.RemoteAddr, headers, queryArgs)
+ if err != nil {
+ responses[i] = processErrorBody(&rlg, startedAt, nq, err, &common.TRUE)
+ common.EndRequestSpan(requestCtx, nil, err)
+ continue
+ }
+
+ user, err := project.AuthenticateConsumer(requestCtx, nq, method, ap)
+ if err != nil {
+ responses[i] = processErrorBody(&rlg, startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ continue
+ }
+ if user != nil {
+ rlg = rlg.With().Str("userId", user.Id).Logger()
+ }
+ nq.SetUser(user)
+
+ if networkErr != nil || network == nil {
+ err := networkErr
+ if err == nil {
+ err = common.NewErrNetworkNotFound(batchInfo.networkId)
+ }
+ responses[i] = processErrorBody(&rlg, startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ continue
+ }
+
+ nq.SetNetwork(network)
+ nq.SetCacheDal(network.Cache())
+ nq.ApplyDirectiveDefaults(network.Config().DirectiveDefaults)
+ nq.EnrichFromHttp(headers, queryArgs, uaMode)
+ rlg.Trace().Interface("directives", nq.Directives()).Msgf("applied request directives")
+
+ // Acquire project rate limit early (for billing/analytics purposes)
+ if err := project.acquireRateLimitPermit(requestCtx, nq); err != nil {
+ responses[i] = processErrorBody(&rlg, startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ continue
+ }
+
+ // Record per-request metric for billing/analytics
+ // This ensures aggregated requests are counted individually
+ reqFinality := nq.Finality(requestCtx)
+ telemetry.CounterHandle(telemetry.MetricNetworkRequestsReceived,
+ project.Config.Id, network.Label(), method, reqFinality.String(), nq.UserId(), nq.AgentName(),
+ ).Inc()
+
+ candidates = append(candidates, ethCallBatchCandidate{
+ index: i,
+ ctx: requestCtx,
+ req: nq,
+ logger: rlg,
+ })
+ }
+
+ if len(candidates) == 0 {
+ return true
+ }
+
+ projectId := ""
+ if project.Config != nil {
+ projectId = project.Config.Id
+ }
+
+ // Check cache for individual requests before aggregating
+ // Respects skip-cache-read directive - requests with this directive skip cache probe
+ cacheDal := network.Cache()
+ var uncachedCandidates []ethCallBatchCandidate
+ if cacheDal != nil && !cacheDal.IsObjectNull() {
+ uncachedCandidates = make([]ethCallBatchCandidate, 0, len(candidates))
+ cacheHits := 0
+ for _, cand := range candidates {
+ // Respect skip-cache-read directive - if set, skip cache probe entirely
+ if cand.req.SkipCacheRead() {
+ uncachedCandidates = append(uncachedCandidates, cand)
+ continue
+ }
+ cachedResp, err := cacheDal.Get(cand.ctx, cand.req)
+ if err != nil {
+ // Log and track cache errors - they're non-fatal but indicate potential cache issues
+ telemetry.MetricMulticall3CacheReadErrorsTotal.WithLabelValues(projectId, batchInfo.networkId).Inc()
+ cand.logger.Warn().
+ Err(err).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 pre-aggregation cache get failed, treating as miss")
+ }
+ if err == nil && cachedResp != nil && !cachedResp.IsObjectNull(cand.ctx) {
+ // Cache hit - use cached response directly
+ cachedResp.SetFromCache(true)
+ responses[cand.index] = cachedResp
+ common.EndRequestSpan(cand.ctx, cachedResp, nil)
+ cacheHits++
+ continue
+ }
+ // Cache miss - needs to be fetched
+ uncachedCandidates = append(uncachedCandidates, cand)
+ }
+ if cacheHits > 0 {
+ telemetry.MetricMulticall3CacheHitsTotal.WithLabelValues(projectId, batchInfo.networkId).Add(float64(cacheHits))
+ baseLogger.Debug().
+ Int("cacheHits", cacheHits).
+ Int("uncached", len(uncachedCandidates)).
+ Int("total", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 pre-aggregation cache check")
+ }
+ candidates = uncachedCandidates
+ }
+
+ // All requests served from cache
+ if len(candidates) == 0 {
+ telemetry.MetricMulticall3AggregationTotal.WithLabelValues(projectId, batchInfo.networkId, "all_cached").Inc()
+ return true
+ }
+
+ // Acquire network rate limits only for uncached requests that will hit the network
+ // This prevents wasting rate limit permits on cache hits
+ var rateLimitedCandidates []ethCallBatchCandidate
+ for _, cand := range candidates {
+ if err := network.acquireRateLimitPermit(cand.ctx, cand.req); err != nil {
+ responses[cand.index] = processErrorBody(&cand.logger, startedAt, cand.req, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(cand.ctx, nil, err)
+ continue
+ }
+ rateLimitedCandidates = append(rateLimitedCandidates, cand)
+ }
+ candidates = rateLimitedCandidates
+
+ // All uncached requests were rate limited
+ if len(candidates) == 0 {
+ return true
+ }
+
+ if len(candidates) < 2 {
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+
+ reqs := make([]*common.NormalizedRequest, len(candidates))
+ for i, cand := range candidates {
+ reqs[i] = cand.req
+ }
+
+ mcReq, calls, err := evm.BuildMulticall3Request(reqs, batchInfo.blockParam)
+ if err != nil {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "build_failed").Inc()
+ baseLogger.Debug().Err(err).
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 build failed, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+
+ // Mark as composite to disable hedging - multicall3 requests should not be
+ // hedged like normal requests as this would create duplicate batches
+ mcReq.SetCompositeType(common.CompositeTypeMulticall3)
+
+ mcCtx := withSkipNetworkRateLimit(httpCtx)
+ mcResp, mcErr := forwardBatchNetwork(mcCtx, network, mcReq)
+ if mcErr != nil {
+ if mcResp != nil {
+ mcResp.Release()
+ }
+ if evm.ShouldFallbackMulticall3(mcErr) {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "forward_failed").Inc()
+ baseLogger.Debug().Err(mcErr).
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 request failed, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+ // Non-recoverable error - don't fallback, propagate to all candidates
+ telemetry.MetricMulticall3AggregationTotal.WithLabelValues(projectId, batchInfo.networkId, "error").Inc()
+ for _, cand := range candidates {
+ responses[cand.index] = processErrorBody(&cand.logger, startedAt, cand.req, mcErr, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(cand.ctx, nil, mcErr)
+ }
+ return true
+ }
+ if mcResp == nil {
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "nil_response").Inc()
+ baseLogger.Debug().
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 response missing, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+
+ jrr, err := mcResp.JsonRpcResponse(mcCtx)
+ if err != nil || jrr == nil || jrr.Error != nil {
+ mcResp.Release()
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "invalid_response").Inc()
+ logEvent := baseLogger.Debug().Err(err).
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Bool("missingResponse", jrr == nil).
+ Bool("hasRpcError", jrr != nil && jrr.Error != nil)
+ if jrr != nil && jrr.Error != nil {
+ logEvent = logEvent.Int("rpcErrorCode", jrr.Error.Code).
+ Str("rpcErrorMessage", jrr.Error.Message)
+ }
+ logEvent.Msg("multicall3 response invalid, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+
+ var resultHex string
+ if err := common.SonicCfg.Unmarshal(jrr.GetResultBytes(), &resultHex); err != nil {
+ mcResp.Release()
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "unmarshal_failed").Inc()
+ baseLogger.Debug().Err(err).
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 result unmarshal failed, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+ resultBytes, err := common.HexToBytes(resultHex)
+ if err != nil {
+ mcResp.Release()
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "hex_decode_failed").Inc()
+ baseLogger.Debug().Err(err).
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 result decode failed, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+
+ decoded, err := evm.DecodeMulticall3Aggregate3Result(resultBytes)
+ if err != nil {
+ mcResp.Release()
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "abi_decode_failed").Inc()
+ baseLogger.Debug().Err(err).
+ Int("candidateCount", len(candidates)).
+ Str("networkId", batchInfo.networkId).
+ Msg("multicall3 result parsing failed, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+ if len(decoded) != len(calls) {
+ mcResp.Release()
+ // Length mismatch is a critical issue - record a distinct fallback reason and log at Error level
+ telemetry.MetricMulticall3FallbackTotal.WithLabelValues(projectId, batchInfo.networkId, "length_mismatch").Inc()
+ baseLogger.Error().
+ Int("candidateCount", len(candidates)).
+ Int("decodedCount", len(decoded)).
+ Int("callCount", len(calls)).
+ Str("networkId", batchInfo.networkId).
+ Msg("CRITICAL: multicall3 result length mismatch - possible data corruption, falling back")
+ s.forwardEthCallBatchCandidates(startedAt, project, network, candidates, responses)
+ return true
+ }
+
+ shouldCache := !mcResp.FromCache() && cacheDal != nil && !cacheDal.IsObjectNull()
+ for i, result := range decoded {
+ cand := candidates[i]
+ if result.Success {
+ returnHex := "0x" + hex.EncodeToString(result.ReturnData)
+ jrr, err := newBatchJsonRpcResponse(cand.req.ID(), returnHex, nil)
+ if err != nil {
+ responses[cand.index] = processErrorBody(&cand.logger, startedAt, cand.req, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(cand.ctx, nil, err)
+ continue
+ }
+
+ nr := common.NewNormalizedResponse().WithRequest(cand.req).WithJsonRpcResponse(jrr)
+ nr.SetUpstream(mcResp.Upstream())
+ nr.SetFromCache(mcResp.FromCache())
+ nr.SetAttempts(mcResp.Attempts())
+ nr.SetRetries(mcResp.Retries())
+ nr.SetHedges(mcResp.Hedges())
+ nr.SetEvmBlockRef(mcResp.EvmBlockRef())
+ nr.SetEvmBlockNumber(mcResp.EvmBlockNumber())
+ responses[cand.index] = nr
+ common.EndRequestSpan(cand.ctx, nr, nil)
+
+ // Cache individual response asynchronously with backpressure
+ if shouldCache {
+ // Try to acquire semaphore (non-blocking to avoid blocking response path)
+ select {
+ case cacheWriteSem <- struct{}{}:
+ nr.RLockWithTrace(cand.ctx)
+ nr.AddRef()
+ go func(resp *common.NormalizedResponse, req *common.NormalizedRequest, reqCtx context.Context, lg zerolog.Logger) {
+ defer func() { <-cacheWriteSem }() // Release semaphore
+ defer func() {
+ if rec := recover(); rec != nil {
+ telemetry.MetricUnexpectedPanicTotal.WithLabelValues(
+ "multicall3-cache-write",
+ fmt.Sprintf("network:%s", batchInfo.networkId),
+ common.ErrorFingerprint(rec),
+ ).Inc()
+ lg.Error().
+ Interface("panic", rec).
+ Str("stack", string(debug.Stack())).
+ Msg("unexpected panic on multicall3 per-call cache write")
+ }
+ }()
+ defer resp.RUnlock()
+ defer resp.DoneRef()
+
+ timeoutCtx, timeoutCtxCancel := context.WithTimeoutCause(network.AppCtx(), 10*time.Second, errors.New("cache driver timeout during multicall3 per-call cache write"))
+ defer timeoutCtxCancel()
+ tracedCtx := trace.ContextWithSpanContext(timeoutCtx, trace.SpanContextFromContext(reqCtx))
+ if err := cacheDal.Set(tracedCtx, req, resp); err != nil {
+ lg.Warn().Err(err).Msg("could not store multicall3 per-call response in cache")
+ }
+ }(nr, cand.req, cand.ctx, cand.logger)
+ default:
+ // Semaphore full - skip cache write to avoid unbounded goroutine growth
+ telemetry.MetricMulticall3CacheWriteDroppedTotal.WithLabelValues(projectId, batchInfo.networkId).Inc()
+ cand.logger.Debug().Msg("skipping multicall3 per-call cache write due to backpressure")
+ }
+ }
+ continue
+ }
+
+ dataHex := "0x" + hex.EncodeToString(result.ReturnData)
+ callErr := common.NewErrEndpointExecutionException(
+ common.NewErrJsonRpcExceptionInternal(
+ 0,
+ common.JsonRpcErrorEvmReverted,
+ "execution reverted",
+ nil,
+ map[string]interface{}{
+ "data": dataHex,
+ },
+ ),
+ )
+ responses[cand.index] = processErrorBody(&cand.logger, startedAt, cand.req, callErr, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(cand.ctx, nil, callErr)
+ }
+
+ // Aggregation succeeded
+ telemetry.MetricMulticall3AggregationTotal.WithLabelValues(projectId, batchInfo.networkId, "success").Inc()
+ mcResp.Release()
+ return true
+}
diff --git a/erpc/http_batch_eth_call_detect_test.go b/erpc/http_batch_eth_call_detect_test.go
new file mode 100644
index 000000000..df66b4420
--- /dev/null
+++ b/erpc/http_batch_eth_call_detect_test.go
@@ -0,0 +1,157 @@
+package erpc
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/erpc/erpc/common"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDetectEthCallBatchInfo(t *testing.T) {
+ buildRaw := func(t *testing.T, method string, params []interface{}, networkId string) json.RawMessage {
+ t.Helper()
+ body := map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": 1,
+ "method": method,
+ "params": params,
+ }
+ if networkId != "" {
+ body["networkId"] = networkId
+ }
+ raw, err := common.SonicCfg.Marshal(body)
+ require.NoError(t, err)
+ return raw
+ }
+
+ callObj := map[string]interface{}{"to": "0x0000000000000000000000000000000000000001"}
+
+ cases := []struct {
+ name string
+ requests []json.RawMessage
+ arch string
+ chainId string
+ wantNil bool
+ wantErr bool
+ wantNetwork string
+ wantBlockRef string
+ wantBlock interface{}
+ }{
+ {
+ name: "single request",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj}, "evm:1")},
+ wantNil: true,
+ },
+ {
+ name: "non-evm arch",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj}, "evm:1"), buildRaw(t, "eth_call", []interface{}{callObj}, "evm:1")},
+ arch: "solana",
+ wantNil: true,
+ },
+ {
+ name: "invalid json",
+ requests: []json.RawMessage{json.RawMessage("{"), buildRaw(t, "eth_call", []interface{}{callObj}, "evm:1")},
+ wantNil: true,
+ wantErr: true,
+ },
+ {
+ name: "non-eth_call",
+ requests: []json.RawMessage{buildRaw(t, "eth_getBalance", []interface{}{callObj}, "evm:1"), buildRaw(t, "eth_call", []interface{}{callObj}, "evm:1")},
+ wantNil: true,
+ },
+ {
+ name: "missing network",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj}, ""), buildRaw(t, "eth_call", []interface{}{callObj}, "")},
+ wantNil: true,
+ },
+ {
+ name: "network mismatch",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj}, "evm:1"), buildRaw(t, "eth_call", []interface{}{callObj}, "evm:2")},
+ wantNil: true,
+ },
+ {
+ name: "block mismatch",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj, "0x1"}, "evm:1"), buildRaw(t, "eth_call", []interface{}{callObj, "0x2"}, "evm:1")},
+ wantNil: true,
+ },
+ {
+ name: "invalid block param",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0xzz"}}, "evm:1"), buildRaw(t, "eth_call", []interface{}{callObj, "latest"}, "evm:1")},
+ wantNil: true,
+ wantErr: true,
+ },
+ {
+ name: "success explicit network",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj, "0x1"}, "evm:1"), buildRaw(t, "eth_call", []interface{}{callObj, "0x1"}, "evm:1")},
+ wantNetwork: "evm:1",
+ wantBlockRef: "1",
+ wantBlock: "0x1",
+ },
+ {
+ name: "success default network",
+ requests: []json.RawMessage{buildRaw(t, "eth_call", []interface{}{callObj}, ""), buildRaw(t, "eth_call", []interface{}{callObj}, "")},
+ arch: "evm",
+ chainId: "123",
+ wantNetwork: "evm:123",
+ wantBlockRef: "latest",
+ wantBlock: "latest",
+ },
+ {
+ name: "mixed requireCanonical - one false one true",
+ requests: []json.RawMessage{
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": false}}, "evm:1"),
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": true}}, "evm:1"),
+ },
+ wantNil: true,
+ },
+ {
+ name: "mixed requireCanonical - one false one default",
+ requests: []json.RawMessage{
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": false}}, "evm:1"),
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234"}}, "evm:1"),
+ },
+ wantNil: true,
+ },
+ {
+ name: "same requireCanonical - both true",
+ requests: []json.RawMessage{
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": true}}, "evm:1"),
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": true}}, "evm:1"),
+ },
+ wantNetwork: "evm:1",
+ wantBlockRef: "0x1234567890123456789012345678901234567890123456789012345678901234",
+ wantBlock: map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": true},
+ },
+ {
+ name: "same requireCanonical - explicit true and default compatible",
+ requests: []json.RawMessage{
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": true}}, "evm:1"),
+ buildRaw(t, "eth_call", []interface{}{callObj, map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234"}}, "evm:1"),
+ },
+ wantNetwork: "evm:1",
+ wantBlockRef: "0x1234567890123456789012345678901234567890123456789012345678901234",
+ wantBlock: map[string]interface{}{"blockHash": "0x1234567890123456789012345678901234567890123456789012345678901234", "requireCanonical": true},
+ },
+ }
+
+ for _, tt := range cases {
+ t.Run(tt.name, func(t *testing.T) {
+ info, err := detectEthCallBatchInfo(tt.requests, tt.arch, tt.chainId)
+ if tt.wantErr {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ }
+ if tt.wantNil {
+ assert.Nil(t, info)
+ return
+ }
+ require.NotNil(t, info)
+ assert.Equal(t, tt.wantNetwork, info.networkId)
+ assert.Equal(t, tt.wantBlockRef, info.blockRef)
+ assert.Equal(t, tt.wantBlock, info.blockParam)
+ })
+ }
+}
diff --git a/erpc/http_batch_eth_call_forward_test.go b/erpc/http_batch_eth_call_forward_test.go
new file mode 100644
index 000000000..871c715cc
--- /dev/null
+++ b/erpc/http_batch_eth_call_forward_test.go
@@ -0,0 +1,101 @@
+package erpc
+
+import (
+ "context"
+ "errors"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/erpc/erpc/common"
+ "github.com/rs/zerolog/log"
+ "github.com/stretchr/testify/require"
+)
+
+func TestForwardEthCallBatchCandidates(t *testing.T) {
+ server := &HttpServer{serverCfg: &common.ServerConfig{IncludeErrorDetails: &common.TRUE}}
+ startedAt := time.Now()
+
+ makeCandidate := func(index int) ethCallBatchCandidate {
+ req := common.NewNormalizedRequest([]byte(`{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"}]}`))
+ ctx := common.StartRequestSpan(context.Background(), req)
+ return ethCallBatchCandidate{
+ index: index,
+ ctx: ctx,
+ req: req,
+ logger: log.Logger,
+ }
+ }
+
+ responses := make([]interface{}, 1)
+ server.forwardEthCallBatchCandidates(&startedAt, nil, nil, []ethCallBatchCandidate{makeCandidate(0)}, responses)
+ require.NotNil(t, responses[0])
+
+ origForward := forwardBatchProject
+ t.Cleanup(func() {
+ forwardBatchProject = origForward
+ })
+
+ t.Run("forward error", func(t *testing.T) {
+ responses := make([]interface{}, 1)
+ resp := common.NewNormalizedResponse()
+ forwardBatchProject = func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return resp, errors.New("boom")
+ }
+
+ server.forwardEthCallBatchCandidates(&startedAt, &PreparedProject{}, &Network{}, []ethCallBatchCandidate{makeCandidate(0)}, responses)
+ require.NotNil(t, responses[0])
+ })
+
+ t.Run("forward success", func(t *testing.T) {
+ responses := make([]interface{}, 1)
+ resp := common.NewNormalizedResponse()
+ forwardBatchProject = func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return resp, nil
+ }
+
+ server.forwardEthCallBatchCandidates(&startedAt, &PreparedProject{}, &Network{}, []ethCallBatchCandidate{makeCandidate(0)}, responses)
+ require.Equal(t, resp, responses[0])
+ })
+
+ t.Run("panic recovery in forward goroutine", func(t *testing.T) {
+ responses := make([]interface{}, 2)
+ var callCount int32
+ forwardBatchProject = func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ count := atomic.AddInt32(&callCount, 1)
+ if count == 1 {
+ panic("test panic in forward")
+ }
+ return common.NewNormalizedResponse(), nil
+ }
+
+ // Create 2 candidates - one will panic, one will succeed
+ server.forwardEthCallBatchCandidates(&startedAt, &PreparedProject{}, &Network{}, []ethCallBatchCandidate{makeCandidate(0), makeCandidate(1)}, responses)
+
+ // Both responses should have been populated (panic recovered)
+ require.NotNil(t, responses[0], "first response should not be nil after panic")
+ require.NotNil(t, responses[1], "second response should not be nil")
+ })
+
+ t.Run("context cancellation is handled", func(t *testing.T) {
+ cancelledCtx, cancel := context.WithCancel(context.Background())
+ cancel() // Cancel immediately
+
+ req := common.NewNormalizedRequest([]byte(`{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"}]}`))
+
+ candidate := ethCallBatchCandidate{
+ index: 0,
+ ctx: cancelledCtx,
+ req: req,
+ logger: log.Logger,
+ }
+
+ responses := make([]interface{}, 1)
+ forwardBatchProject = func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return nil, ctx.Err()
+ }
+
+ server.forwardEthCallBatchCandidates(&startedAt, &PreparedProject{}, &Network{}, []ethCallBatchCandidate{candidate}, responses)
+ require.NotNil(t, responses[0], "response should be populated even with cancelled context")
+ })
+}
diff --git a/erpc/http_batch_eth_call_handle_test.go b/erpc/http_batch_eth_call_handle_test.go
new file mode 100644
index 000000000..8ecc9b36f
--- /dev/null
+++ b/erpc/http_batch_eth_call_handle_test.go
@@ -0,0 +1,565 @@
+package erpc
+
+import (
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/erpc/erpc/architecture/evm"
+ "github.com/erpc/erpc/common"
+ "github.com/rs/zerolog/log"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHandleEthCallBatchAggregation_EarlyReturn(t *testing.T) {
+ server := &HttpServer{serverCfg: &common.ServerConfig{IncludeErrorDetails: &common.TRUE}}
+ startedAt := time.Now()
+ req := httptest.NewRequest("POST", "http://localhost", nil)
+ req.RemoteAddr = "127.0.0.1:1234"
+
+ handled := server.handleEthCallBatchAggregation(
+ context.Background(),
+ &startedAt,
+ req,
+ nil,
+ log.Logger,
+ nil,
+ nil,
+ req.Header,
+ req.URL.Query(),
+ nil,
+ )
+
+ assert.False(t, handled)
+}
+
+func TestHandleEthCallBatchAggregation_RequestAndAuthErrors(t *testing.T) {
+ t.Run("validation error", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ requests := []json.RawMessage{
+ json.RawMessage(`{"jsonrpc":"2.0","id":1}`),
+ json.RawMessage(`{"jsonrpc":"2.0","id":2}`),
+ }
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), requests, nil)
+ require.True(t, handled)
+ require.Len(t, responses, len(requests))
+ for _, resp := range responses {
+ require.NotNil(t, resp)
+ }
+ })
+
+ t.Run("auth payload error", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ headers := http.Header{}
+ headers.Set("Authorization", "Basic !!!")
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), headers)
+ require.True(t, handled)
+ for _, resp := range responses {
+ require.NotNil(t, resp)
+ }
+ })
+
+ t.Run("auth unauthorized", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ cfg.Projects[0].Auth = &common.AuthConfig{
+ Strategies: []*common.AuthStrategyConfig{
+ {
+ Type: common.AuthTypeSecret,
+ Secret: &common.SecretStrategyConfig{
+ Id: "secret",
+ Value: "s3cr3t",
+ },
+ },
+ },
+ }
+
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ for _, resp := range responses {
+ require.NotNil(t, resp)
+ }
+ })
+}
+
+func TestHandleEthCallBatchAggregation_NetworkAndRateLimitErrors(t *testing.T) {
+ t.Run("network not found", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ batchInfo := &ethCallBatchInfo{networkId: "evm:999", blockRef: "latest", blockParam: "latest"}
+ handled, responses := runHandle(t, ctx, server, project, batchInfo, validBatchRequests(t), nil)
+ require.True(t, handled)
+ for _, resp := range responses {
+ require.NotNil(t, resp)
+ }
+ })
+
+ t.Run("project rate limit", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ cfg.RateLimiters = rateLimitConfig("project-budget")
+ cfg.Projects[0].RateLimitBudget = "project-budget"
+
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ for _, resp := range responses {
+ require.NotNil(t, resp)
+ }
+ })
+
+ t.Run("network rate limit", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ cfg.RateLimiters = rateLimitConfig("network-budget")
+ cfg.Projects[0].Networks[0].RateLimitBudget = "network-budget"
+
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ for _, resp := range responses {
+ require.NotNil(t, resp)
+ }
+ })
+}
+
+func TestHandleEthCallBatchAggregation_FallbackPaths(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ validRequests := validBatchRequests(t)
+ invalidRequests := invalidBatchRequests(t)
+ singleRequest := validRequests[:1]
+
+ cases := []struct {
+ name string
+ requests []json.RawMessage
+ networkResponse func() (*common.NormalizedResponse, error)
+ expectedProjCalls int
+ expectedNetCalls int
+ }{
+ {
+ name: "single candidate",
+ requests: singleRequest,
+ networkResponse: func() (*common.NormalizedResponse, error) { return nil, nil },
+ expectedProjCalls: 1,
+ },
+ {
+ name: "build error",
+ requests: invalidRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) { return nil, nil },
+ expectedProjCalls: 2,
+ },
+ {
+ name: "forward error fallback",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) {
+ // Use "contract not found" to trigger ShouldFallbackMulticall3
+ // Note: "execution reverted" does NOT trigger fallback (would also fail individually)
+ return nil, common.NewErrEndpointExecutionException(errors.New("contract not found"))
+ },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ {
+ name: "mc response nil",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) { return nil, nil },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ {
+ name: "mc response error",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, nil, common.NewErrJsonRpcExceptionExternal(-32000, "boom", ""))
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ {
+ name: "result unmarshal error",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, map[string]interface{}{"oops": "nope"}, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ {
+ name: "result hex error",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, "0xzz", nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ {
+ name: "decode error",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, "0x01", nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ {
+ name: "length mismatch",
+ requests: validRequests,
+ networkResponse: func() (*common.NormalizedResponse, error) {
+ resultHex := encodeAggregate3Results([]evm.Multicall3Result{{Success: true, ReturnData: []byte{0x01}}})
+ jrr := mustJsonRpcResponse(t, 1, resultHex, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ expectedProjCalls: 2,
+ expectedNetCalls: 1,
+ },
+ }
+
+ for _, tt := range cases {
+ t.Run(tt.name, func(t *testing.T) {
+ var mu sync.Mutex
+ projCalls := 0
+ netCalls := 0
+
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ mu.Lock()
+ netCalls++
+ mu.Unlock()
+ return tt.networkResponse()
+ },
+ func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ mu.Lock()
+ projCalls++
+ mu.Unlock()
+ return fallbackResponse(t, req), nil
+ },
+ nil,
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), tt.requests, nil)
+ require.True(t, handled)
+ mu.Lock()
+ assert.Equal(t, tt.expectedProjCalls, projCalls)
+ assert.Equal(t, tt.expectedNetCalls, netCalls)
+ mu.Unlock()
+ if len(responses) > 0 {
+ _, ok := responses[0].(*common.NormalizedResponse)
+ assert.True(t, ok)
+ }
+ })
+ }
+}
+
+func TestHandleEthCallBatchAggregation_NonFallbackError(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ projCalls := 0
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return nil, errors.New("boom")
+ },
+ func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ projCalls++
+ return fallbackResponse(t, req), nil
+ },
+ nil,
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ assert.Equal(t, 0, projCalls)
+ for _, resp := range responses {
+ _, ok := resp.(*common.NormalizedResponse)
+ assert.False(t, ok)
+ }
+}
+
+func TestHandleEthCallBatchAggregation_NewJsonRpcResponseError(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ results := []evm.Multicall3Result{{Success: true, ReturnData: []byte{0xaa}}, {Success: true, ReturnData: []byte{0xbb}}}
+ resultHex := encodeAggregate3Results(results)
+
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, resultHex, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ return fallbackResponse(t, req), nil
+ },
+ func(id interface{}, result interface{}, rpcError *common.ErrJsonRpcExceptionExternal) (*common.JsonRpcResponse, error) {
+ return nil, errors.New("boom")
+ },
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ for _, resp := range responses {
+ _, ok := resp.(*common.NormalizedResponse)
+ assert.False(t, ok)
+ }
+}
+
+func TestHandleEthCallBatchAggregation_SuccessAndFailureResults(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ results := []evm.Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa}},
+ {Success: false, ReturnData: []byte{0xbb}},
+ }
+ resultHex := encodeAggregate3Results(results)
+
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, resultHex, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ nil,
+ nil,
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ require.Len(t, responses, 2)
+
+ resp0, ok := responses[0].(*common.NormalizedResponse)
+ require.True(t, ok)
+ jrr, err := resp0.JsonRpcResponse(ctx)
+ require.NoError(t, err)
+ var decodedHex string
+ require.NoError(t, common.SonicCfg.Unmarshal(jrr.GetResultBytes(), &decodedHex))
+ assert.Equal(t, "0x"+hex.EncodeToString(results[0].ReturnData), decodedHex)
+
+ errResp, ok := responses[1].(*HttpJsonRpcErrorResponse)
+ require.True(t, ok)
+ errMap, ok := errResp.Error.(map[string]interface{})
+ require.True(t, ok)
+ assert.Equal(t, "0x"+hex.EncodeToString(results[1].ReturnData), errMap["data"])
+}
+
+func TestHandleEthCallBatchAggregation_PanicRecovery(t *testing.T) {
+ t.Run("panic in fallback forward is recovered", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+ defer cleanup()
+
+ // Panic recovery exists in forwardEthCallBatchCandidates (fallback path)
+ // The multicall3 network forward doesn't have panic recovery, but the
+ // fallback path does. So we make network forward fail to trigger fallback,
+ // then have the project forward (fallback) panic.
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ // Return "contract not found" error to trigger fallback via ShouldFallbackMulticall3
+ return nil, common.NewErrEndpointExecutionException(errors.New("contract not found"))
+ },
+ func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ // Panic in the fallback forward - this should be recovered
+ panic("test panic in fallback forward")
+ },
+ nil,
+ )
+
+ // The function should not crash - panic in fallback path should be recovered
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ // We expect error responses due to the panic
+ require.Len(t, responses, 2)
+ for _, resp := range responses {
+ require.NotNil(t, resp, "response should not be nil after panic recovery")
+ }
+ })
+}
+
+func TestHandleEthCallBatchAggregation_DetectEthCallBatchInfo_EmptyParams(t *testing.T) {
+ // An empty params array in eth_call is valid - the block param defaults to "latest".
+ // This test verifies that empty params requests are handled correctly.
+ requests := []json.RawMessage{
+ json.RawMessage(`{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[]}`),
+ json.RawMessage(`{"jsonrpc":"2.0","id":2,"method":"eth_call","params":[]}`),
+ }
+
+ // detectEthCallBatchInfo should return valid info with "latest" block ref
+ info, err := detectEthCallBatchInfo(requests, "evm", "123")
+ require.NoError(t, err)
+ require.NotNil(t, info, "empty params defaults to latest - should be valid for batching")
+ assert.Equal(t, "evm:123", info.networkId)
+ assert.Equal(t, "latest", info.blockRef)
+ assert.Equal(t, interface{}("latest"), info.blockParam)
+}
+
+func TestHandleEthCallBatchAggregation_CacheHits(t *testing.T) {
+ t.Run("all cached - no multicall3 call", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, network, ctx, cleanup := setupBatchHandlerWithCache(t, cfg)
+ defer cleanup()
+
+ // Set up mock cache
+ mockCache := &common.MockCacheDal{}
+ network.cacheDal = mockCache
+
+ // Create cached responses
+ cachedResp1 := createCachedResponse(t, common.NewNormalizedRequest([]byte(`{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"},"latest"]}`)), "0xcached1")
+ cachedResp2 := createCachedResponse(t, common.NewNormalizedRequest([]byte(`{"jsonrpc":"2.0","id":2,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"},"latest"]}`)), "0xcached2")
+
+ // Mock cache hits for both requests
+ mockCache.On("Get", mock.Anything, mock.Anything).Return(cachedResp1, nil).Once()
+ mockCache.On("Get", mock.Anything, mock.Anything).Return(cachedResp2, nil).Once()
+
+ // Track if multicall3 was called (it shouldn't be)
+ multicall3Called := false
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ multicall3Called = true
+ return nil, errors.New("should not be called")
+ },
+ nil,
+ nil,
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ require.False(t, multicall3Called, "multicall3 should not be called when all requests are cached")
+ require.Len(t, responses, 2)
+
+ // Verify both responses are from cache
+ for i, resp := range responses {
+ nr, ok := resp.(*common.NormalizedResponse)
+ require.True(t, ok, "response %d should be NormalizedResponse", i)
+ assert.True(t, nr.FromCache(), "response %d should be from cache", i)
+ }
+
+ mockCache.AssertExpectations(t)
+ })
+
+ t.Run("mixed cached and uncached", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, network, ctx, cleanup := setupBatchHandlerWithCache(t, cfg)
+ defer cleanup()
+
+ // Set up mock cache - first request cached, second not
+ mockCache := &common.MockCacheDal{}
+ network.cacheDal = mockCache
+
+ // Create cached response for first request
+ cachedResp := createCachedResponse(t, common.NewNormalizedRequest([]byte(`{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"},"latest"]}`)), "0xcached_first")
+
+ // First request - cache hit, second request - cache miss
+ mockCache.On("Get", mock.Anything, mock.Anything).Return(cachedResp, nil).Once()
+ mockCache.On("Get", mock.Anything, mock.Anything).Return(nil, nil).Once()
+
+ // Cache set should be called for the uncached request
+ mockCache.On("Set", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+
+ // Only the second (uncached) request should go through multicall3
+ // But since we only have 1 uncached request, it falls back to individual forwarding
+ withBatchStubs(t,
+ nil,
+ func(ctx context.Context, project *PreparedProject, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ // This is the fallback for single uncached request
+ return fallbackResponse(t, req), nil
+ },
+ nil,
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ require.Len(t, responses, 2)
+
+ // First response should be from cache
+ resp0, ok := responses[0].(*common.NormalizedResponse)
+ require.True(t, ok)
+ assert.True(t, resp0.FromCache())
+
+ // Second response should be from fallback (not cache)
+ _, ok = responses[1].(*common.NormalizedResponse)
+ require.True(t, ok)
+
+ mockCache.AssertExpectations(t)
+ })
+
+ t.Run("cache write after successful multicall3", func(t *testing.T) {
+ cfg := baseBatchConfig()
+ server, project, network, ctx, cleanup := setupBatchHandlerWithCache(t, cfg)
+ defer cleanup()
+
+ // Set up mock cache - all cache misses
+ mockCache := &common.MockCacheDal{}
+ network.cacheDal = mockCache
+
+ mockCache.On("Get", mock.Anything, mock.Anything).Return(nil, nil).Times(2)
+
+ // Expect cache Set to be called for each successful response
+ setCalled := make(chan struct{}, 2)
+ mockCache.On("Set", mock.Anything, mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) {
+ setCalled <- struct{}{}
+ }).Times(2)
+
+ results := []evm.Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa}},
+ {Success: true, ReturnData: []byte{0xbb}},
+ }
+ resultHex := encodeAggregate3Results(results)
+
+ withBatchStubs(t,
+ func(ctx context.Context, network *Network, req *common.NormalizedRequest) (*common.NormalizedResponse, error) {
+ jrr := mustJsonRpcResponse(t, 1, resultHex, nil)
+ return common.NewNormalizedResponse().WithJsonRpcResponse(jrr), nil
+ },
+ nil,
+ nil,
+ )
+
+ handled, responses := runHandle(t, ctx, server, project, defaultBatchInfo(), validBatchRequests(t), nil)
+ require.True(t, handled)
+ require.Len(t, responses, 2)
+
+ // Wait for async cache writes (with timeout)
+ for i := 0; i < 2; i++ {
+ select {
+ case <-setCalled:
+ // Good, cache set was called
+ case <-time.After(2 * time.Second):
+ t.Fatal("timeout waiting for cache Set to be called")
+ }
+ }
+
+ mockCache.AssertExpectations(t)
+ })
+}
diff --git a/erpc/http_batch_eth_call_helpers_test.go b/erpc/http_batch_eth_call_helpers_test.go
new file mode 100644
index 000000000..11a739109
--- /dev/null
+++ b/erpc/http_batch_eth_call_helpers_test.go
@@ -0,0 +1,294 @@
+package erpc
+
+import (
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/erpc/erpc/architecture/evm"
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/data"
+ "github.com/erpc/erpc/util"
+ "github.com/rs/zerolog/log"
+ "github.com/stretchr/testify/require"
+)
+
+func baseBatchConfig() *common.Config {
+ return &common.Config{
+ Server: &common.ServerConfig{IncludeErrorDetails: &common.TRUE},
+ Projects: []*common.ProjectConfig{
+ {
+ Id: "test_project",
+ Networks: []*common.NetworkConfig{
+ {
+ Architecture: common.ArchitectureEvm,
+ Evm: &common.EvmNetworkConfig{ChainId: 123},
+ },
+ },
+ Upstreams: []*common.UpstreamConfig{
+ {
+ Type: common.UpstreamTypeEvm,
+ Endpoint: "http://rpc1.localhost",
+ Evm: &common.EvmUpstreamConfig{ChainId: 123},
+ },
+ },
+ },
+ },
+ RateLimiters: &common.RateLimiterConfig{},
+ }
+}
+
+func rateLimitConfig(budgetId string) *common.RateLimiterConfig {
+ return &common.RateLimiterConfig{
+ Budgets: []*common.RateLimitBudgetConfig{
+ {
+ Id: budgetId,
+ Rules: []*common.RateLimitRuleConfig{
+ {Method: "eth_call", MaxCount: 0, Period: common.RateLimitPeriodSecond},
+ },
+ },
+ },
+ }
+}
+
+func setupBatchHandler(t *testing.T, cfg *common.Config) (*HttpServer, *PreparedProject, context.Context, func()) {
+ t.Helper()
+ util.SetupMocksForEvmStatePoller()
+
+ logger := log.Logger
+ ctx, cancel := context.WithCancel(context.Background())
+
+ ssr, err := data.NewSharedStateRegistry(ctx, &logger, &common.SharedStateConfig{
+ Connector: &common.ConnectorConfig{
+ Driver: "memory",
+ Memory: &common.MemoryConnectorConfig{
+ MaxItems: 100_000,
+ MaxTotalSize: "1GB",
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ erpcInstance, err := NewERPC(ctx, &logger, ssr, nil, cfg)
+ require.NoError(t, err)
+ erpcInstance.Bootstrap(ctx)
+
+ server, err := NewHttpServer(ctx, &logger, cfg.Server, cfg.HealthCheck, cfg.Admin, erpcInstance)
+ require.NoError(t, err)
+
+ project, err := erpcInstance.GetProject("test_project")
+ require.NoError(t, err)
+
+ cleanup := func() {
+ cancel()
+ util.AssertNoPendingMocks(t, 0)
+ util.ResetGock()
+ }
+
+ return server, project, ctx, cleanup
+}
+
+func validBatchRequests(t *testing.T) []json.RawMessage {
+ callObj := map[string]interface{}{
+ "to": "0x0000000000000000000000000000000000000001",
+ "data": "0x",
+ }
+ return []json.RawMessage{
+ buildEthCallRaw(t, 1, callObj, "latest"),
+ buildEthCallRaw(t, 2, callObj, "latest"),
+ }
+}
+
+func invalidBatchRequests(t *testing.T) []json.RawMessage {
+ callObj := map[string]interface{}{
+ "to": "0x0000000000000000000000000000000000000001",
+ "data": "0x",
+ "value": "0x1",
+ }
+ return []json.RawMessage{
+ buildEthCallRaw(t, 1, callObj, "latest"),
+ buildEthCallRaw(t, 2, callObj, "latest"),
+ }
+}
+
+func buildEthCallRaw(t *testing.T, id interface{}, callObj map[string]interface{}, blockParam interface{}) json.RawMessage {
+ t.Helper()
+ params := []interface{}{callObj}
+ if blockParam != nil {
+ params = append(params, blockParam)
+ }
+ body := map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": id,
+ "method": "eth_call",
+ "params": params,
+ }
+ raw, err := common.SonicCfg.Marshal(body)
+ require.NoError(t, err)
+ return raw
+}
+
+func defaultBatchInfo() *ethCallBatchInfo {
+ return &ethCallBatchInfo{networkId: "evm:123", blockRef: "latest", blockParam: "latest"}
+}
+
+func runHandle(t *testing.T, ctx context.Context, server *HttpServer, project *PreparedProject, batchInfo *ethCallBatchInfo, requests []json.RawMessage, headers http.Header) (bool, []interface{}) {
+ t.Helper()
+ responses := make([]interface{}, len(requests))
+ startedAt := time.Now()
+ req := httptest.NewRequest("POST", "http://localhost", nil)
+ req.RemoteAddr = "127.0.0.1:1234"
+ if headers != nil {
+ req.Header = headers
+ }
+
+ handled := server.handleEthCallBatchAggregation(
+ ctx,
+ &startedAt,
+ req,
+ project,
+ log.Logger,
+ batchInfo,
+ requests,
+ req.Header,
+ req.URL.Query(),
+ responses,
+ )
+
+ return handled, responses
+}
+
+type batchNetworkForward func(context.Context, *Network, *common.NormalizedRequest) (*common.NormalizedResponse, error)
+type batchProjectForward func(context.Context, *PreparedProject, *Network, *common.NormalizedRequest) (*common.NormalizedResponse, error)
+type batchNewJsonRpcResponse func(id interface{}, result interface{}, rpcError *common.ErrJsonRpcExceptionExternal) (*common.JsonRpcResponse, error)
+
+func withBatchStubs(t *testing.T, network batchNetworkForward, project batchProjectForward, newResp batchNewJsonRpcResponse) {
+ t.Helper()
+ origNetwork := forwardBatchNetwork
+ origProject := forwardBatchProject
+ origNew := newBatchJsonRpcResponse
+ if network != nil {
+ forwardBatchNetwork = network
+ }
+ if project != nil {
+ forwardBatchProject = project
+ }
+ if newResp != nil {
+ newBatchJsonRpcResponse = newResp
+ }
+ t.Cleanup(func() {
+ forwardBatchNetwork = origNetwork
+ forwardBatchProject = origProject
+ newBatchJsonRpcResponse = origNew
+ })
+}
+
+func fallbackResponse(t *testing.T, req *common.NormalizedRequest) *common.NormalizedResponse {
+ t.Helper()
+ jrr := mustJsonRpcResponse(t, req.ID(), "0xfeed", nil)
+ return common.NewNormalizedResponse().WithRequest(req).WithJsonRpcResponse(jrr)
+}
+
+func mustJsonRpcResponse(t *testing.T, id interface{}, result interface{}, rpcErr *common.ErrJsonRpcExceptionExternal) *common.JsonRpcResponse {
+ t.Helper()
+ jrr, err := common.NewJsonRpcResponse(id, result, rpcErr)
+ require.NoError(t, err)
+ return jrr
+}
+
+func encodeAggregate3Results(results []evm.Multicall3Result) string {
+ encoded := encodeAggregate3ResultsBytes(results)
+ return "0x" + hex.EncodeToString(encoded)
+}
+
+func encodeAggregate3ResultsBytes(results []evm.Multicall3Result) []byte {
+ // Offsets are relative to start of array content (after length word),
+ // so the offset table size is just N*32 (not including the length word)
+ headSize := 32 * len(results)
+ offsets := make([]uint64, len(results))
+ elems := make([][]byte, len(results))
+ cur := uint64(headSize)
+
+ for i, res := range results {
+ elems[i] = encodeAggregate3ResultElement(res)
+ offsets[i] = cur
+ cur += uint64(len(elems[i]))
+ }
+
+ array := make([]byte, 0, int(cur))
+ array = append(array, encodeUint64(uint64(len(results)))...)
+ for _, off := range offsets {
+ array = append(array, encodeUint64(off)...)
+ }
+ for _, elem := range elems {
+ array = append(array, elem...)
+ }
+
+ out := make([]byte, 0, 32+len(array))
+ out = append(out, encodeUint64(32)...)
+ out = append(out, array...)
+ return out
+}
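+
+// Worked example (illustrative, not part of the original change): for a single
+// successful result with ReturnData = 0xaa, encodeAggregate3Results produces the
+// standard ABI encoding of a dynamic (bool success, bytes returnData)[] value,
+// laid out as seven 32-byte words:
+//   word 0: 0x20  offset from the start of the return data to the array length
+//   word 1: 0x01  array length (one element)
+//   word 2: 0x20  offset of element 0, relative to the word after the length
+//   word 3: 0x01  element head: success flag (true)
+//   word 4: 0x40  element head: offset of returnData within the element (two 32-byte head words)
+//   word 5: 0x01  returnData length in bytes
+//   word 6: 0xaa followed by 31 zero bytes (data right-padded to a full word)
+// The helpers below (encodeAggregate3ResultElement, encodeUint64, encodeBool,
+// encodeBytes) assemble exactly this layout.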
+
+func encodeAggregate3ResultElement(result evm.Multicall3Result) []byte {
+ head := make([]byte, 0, 64)
+ head = append(head, encodeBool(result.Success)...)
+ head = append(head, encodeUint64(64)...)
+ tail := encodeBytes(result.ReturnData)
+ return append(head, tail...)
+}
+
+func encodeUint64(value uint64) []byte {
+ out := make([]byte, 32)
+ out[24] = byte(value >> 56)
+ out[25] = byte(value >> 48)
+ out[26] = byte(value >> 40)
+ out[27] = byte(value >> 32)
+ out[28] = byte(value >> 24)
+ out[29] = byte(value >> 16)
+ out[30] = byte(value >> 8)
+ out[31] = byte(value)
+ return out
+}
+
+func encodeBool(value bool) []byte {
+ out := make([]byte, 32)
+ if value {
+ out[31] = 1
+ }
+ return out
+}
+
+func encodeBytes(data []byte) []byte {
+ out := make([]byte, 0, 32+len(data)+32)
+ out = append(out, encodeUint64(uint64(len(data)))...)
+ out = append(out, data...)
+ pad := (32 - (len(data) % 32)) % 32
+ if pad > 0 {
+ out = append(out, make([]byte, pad)...)
+ }
+ return out
+}
+
+func setupBatchHandlerWithCache(t *testing.T, cfg *common.Config) (*HttpServer, *PreparedProject, *Network, context.Context, func()) {
+ t.Helper()
+ server, project, ctx, cleanup := setupBatchHandler(t, cfg)
+
+ network, err := project.GetNetwork(ctx, "evm:123")
+ require.NoError(t, err)
+
+ return server, project, network, ctx, cleanup
+}
+
+func createCachedResponse(t *testing.T, req *common.NormalizedRequest, result string) *common.NormalizedResponse {
+ t.Helper()
+ jrr, err := common.NewJsonRpcResponse(req.ID(), result, nil)
+ require.NoError(t, err)
+ resp := common.NewNormalizedResponse().WithRequest(req).WithJsonRpcResponse(jrr)
+ resp.SetFromCache(true)
+ return resp
+}
diff --git a/erpc/http_server.go b/erpc/http_server.go
index c98e0ee66..377115324 100644
--- a/erpc/http_server.go
+++ b/erpc/http_server.go
@@ -405,170 +405,196 @@ func (s *HttpServer) createRequestHandler() http.Handler {
// We no longer need the top-level body; drop reference early to free its backing array
body = nil
- for i, reqBody := range requests {
- wg.Add(1)
- go func(index int, rawReq json.RawMessage, headers http.Header, queryArgs map[string][]string) {
- defer func() {
- defer wg.Done()
- if rec := recover(); rec != nil {
- telemetry.MetricUnexpectedPanicTotal.WithLabelValues(
- "request-handler",
- fmt.Sprintf("project:%s network:%s", architecture, chainId),
- common.ErrorFingerprint(rec),
- ).Inc()
- lg.Error().
- Interface("panic", rec).
- Str("stack", string(debug.Stack())).
- Msgf("unexpected server panic on per-request handler")
- err := fmt.Errorf("unexpected server panic on per-request handler: %v stack: %s", rec, string(debug.Stack()))
- responses[index] = processErrorBody(&lg, &startedAt, nil, err, s.serverCfg.IncludeErrorDetails)
- }
- }()
+ batchHandled := false
+ if isBatch && !isAdmin && !isHealthCheck {
+ batchInfo, detectErr := detectEthCallBatchInfo(requests, architecture, chainId)
+ if detectErr != nil {
+ lg.Info().Err(detectErr).
+ Int("requestCount", len(requests)).
+ Msg("eth_call batch detection failed, processing individually")
+ }
+ if batchInfo != nil && isMulticall3AggregationEnabled(project, batchInfo.networkId) {
+ batchHandled = s.handleEthCallBatchAggregation(
+ httpCtx,
+ &startedAt,
+ r,
+ project,
+ lg,
+ batchInfo,
+ requests,
+ headers,
+ queryArgs,
+ responses,
+ )
+ }
+ }
- nq := common.NewNormalizedRequest(rawReq)
- // Help GC: drop reference to the rawReq slice copy in the parent slice as soon as possible
- rawReq = nil
- requestCtx := common.StartRequestSpan(httpCtx, nq)
+ if !batchHandled {
+ for i, reqBody := range requests {
+ wg.Add(1)
+ go func(index int, rawReq json.RawMessage, headers http.Header, queryArgs map[string][]string) {
+ defer func() {
+ defer wg.Done()
+ if rec := recover(); rec != nil {
+ telemetry.MetricUnexpectedPanicTotal.WithLabelValues(
+ "request-handler",
+ fmt.Sprintf("project:%s network:%s", architecture, chainId),
+ common.ErrorFingerprint(rec),
+ ).Inc()
+ lg.Error().
+ Interface("panic", rec).
+ Str("stack", string(debug.Stack())).
+ Msgf("unexpected server panic on per-request handler")
+ err := fmt.Errorf("unexpected server panic on per-request handler: %v stack: %s", rec, string(debug.Stack()))
+ responses[index] = processErrorBody(&lg, &startedAt, nil, err, s.serverCfg.IncludeErrorDetails)
+ }
+ }()
- // Resolve and set real client IP before any rate limiting/auth checks
- clientIP := s.resolveRealClientIP(r)
- nq.SetClientIP(clientIP)
+ nq := common.NewNormalizedRequest(rawReq)
+ // Help GC: drop reference to the rawReq slice copy in the parent slice as soon as possible
+ rawReq = nil
+ requestCtx := common.StartRequestSpan(httpCtx, nq)
- // Validate the raw JSON-RPC payload early
- if err := nq.Validate(); err != nil {
- responses[index] = processErrorBody(&lg, &startedAt, nq, err, &common.TRUE)
- common.EndRequestSpan(requestCtx, nil, responses[index])
- return
- }
+ // Resolve and set real client IP before any rate limiting/auth checks
+ clientIP := s.resolveRealClientIP(r)
+ nq.SetClientIP(clientIP)
- method, _ := nq.Method()
- rlg := lg.With().Str("method", method).Logger()
+ // Validate the raw JSON-RPC payload early
+ if err := nq.Validate(); err != nil {
+ responses[index] = processErrorBody(&lg, &startedAt, nq, err, &common.TRUE)
+ common.EndRequestSpan(requestCtx, nil, responses[index])
+ return
+ }
- var ap *auth.AuthPayload
- var err error
+ method, _ := nq.Method()
+ rlg := lg.With().Str("method", method).Logger()
- if project != nil {
- ap, err = auth.NewPayloadFromHttp(method, r.RemoteAddr, headers, queryArgs)
- } else if isAdmin {
- ap, err = auth.NewPayloadFromHttp(method, r.RemoteAddr, headers, queryArgs)
- }
- if err != nil {
- responses[index] = processErrorBody(&rlg, &startedAt, nq, err, &common.TRUE)
- common.EndRequestSpan(requestCtx, nil, err)
- return
- }
+ var ap *auth.AuthPayload
+ var err error
- if isAdmin {
- _, err := s.erpc.AdminAuthenticate(requestCtx, nq, method, ap)
- if err != nil {
- responses[index] = processErrorBody(&rlg, &startedAt, nq, err, &common.TRUE)
- common.EndRequestSpan(requestCtx, nil, err)
- return
+ if project != nil {
+ ap, err = auth.NewPayloadFromHttp(method, r.RemoteAddr, headers, queryArgs)
+ } else if isAdmin {
+ ap, err = auth.NewPayloadFromHttp(method, r.RemoteAddr, headers, queryArgs)
}
- } else {
- user, err := project.AuthenticateConsumer(requestCtx, nq, method, ap)
if err != nil {
- responses[index] = processErrorBody(&rlg, &startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, err, &common.TRUE)
common.EndRequestSpan(requestCtx, nil, err)
return
}
- if user != nil {
- rlg = rlg.With().Str("userId", user.Id).Logger()
- }
- nq.SetUser(user)
- }
- if isAdmin {
- if s.adminCfg != nil {
- resp, err := s.erpc.AdminHandleRequest(requestCtx, nq)
+ if isAdmin {
+ _, err := s.erpc.AdminAuthenticate(requestCtx, nq, method, ap)
if err != nil {
responses[index] = processErrorBody(&rlg, &startedAt, nq, err, &common.TRUE)
common.EndRequestSpan(requestCtx, nil, err)
return
}
- responses[index] = resp
- common.EndRequestSpan(requestCtx, resp, nil)
- return
} else {
- responses[index] = processErrorBody(
- &rlg,
- &startedAt,
- nq,
- common.NewErrAuthUnauthorized(
- "",
- "admin is not enabled for this project",
- ),
- s.serverCfg.IncludeErrorDetails,
- )
- common.EndRequestSpan(requestCtx, nil, err)
- return
+ user, err := project.AuthenticateConsumer(requestCtx, nq, method, ap)
+ if err != nil {
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
+ }
+ if user != nil {
+ rlg = rlg.With().Str("userId", user.Id).Logger()
+ }
+ nq.SetUser(user)
}
- }
- var networkId string
-
- if architecture == "" || chainId == "" {
- var req map[string]interface{}
- if err := common.SonicCfg.Unmarshal(rawReq, &req); err != nil {
- responses[index] = processErrorBody(&rlg, &startedAt, nq, common.NewErrInvalidRequest(err), &common.TRUE)
- common.EndRequestSpan(requestCtx, nil, err)
- return
+ if isAdmin {
+ if s.adminCfg != nil {
+ resp, err := s.erpc.AdminHandleRequest(requestCtx, nq)
+ if err != nil {
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, err, &common.TRUE)
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
+ }
+ responses[index] = resp
+ common.EndRequestSpan(requestCtx, resp, nil)
+ return
+ } else {
+ responses[index] = processErrorBody(
+ &rlg,
+ &startedAt,
+ nq,
+ common.NewErrAuthUnauthorized(
+ "",
+ "admin is not enabled for this project",
+ ),
+ s.serverCfg.IncludeErrorDetails,
+ )
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
+ }
}
- if networkIdFromBody, ok := req["networkId"].(string); ok {
- networkId = networkIdFromBody
- parts := strings.Split(networkId, ":")
- if len(parts) == 2 {
- architecture = parts[0]
- chainId = parts[1]
+
+ var networkId string
+
+ if architecture == "" || chainId == "" {
+ var req map[string]interface{}
+ if err := common.SonicCfg.Unmarshal(rawReq, &req); err != nil {
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, common.NewErrInvalidRequest(err), &common.TRUE)
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
}
+ if networkIdFromBody, ok := req["networkId"].(string); ok {
+ networkId = networkIdFromBody
+ parts := strings.Split(networkId, ":")
+ if len(parts) == 2 {
+ architecture = parts[0]
+ chainId = parts[1]
+ }
+ }
+ } else {
+ networkId = fmt.Sprintf("%s:%s", architecture, chainId)
}
- } else {
- networkId = fmt.Sprintf("%s:%s", architecture, chainId)
- }
- if architecture == "" || chainId == "" {
- responses[index] = processErrorBody(&rlg, &startedAt, nq, common.NewErrInvalidRequest(fmt.Errorf(
- "architecture and chain must be provided in URL (for example //evm/42161) or in request body (for example \"networkId\":\"evm:42161\") or configureed via domain aliasing",
- )), s.serverCfg.IncludeErrorDetails)
- common.EndRequestSpan(requestCtx, nil, err)
- return
- }
+ if architecture == "" || chainId == "" {
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, common.NewErrInvalidRequest(fmt.Errorf(
+ "architecture and chain must be provided in URL (for example //evm/42161) or in request body (for example \"networkId\":\"evm:42161\") or configureed via domain aliasing",
+ )), s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
+ }
- nw, err := project.GetNetwork(httpCtx, networkId)
- if err != nil {
- responses[index] = processErrorBody(&rlg, &startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
- common.EndRequestSpan(requestCtx, nil, err)
- return
- }
- nq.SetNetwork(nw)
+ nw, err := project.GetNetwork(httpCtx, networkId)
+ if err != nil {
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
+ }
+ nq.SetNetwork(nw)
- nq.ApplyDirectiveDefaults(nw.Config().DirectiveDefaults)
- // Configure how to store User-Agent (raw vs simplified) based on project config
- uaMode := common.UserAgentTrackingModeSimplified
- if project != nil && project.Config.UserAgentMode != "" {
- uaMode = project.Config.UserAgentMode
- }
- nq.EnrichFromHttp(headers, queryArgs, uaMode)
- rlg.Trace().Interface("directives", nq.Directives()).Msgf("applied request directives")
+ nq.ApplyDirectiveDefaults(nw.Config().DirectiveDefaults)
+ // Configure how to store User-Agent (raw vs simplified) based on project config
+ uaMode := common.UserAgentTrackingModeSimplified
+ if project != nil && project.Config.UserAgentMode != "" {
+ uaMode = project.Config.UserAgentMode
+ }
+ nq.EnrichFromHttp(headers, queryArgs, uaMode)
+ rlg.Trace().Interface("directives", nq.Directives()).Msgf("applied request directives")
- resp, err := project.Forward(requestCtx, networkId, nq)
- if err != nil {
- // If an error occurred but a response was produced (e.g., lastValidResponse),
- // release it now since we are not going to write it.
- if resp != nil {
- go resp.Release()
+ resp, err := project.Forward(requestCtx, networkId, nq)
+ if err != nil {
+ // If an error occurred but a response was produced (e.g., lastValidResponse),
+ // release it now since we are not going to write it.
+ if resp != nil {
+ go resp.Release()
+ }
+ responses[index] = processErrorBody(&rlg, &startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
+ common.EndRequestSpan(requestCtx, nil, err)
+ return
}
- responses[index] = processErrorBody(&rlg, &startedAt, nq, err, s.serverCfg.IncludeErrorDetails)
- common.EndRequestSpan(requestCtx, nil, err)
- return
- }
- responses[index] = resp
- common.EndRequestSpan(requestCtx, resp, nil)
- }(i, reqBody, headers, queryArgs)
- }
+ responses[index] = resp
+ common.EndRequestSpan(requestCtx, resp, nil)
+ }(i, reqBody, headers, queryArgs)
+ }
- wg.Wait()
+ wg.Wait()
+ }
httpCtx, writeResponseSpan := common.StartDetailSpan(httpCtx, "HttpServer.WriteResponse")
defer writeResponseSpan.End()
@@ -1644,3 +1670,25 @@ func stripAddrDecorations(s string) string {
}
return s
}
+
+// isMulticall3AggregationEnabled checks if multicall3 aggregation is enabled for a given network.
+// Returns true (default) if no explicit config is set, or if the config is explicitly set to enabled.
+func isMulticall3AggregationEnabled(project *PreparedProject, networkId string) bool {
+ if project == nil || project.Config == nil {
+ return true // Default to enabled
+ }
+
+ project.cfgMu.RLock()
+ defer project.cfgMu.RUnlock()
+
+ for _, nwCfg := range project.Config.Networks {
+ if nwCfg != nil && nwCfg.NetworkId() == networkId {
+ if nwCfg.Evm != nil && nwCfg.Evm.Multicall3Aggregation != nil {
+ return nwCfg.Evm.Multicall3Aggregation.Enabled
+ }
+ break
+ }
+ }
+
+ return true // Default to enabled
+}
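+
+// Example (illustrative sketch): aggregation can be switched off per network via
+// the EVM network config, e.g.
+//
+//	networkCfg.Evm.Multicall3Aggregation = &common.Multicall3AggregationConfig{Enabled: false}
+//
+// as exercised by TestHttpServer_BatchEthCall_MulticallAggregationDisabled.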
diff --git a/erpc/http_server_batch_eth_call_test.go b/erpc/http_server_batch_eth_call_test.go
new file mode 100644
index 000000000..7c8e22c7b
--- /dev/null
+++ b/erpc/http_server_batch_eth_call_test.go
@@ -0,0 +1,112 @@
+package erpc
+
+import (
+ "encoding/hex"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/erpc/erpc/architecture/evm"
+ "github.com/erpc/erpc/common"
+ "github.com/erpc/erpc/util"
+ "github.com/h2non/gock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHttpServer_BatchEthCall_MulticallAggregation(t *testing.T) {
+ util.ResetGock()
+ defer util.ResetGock()
+ util.SetupMocksForEvmStatePoller()
+ defer util.AssertNoPendingMocks(t, 0)
+
+ cfg := baseBatchConfig()
+ sendRequest, _, _, shutdown, _ := createServerTestFixtures(cfg, t)
+ defer shutdown()
+
+ multicallAddr := strings.ToLower("0xcA11bde05977b3631167028862bE2a173976CA11")
+ results := []evm.Multicall3Result{
+ {Success: true, ReturnData: []byte{0xaa}},
+ {Success: true, ReturnData: []byte{0xbb}},
+ }
+ resultHex := encodeAggregate3Results(results)
+
+ gock.New("http://rpc1.localhost").
+ Post("/").
+ Times(1).
+ Filter(func(request *http.Request) bool {
+ body := strings.ToLower(util.SafeReadBody(request))
+ return strings.Contains(body, "\"eth_call\"") && strings.Contains(body, multicallAddr)
+ }).
+ Reply(200).
+ JSON(map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": 1,
+ "result": resultHex,
+ })
+
+ batchBody := `[
+ {"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"},"latest"]},
+ {"jsonrpc":"2.0","id":2,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000002","data":"0x"},"latest"]}
+ ]`
+
+ status, _, body := sendRequest(batchBody, nil, map[string]string{})
+ assert.Equal(t, http.StatusOK, status)
+
+ var responses []map[string]interface{}
+ require.NoError(t, common.SonicCfg.Unmarshal([]byte(body), &responses))
+ require.Len(t, responses, 2)
+
+ assert.Equal(t, float64(1), responses[0]["id"])
+ assert.Equal(t, "0x"+hex.EncodeToString(results[0].ReturnData), responses[0]["result"])
+ assert.Equal(t, float64(2), responses[1]["id"])
+ assert.Equal(t, "0x"+hex.EncodeToString(results[1].ReturnData), responses[1]["result"])
+}
+
+func TestHttpServer_BatchEthCall_MulticallAggregationDisabled(t *testing.T) {
+ util.ResetGock()
+ defer util.ResetGock()
+ util.SetupMocksForEvmStatePoller()
+ defer util.AssertNoPendingMocks(t, 0)
+
+ // Create config with multicall3 aggregation disabled
+ cfg := baseBatchConfig()
+ cfg.Projects[0].Networks[0].Evm.Multicall3Aggregation = &common.Multicall3AggregationConfig{Enabled: false}
+
+ sendRequest, _, _, shutdown, _ := createServerTestFixtures(cfg, t)
+ defer shutdown()
+
+ // When multicall3 is disabled, each eth_call should be sent individually
+ // So we mock 2 individual eth_call responses instead of 1 multicall
+ gock.New("http://rpc1.localhost").
+ Post("/").
+ Times(2).
+ Filter(func(request *http.Request) bool {
+ body := util.SafeReadBody(request)
+ return strings.Contains(body, "\"eth_call\"")
+ }).
+ Reply(200).
+ JSON(map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": 1,
+ "result": "0xcc",
+ })
+
+ batchBody := `[
+ {"jsonrpc":"2.0","id":1,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000001","data":"0x"},"latest"]},
+ {"jsonrpc":"2.0","id":2,"method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000002","data":"0x"},"latest"]}
+ ]`
+
+ status, _, body := sendRequest(batchBody, nil, map[string]string{})
+ assert.Equal(t, http.StatusOK, status)
+
+ var responses []map[string]interface{}
+ require.NoError(t, common.SonicCfg.Unmarshal([]byte(body), &responses))
+ require.Len(t, responses, 2)
+
+ // Both responses should have the individual result
+ assert.Equal(t, float64(1), responses[0]["id"])
+ assert.Equal(t, "0xcc", responses[0]["result"])
+ assert.Equal(t, float64(2), responses[1]["id"])
+ assert.Equal(t, "0xcc", responses[1]["result"])
+}
diff --git a/erpc/networks.go b/erpc/networks.go
index 078dd371d..967ca6b9e 100644
--- a/erpc/networks.go
+++ b/erpc/networks.go
@@ -26,6 +26,7 @@ import (
type FailsafeExecutor struct {
method string
finalities []common.DataFinalityState
+ upstreamGroup string
executor failsafe.Executor[*common.NormalizedResponse]
timeout *time.Duration
consensusPolicyEnabled bool
@@ -49,6 +50,24 @@ type Network struct {
initializer *util.Initializer
}
+type skipNetworkRateLimitKey struct{}
+
+func withSkipNetworkRateLimit(ctx context.Context) context.Context {
+ return context.WithValue(ctx, skipNetworkRateLimitKey{}, true)
+}
+
+func shouldSkipNetworkRateLimit(ctx context.Context) bool {
+ if ctx == nil {
+ return false
+ }
+ if v := ctx.Value(skipNetworkRateLimitKey{}); v != nil {
+ if skip, ok := v.(bool); ok && skip {
+ return true
+ }
+ }
+ return false
+}
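+
+// Usage sketch (illustrative): a caller that has already accounted for rate
+// limiting can opt a forwarded request out of the network-level limiter:
+//
+//	ctx = withSkipNetworkRateLimit(ctx)
+//	resp, err := network.Forward(ctx, req)
+//
+// Forward consults shouldSkipNetworkRateLimit only around acquireRateLimitPermit;
+// the rest of the forwarding pipeline behaves as before.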
+
func (n *Network) Bootstrap(ctx context.Context) error {
// Initialize policy evaluator if configured
if n.cfg.SelectionPolicy != nil {
@@ -330,6 +349,16 @@ func (n *Network) Forward(ctx context.Context, req *common.NormalizedRequest) (*
forwardSpan.SetAttributes(attribute.Bool("cache.hit", false))
}
+ // Get failsafe executor first to know if we need to filter upstreams by group
+ failsafeExecutor := n.getFailsafeExecutor(ctx, req)
+ if failsafeExecutor == nil {
+ err := errors.New("no failsafe executor found for this request")
+ if mlx != nil {
+ mlx.Close(ctx, nil, err)
+ }
+ return nil, err
+ }
+
_, upstreamSpan := common.StartDetailSpan(ctx, "GetSortedUpstreams")
upsList, err := n.upstreamsRegistry.GetSortedUpstreams(ctx, n.networkId, method)
upstreamSpan.SetAttributes(attribute.Int("upstreams.count", len(upsList)))
@@ -352,6 +381,51 @@ func (n *Network) Forward(ctx context.Context, req *common.NormalizedRequest) (*
return nil, err
}
+ // Filter upstreams by group if the failsafe executor specifies a group
+ // This is primarily used for consensus policies that should only compare
+ // responses from a specific group of upstreams (e.g., public RPC endpoints)
+ // Skip group filtering if UseUpstream directive is set - allows targeting any upstream for debugging
+ useUpstreamDirective := ""
+ if req.Directives() != nil {
+ useUpstreamDirective = req.Directives().UseUpstream
+ }
+ if failsafeExecutor.upstreamGroup != "" && useUpstreamDirective == "" {
+ filteredUpstreams := make([]common.Upstream, 0, len(upsList))
+ for _, u := range upsList {
+ if cfg := u.Config(); cfg != nil && cfg.Group == failsafeExecutor.upstreamGroup {
+ filteredUpstreams = append(filteredUpstreams, u)
+ }
+ }
+ lg.Debug().
+ Str("upstreamGroup", failsafeExecutor.upstreamGroup).
+ Int("originalCount", len(upsList)).
+ Int("filteredCount", len(filteredUpstreams)).
+ Msgf("filtered upstreams by group for failsafe policy")
+ if len(filteredUpstreams) == 0 {
+ err := common.NewErrFailsafeConfiguration(
+ fmt.Errorf("no upstreams match the configured group '%s' for failsafe policy (had %d upstreams before filtering, method=%s)",
+ failsafeExecutor.upstreamGroup, len(upsList), method),
+ map[string]interface{}{
+ "upstreamGroup": failsafeExecutor.upstreamGroup,
+ "originalCount": len(upsList),
+ "method": method,
+ "failsafeMethod": failsafeExecutor.method,
+ },
+ )
+ if mlx != nil {
+ mlx.Close(ctx, nil, err)
+ }
+ return nil, err
+ }
+ upsList = filteredUpstreams
+
+ // Update tracing to reflect post-filter state
+ forwardSpan.SetAttributes(
+ attribute.Int("upstreams.filtered_count", len(upsList)),
+ attribute.String("upstreams.filter_group", failsafeExecutor.upstreamGroup),
+ )
+ }
+
// Set upstreams on the request
req.SetUpstreams(upsList)
@@ -378,11 +452,13 @@ func (n *Network) Forward(ctx context.Context, req *common.NormalizedRequest) (*
}
// 3) Apply rate limits
- if err := n.acquireRateLimitPermit(ctx, req); err != nil {
- if mlx != nil {
- mlx.Close(ctx, nil, err)
+ if !shouldSkipNetworkRateLimit(ctx) {
+ if err := n.acquireRateLimitPermit(ctx, req); err != nil {
+ if mlx != nil {
+ mlx.Close(ctx, nil, err)
+ }
+ return nil, err
}
- return nil, err
}
// 4) Prepare the request
@@ -431,15 +507,11 @@ func (n *Network) Forward(ctx context.Context, req *common.NormalizedRequest) (*
// This is the only way to pass additional values to failsafe policy executors context
ectx := context.WithValue(ctx, common.RequestContextKey, req)
- failsafeExecutor := n.getFailsafeExecutor(ctx, req)
- if failsafeExecutor == nil {
- return nil, errors.New("no failsafe executor found for this request")
- }
-
// Add tracing for which failsafe policy was selected
forwardSpan.SetAttributes(
attribute.String("failsafe.matched_method", failsafeExecutor.method),
attribute.String("failsafe.matched_finalities", fmt.Sprintf("%v", failsafeExecutor.finalities)),
+ attribute.String("failsafe.matched_upstream_group", failsafeExecutor.upstreamGroup),
)
// Track time from failsafe executor start to first callback invocation
@@ -871,6 +943,14 @@ func (n *Network) Config() *common.NetworkConfig {
return n.cfg
}
+func (n *Network) Cache() common.CacheDAL {
+ return n.cacheDal
+}
+
+func (n *Network) AppCtx() context.Context {
+ return n.appCtx
+}
+
func (n *Network) GetFinality(ctx context.Context, req *common.NormalizedRequest, resp *common.NormalizedResponse) common.DataFinalityState {
ctx, span := common.StartDetailSpan(ctx, "Network.GetFinality")
defer span.End()
diff --git a/erpc/networks_availability_test.go b/erpc/networks_availability_test.go
index 0a076068d..32195116a 100644
--- a/erpc/networks_availability_test.go
+++ b/erpc/networks_availability_test.go
@@ -49,7 +49,7 @@ func TestNetworkAvailability_LowerExactBlock_Skip(t *testing.T) {
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -114,7 +114,7 @@ func TestNetworkAvailability_LowerLatestMinus_Skip(t *testing.T) {
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -187,7 +187,7 @@ func TestNetworkAvailability_LowerEarliestPlus_InitAndSkip(t *testing.T) {
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -297,7 +297,7 @@ func TestNetworkAvailability_InvalidRange_FailOpen_AllowsRequest(t *testing.T) {
Reply(200).
JSON([]byte(`{"jsonrpc":"2.0","id":1,"result":{"number":"0x1"}}`))
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -374,7 +374,7 @@ func TestNetworkAvailability_Window_ExactLowerUpper(t *testing.T) {
Reply(200).
JSON([]byte(`{"jsonrpc":"2.0","id":1,"result":{"number":"0x64"}}`))
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -475,7 +475,7 @@ func TestNetworkAvailability_EarliestPlus_Freeze_NoAdvance(t *testing.T) {
return strings.Contains(b, "\"eth_getBlockByNumber\"") && strings.Contains(b, "\"0x3\"")
}).Reply(200).JSON([]byte(`{"jsonrpc":"2.0","id":1,"result":{"number":"0x3"}}`))
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -564,7 +564,7 @@ func TestNetworkAvailability_EarliestPlus_UpdateRate_Advance(t *testing.T) {
return strings.Contains(b, "\"eth_getBlockByNumber\"") && strings.Contains(b, "\"0x1\"")
}).Reply(200).JSON([]byte(`{"jsonrpc":"2.0","id":1,"result":{"number":"0x1"}}`))
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -637,7 +637,7 @@ func TestNetworkAvailability_UnsupportedProbe_FailOpen(t *testing.T) {
return strings.Contains(b, "\"eth_getBlockByNumber\"") && strings.Contains(b, "\"0x0\"")
}).Reply(200).JSON([]byte(`{"jsonrpc":"2.0","id":1,"result":{"number":"0x0"}}`))
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -718,7 +718,7 @@ func TestNetworkAvailability_UpperEarliestPlus_Enforced(t *testing.T) {
return strings.Contains(b, "\"eth_getBlockByNumber\"") && !strings.Contains(b, "\"0x0\"") && !strings.Contains(b, "\"0x1\"")
}).Reply(200).JSON([]byte(`{"jsonrpc":"2.0","id":1,"result":{"number":"0x1"}}`))
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -803,7 +803,7 @@ func TestNetworkAvailability_Enforce_Precedence_DefaultDoesNotOverrideMethod(t *
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -882,7 +882,7 @@ func TestNetworkAvailability_Enforce_Precedence_DefaultDoesNotOverrideNetwork(t
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -953,7 +953,7 @@ func TestNetworkAvailability_Enforce_DefaultFalse_Disables_WhenNoExplicitConfig(
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -1019,7 +1019,7 @@ func TestNetworkAvailability_Enforce_NetworkFalse_Disables(t *testing.T) {
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -1073,7 +1073,7 @@ func TestCheckUpstreamBlockAvailability_BlockBeyondLatest_ReturnsRetryableError(
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -1140,7 +1140,7 @@ func TestCheckUpstreamBlockAvailability_SmallDistance_IsRetryable(t *testing.T)
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -1231,7 +1231,7 @@ func TestCheckUpstreamBlockAvailability_ErrorHasCorrectDetails(t *testing.T) {
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -1312,7 +1312,7 @@ func TestRetryableBlockUnavailability_NoInfiniteLoop(t *testing.T) {
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
diff --git a/erpc/networks_bench_test.go b/erpc/networks_bench_test.go
index 50ffed774..bfa7f097d 100644
--- a/erpc/networks_bench_test.go
+++ b/erpc/networks_bench_test.go
@@ -36,7 +36,7 @@ func BenchmarkNetworkForward_SimpleSuccess(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -141,7 +141,7 @@ func BenchmarkNetworkForward_MethodIgnoreCase(b *testing.B) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -252,7 +252,7 @@ func BenchmarkNetworkForward_RetryFailures(b *testing.B) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -368,7 +368,7 @@ func BenchmarkNetworkForward_ConcurrentEthGetLogsIntegrityEnabled(b *testing.B)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
diff --git a/erpc/networks_bootstrap_test.go b/erpc/networks_bootstrap_test.go
index da028759a..2414c9a2c 100644
--- a/erpc/networks_bootstrap_test.go
+++ b/erpc/networks_bootstrap_test.go
@@ -52,7 +52,7 @@ func TestNetworksBootstrap_SlowProviderUpstreams_InitializeThenServe(t *testing.
})
require.NoError(t, err)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
upr := upstream.NewUpstreamsRegistry(
@@ -100,7 +100,7 @@ func TestNetworksBootstrap_UnsupportedNetwork_FatalFast(t *testing.T) {
ssr, err := data.NewSharedStateRegistry(ctx, &log.Logger, &common.SharedStateConfig{Connector: &common.ConnectorConfig{Driver: "memory", Memory: &common.MemoryConnectorConfig{MaxItems: 100_000, MaxTotalSize: "1GB"}}})
require.NoError(t, err)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
upr := upstream.NewUpstreamsRegistry(ctx, &log.Logger, "prjA", []*common.UpstreamConfig{}, ssr, rlr, vr, pr, nil, mt, 1*time.Second, nil)
@@ -141,7 +141,7 @@ func TestNetworksBootstrap_ProviderInitializing_503Retry(t *testing.T) {
ssr, err := data.NewSharedStateRegistry(ctx, &log.Logger, &common.SharedStateConfig{Connector: &common.ConnectorConfig{Driver: "memory", Memory: &common.MemoryConnectorConfig{MaxItems: 100_000, MaxTotalSize: "1GB"}}})
require.NoError(t, err)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
upr := upstream.NewUpstreamsRegistry(ctx, &log.Logger, "prjA", []*common.UpstreamConfig{}, ssr, rlr, vr, pr, nil, mt, 1*time.Second, nil)
diff --git a/erpc/networks_earliest_detection_test.go b/erpc/networks_earliest_detection_test.go
index 3bf32856c..9ae66faa9 100644
--- a/erpc/networks_earliest_detection_test.go
+++ b/erpc/networks_earliest_detection_test.go
@@ -61,7 +61,7 @@ func TestEarliestDetection_FailOpenWhenNoEarliestConfigured(t *testing.T) {
"result": map[string]interface{}{"number": "0x5"},
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -174,7 +174,7 @@ func TestEarliestDetection_BlocksRequestAfterSuccessfulDetection(t *testing.T) {
"result": nil, // Pruned
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -259,7 +259,7 @@ func TestEarliestDetection_InitialDetectionAlwaysRunsOnBootstrap(t *testing.T) {
},
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -351,7 +351,7 @@ func TestEarliestDetection_SchedulerHandlesPeriodicUpdates(t *testing.T) {
},
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -450,7 +450,7 @@ func TestEarliestDetection_InvalidRangeTriggersFailOpen(t *testing.T) {
"result": nil,
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -589,7 +589,7 @@ func TestEarliestDetection_StaleHighValueInSharedState(t *testing.T) {
},
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
diff --git a/erpc/networks_failsafe_test.go b/erpc/networks_failsafe_test.go
index b458f24ae..33ecff2d6 100644
--- a/erpc/networks_failsafe_test.go
+++ b/erpc/networks_failsafe_test.go
@@ -725,7 +725,7 @@ func setupTestNetworkWithRetryConfig(t *testing.T, ctx context.Context, directiv
}},
}
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
@@ -801,7 +801,7 @@ func setupTestNetworkWithMultipleFailsafePolicies(t *testing.T, ctx context.Cont
Failsafe: failsafeConfigs,
}
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
@@ -1335,3 +1335,239 @@ func TestGetFailsafeExecutor_OrderRespected(t *testing.T) {
assert.Contains(t, executor4.finalities, common.DataFinalityStateUnknown)
})
}
+
+func TestNetworkFailsafe_UpstreamGroupFilter(t *testing.T) {
+ t.Run("UseUpstreamDirective_OverridesGroupFilter", func(t *testing.T) {
+ util.ResetGock()
+ defer util.ResetGock()
+ util.SetupMocksForEvmStatePoller()
+
+ // Setup mock for upstream outside the group (rpc2 is NOT in "primary" group)
+ gock.New("http://rpc2.localhost").
+ Post("").
+ Filter(func(r *http.Request) bool {
+ body := util.SafeReadBody(r)
+ return strings.Contains(body, "eth_blockNumber")
+ }).
+ Times(1).
+ Reply(200).
+ JSON(map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": 1,
+ "result": "0x100",
+ })
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Create network with group filter in failsafe config
+ upstreamConfigs := []*common.UpstreamConfig{
+ {
+ Id: "rpc1",
+ Type: common.UpstreamTypeEvm,
+ Endpoint: "http://rpc1.localhost",
+ Evm: &common.EvmUpstreamConfig{
+ ChainId: 123,
+ },
+ Group: "primary", // In the group
+ },
+ {
+ Id: "rpc2",
+ Type: common.UpstreamTypeEvm,
+ Endpoint: "http://rpc2.localhost",
+ Evm: &common.EvmUpstreamConfig{
+ ChainId: 123,
+ },
+ Group: "fallback", // NOT in the group
+ },
+ }
+
+ networkConfig := &common.NetworkConfig{
+ Architecture: common.ArchitectureEvm,
+ Evm: &common.EvmNetworkConfig{
+ ChainId: 123,
+ },
+ Failsafe: []*common.FailsafeConfig{
+ {
+ MatchMethod: "*",
+ MatchUpstreamGroup: "primary", // Only allow "primary" group upstreams
+ Retry: &common.RetryPolicyConfig{
+ MaxAttempts: 1,
+ },
+ },
+ },
+ }
+
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
+ require.NoError(t, err)
+
+ metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
+
+ vr := thirdparty.NewVendorsRegistry()
+ pr, err := thirdparty.NewProvidersRegistry(&log.Logger, vr, []*common.ProviderConfig{}, nil)
+ require.NoError(t, err)
+
+ ssr, err := data.NewSharedStateRegistry(ctx, &log.Logger, &common.SharedStateConfig{
+ Connector: &common.ConnectorConfig{
+ Driver: "memory",
+ Memory: &common.MemoryConnectorConfig{
+ MaxItems: 100_000,
+ MaxTotalSize: "1GB",
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ upstreamsRegistry := upstream.NewUpstreamsRegistry(
+ ctx,
+ &log.Logger,
+ "test",
+ upstreamConfigs,
+ ssr,
+ rateLimitersRegistry,
+ vr,
+ pr,
+ nil,
+ metricsTracker,
+ time.Second,
+ nil,
+ )
+
+ upstreamsRegistry.Bootstrap(ctx)
+
+ time.Sleep(100 * time.Millisecond)
+
+ network, err := NewNetwork(ctx, &log.Logger, "test", networkConfig, rateLimitersRegistry, upstreamsRegistry, metricsTracker)
+ require.NoError(t, err)
+
+ err = upstreamsRegistry.PrepareUpstreamsForNetwork(ctx, networkConfig.NetworkId())
+ require.NoError(t, err)
+
+ err = network.Bootstrap(ctx)
+ require.NoError(t, err)
+
+ upstream.ReorderUpstreams(upstreamsRegistry)
+
+ // Request WITH UseUpstream directive targeting rpc2 (outside group)
+ // Should succeed because UseUpstream overrides group filter
+ requestBytes := []byte(`{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}`)
+ req := common.NewNormalizedRequest(requestBytes)
+ req.SetNetwork(network)
+ if req.Directives() == nil {
+ req.SetDirectives(&common.RequestDirectives{})
+ }
+ req.Directives().UseUpstream = "rpc2" // Target upstream outside the group
+
+ resp, err := network.Forward(ctx, req)
+
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ jrr, err := resp.JsonRpcResponse()
+ require.NoError(t, err)
+ assert.Nil(t, jrr.Error)
+
+ result, err := jrr.PeekStringByPath(ctx)
+ require.NoError(t, err)
+ assert.Equal(t, "0x100", result)
+ })
+
+ t.Run("EmptyGroupFilter_ReturnsConfigurationError", func(t *testing.T) {
+ util.ResetGock()
+ defer util.ResetGock()
+ util.SetupMocksForEvmStatePoller()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Create network with group filter that matches NO upstreams
+ upstreamConfigs := []*common.UpstreamConfig{
+ {
+ Id: "rpc1",
+ Type: common.UpstreamTypeEvm,
+ Endpoint: "http://rpc1.localhost",
+ Evm: &common.EvmUpstreamConfig{
+ ChainId: 123,
+ },
+ Group: "primary",
+ },
+ }
+
+ networkConfig := &common.NetworkConfig{
+ Architecture: common.ArchitectureEvm,
+ Evm: &common.EvmNetworkConfig{
+ ChainId: 123,
+ },
+ Failsafe: []*common.FailsafeConfig{
+ {
+ MatchMethod: "*",
+ MatchUpstreamGroup: "nonexistent-group", // No upstreams in this group
+ Retry: &common.RetryPolicyConfig{
+ MaxAttempts: 1,
+ },
+ },
+ },
+ }
+
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
+ require.NoError(t, err)
+
+ metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
+
+ vr := thirdparty.NewVendorsRegistry()
+ pr, err := thirdparty.NewProvidersRegistry(&log.Logger, vr, []*common.ProviderConfig{}, nil)
+ require.NoError(t, err)
+
+ ssr, err := data.NewSharedStateRegistry(ctx, &log.Logger, &common.SharedStateConfig{
+ Connector: &common.ConnectorConfig{
+ Driver: "memory",
+ Memory: &common.MemoryConnectorConfig{
+ MaxItems: 100_000,
+ MaxTotalSize: "1GB",
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ upstreamsRegistry := upstream.NewUpstreamsRegistry(
+ ctx,
+ &log.Logger,
+ "test",
+ upstreamConfigs,
+ ssr,
+ rateLimitersRegistry,
+ vr,
+ pr,
+ nil,
+ metricsTracker,
+ time.Second,
+ nil,
+ )
+
+ upstreamsRegistry.Bootstrap(ctx)
+
+ time.Sleep(100 * time.Millisecond)
+
+ network, err := NewNetwork(ctx, &log.Logger, "test", networkConfig, rateLimitersRegistry, upstreamsRegistry, metricsTracker)
+ require.NoError(t, err)
+
+ err = upstreamsRegistry.PrepareUpstreamsForNetwork(ctx, networkConfig.NetworkId())
+ require.NoError(t, err)
+
+ err = network.Bootstrap(ctx)
+ require.NoError(t, err)
+
+ upstream.ReorderUpstreams(upstreamsRegistry)
+
+ // Request WITHOUT UseUpstream directive - should fail with configuration error
+ requestBytes := []byte(`{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}`)
+ req := common.NewNormalizedRequest(requestBytes)
+ req.SetNetwork(network)
+
+ _, err = network.Forward(ctx, req)
+
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "no upstreams match the configured group")
+ assert.Contains(t, err.Error(), "nonexistent-group")
+ })
+}
diff --git a/erpc/networks_forward_test.go b/erpc/networks_forward_test.go
index fc6b6c8f1..da52acf41 100644
--- a/erpc/networks_forward_test.go
+++ b/erpc/networks_forward_test.go
@@ -44,7 +44,7 @@ func TestNetwork_Forward_InfiniteLoopWithAllUpstreamsSkipping(t *testing.T) {
},
},
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, logger)
mt := health.NewTracker(logger, "testProject", 2*time.Second)
@@ -132,7 +132,7 @@ func TestNetwork_Forward_InfiniteLoopWithAllUpstreamsSkipping(t *testing.T) {
})
require.NoError(t, err)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, logger)
require.NoError(t, err)
mt := health.NewTracker(logger, "testProject", 2*time.Second)
@@ -212,7 +212,7 @@ func TestNetwork_Forward_InfiniteLoopWithAllUpstreamsSkipping(t *testing.T) {
require.NoError(t, err)
// Setup rate limiters registry
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, logger)
require.NoError(t, err)
@@ -396,7 +396,7 @@ func TestNetwork_Forward_InfiniteLoopWithAllUpstreamsSkipping(t *testing.T) {
})
require.NoError(t, err)
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, logger)
require.NoError(t, err)
diff --git a/erpc/networks_hedge_test.go b/erpc/networks_hedge_test.go
index d1ffb0445..db045a929 100644
--- a/erpc/networks_hedge_test.go
+++ b/erpc/networks_hedge_test.go
@@ -968,7 +968,7 @@ func setupTestNetworkWithMultipleUpstreams(t *testing.T, ctx context.Context, nu
func setupTestNetwork(t *testing.T, ctx context.Context, upstreamConfigs []*common.UpstreamConfig, networkConfig *common.NetworkConfig) *Network {
t.Helper()
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
diff --git a/erpc/networks_integrity_test.go b/erpc/networks_integrity_test.go
index 7e43061e0..f9b0fce30 100644
--- a/erpc/networks_integrity_test.go
+++ b/erpc/networks_integrity_test.go
@@ -50,7 +50,7 @@ func mustHexToBytes(hex string) []byte {
// Helper to setup test network for integrity tests
func setupIntegrityTestNetwork(t *testing.T, ctx context.Context, upstreams []*common.UpstreamConfig, ntwCfg *common.NetworkConfig) (*Network, *upstream.UpstreamsRegistry) {
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
diff --git a/erpc/networks_interpolation_test.go b/erpc/networks_interpolation_test.go
index 1e6575d48..d8c3152f7 100644
--- a/erpc/networks_interpolation_test.go
+++ b/erpc/networks_interpolation_test.go
@@ -39,7 +39,7 @@ func setupTestNetworkForInterpolation(t *testing.T, ctx context.Context, network
},
}
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -1936,7 +1936,7 @@ func TestInterpolation_UpstreamSkipping_OnInterpolatedLatest(t *testing.T) {
"result": "0x1234",
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
@@ -2054,7 +2054,7 @@ func TestInterpolation_UpstreamSkipping_DisabledByMethodConfig(t *testing.T) {
"result": "0x99",
})
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
diff --git a/erpc/networks_multiplexer_test.go b/erpc/networks_multiplexer_test.go
index eb618f4b2..ee532be3d 100644
--- a/erpc/networks_multiplexer_test.go
+++ b/erpc/networks_multiplexer_test.go
@@ -387,7 +387,7 @@ func setupTestNetworkForMultiplexer(t *testing.T, ctx context.Context) *Network
// No caching to test pure multiplexing
}
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
diff --git a/erpc/networks_registry.go b/erpc/networks_registry.go
index 53910c152..e4e00abf9 100644
--- a/erpc/networks_registry.go
+++ b/erpc/networks_registry.go
@@ -111,6 +111,7 @@ func NewNetwork(
failsafeExecutors = append(failsafeExecutors, &FailsafeExecutor{
method: method,
finalities: fsCfg.MatchFinality,
+ upstreamGroup: fsCfg.MatchUpstreamGroup,
executor: failsafe.NewExecutor(policyArray...),
timeout: timeoutDuration,
consensusPolicyEnabled: fsCfg.Consensus != nil,
diff --git a/erpc/networks_sendrawtx_test.go b/erpc/networks_sendrawtx_test.go
index 1db09ba3c..ffbb258c0 100644
--- a/erpc/networks_sendrawtx_test.go
+++ b/erpc/networks_sendrawtx_test.go
@@ -1671,7 +1671,7 @@ func setupSendRawTxTestNetworkWithRetryAndHedge(t *testing.T, ctx context.Contex
func setupSendRawTxNetwork(t *testing.T, ctx context.Context, upstreamConfigs []*common.UpstreamConfig, networkConfig *common.NetworkConfig) *Network {
t.Helper()
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
diff --git a/erpc/networks_skip_rate_limit_test.go b/erpc/networks_skip_rate_limit_test.go
new file mode 100644
index 000000000..5de6c5d1e
--- /dev/null
+++ b/erpc/networks_skip_rate_limit_test.go
@@ -0,0 +1,19 @@
+package erpc
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestShouldSkipNetworkRateLimit(t *testing.T) {
+ assert.False(t, shouldSkipNetworkRateLimit(nil))
+ assert.False(t, shouldSkipNetworkRateLimit(context.Background()))
+
+ skipCtx := withSkipNetworkRateLimit(context.Background())
+ assert.True(t, shouldSkipNetworkRateLimit(skipCtx))
+
+ wrongTypeCtx := context.WithValue(context.Background(), skipNetworkRateLimitKey{}, "no")
+ assert.False(t, shouldSkipNetworkRateLimit(wrongTypeCtx))
+}
diff --git a/erpc/networks_test.go b/erpc/networks_test.go
index 117662520..c2c57c7d2 100644
--- a/erpc/networks_test.go
+++ b/erpc/networks_test.go
@@ -45,7 +45,7 @@ func TestNetwork_Forward(t *testing.T) {
util.SetupMocksForEvmStatePoller()
defer util.AssertNoPendingMocks(t, 0)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{
Store: &common.RateLimitStoreConfig{
Driver: "memory",
@@ -212,7 +212,7 @@ func TestNetwork_Forward(t *testing.T) {
EmptyResultMaxAttempts: 2, // cap empties at 2 total attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
if err != nil {
t.Fatal(err)
}
@@ -331,7 +331,7 @@ func TestNetwork_Forward(t *testing.T) {
fsCfg := &common.FailsafeConfig{
Retry: &common.RetryPolicyConfig{MaxAttempts: 3}, // no EmptyResultMaxAttempts set
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
if err != nil {
t.Fatal(err)
}
@@ -447,7 +447,7 @@ func TestNetwork_Forward(t *testing.T) {
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
fsCfg := &common.FailsafeConfig{Retry: &common.RetryPolicyConfig{MaxAttempts: 3, EmptyResultMaxAttempts: 2}}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
if err != nil {
t.Fatal(err)
}
@@ -541,7 +541,7 @@ func TestNetwork_Forward(t *testing.T) {
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
fsCfg := &common.FailsafeConfig{Retry: &common.RetryPolicyConfig{MaxAttempts: 5, EmptyResultMaxAttempts: 2}}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
if err != nil {
t.Fatal(err)
}
@@ -627,7 +627,7 @@ func TestNetwork_Forward(t *testing.T) {
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
fsCfg := &common.FailsafeConfig{Retry: &common.RetryPolicyConfig{MaxAttempts: 5, EmptyResultMaxAttempts: 4, EmptyResultIgnore: []string{"eth_getBalance"}}}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{Budgets: []*common.RateLimitBudgetConfig{}}, &log.Logger)
if err != nil {
t.Fatal(err)
}
@@ -693,7 +693,7 @@ func TestNetwork_Forward(t *testing.T) {
util.SetupMocksForEvmStatePoller()
defer util.AssertNoPendingMocks(t, 0)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{
{
@@ -829,7 +829,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -954,7 +954,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -1095,7 +1095,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -1261,7 +1261,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -1418,7 +1418,7 @@ func TestNetwork_Forward(t *testing.T) {
// Initialize various components for the test environment
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -1615,7 +1615,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2, // Allow up to 2 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -1825,7 +1825,7 @@ func TestNetwork_Forward(t *testing.T) {
defer cancel()
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -2017,7 +2017,7 @@ func TestNetwork_Forward(t *testing.T) {
defer cancel()
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -2242,7 +2242,7 @@ func TestNetwork_Forward(t *testing.T) {
// Set up the test environment
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, []*common.ProviderConfig{}, nil)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
@@ -2380,7 +2380,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2, // Allow up to 2 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -2588,7 +2588,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2, // Allow up to 2 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -2778,7 +2778,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2, // Allow up to 2 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -2961,7 +2961,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2, // Allow up to 2 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -3163,7 +3163,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 4, // Allow up to 4 attempts (1 initial + 3 retries)
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -3392,7 +3392,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -3583,7 +3583,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3, // Allow up to 3 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -3771,7 +3771,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2, // Allow up to 2 retry attempts
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -3952,7 +3952,7 @@ func TestNetwork_Forward(t *testing.T) {
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
fsCfg := &common.FailsafeConfig{}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -4081,7 +4081,7 @@ func TestNetwork_Forward(t *testing.T) {
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -4219,7 +4219,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -4391,7 +4391,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -4521,7 +4521,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -4623,7 +4623,7 @@ func TestNetwork_Forward(t *testing.T) {
util.SetupMocksForEvmStatePoller()
defer util.AssertNoPendingMocks(t, 0)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{
{
@@ -4769,7 +4769,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 3,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -4899,7 +4899,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 4,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5027,7 +5027,7 @@ func TestNetwork_Forward(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5160,7 +5160,7 @@ func TestNetwork_Forward(t *testing.T) {
Duration: common.Duration(1 * time.Second),
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5293,7 +5293,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxCount: 1,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5446,7 +5446,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxCount: 5,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5597,7 +5597,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxCount: 5,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5755,7 +5755,7 @@ func TestNetwork_Forward(t *testing.T) {
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -5889,7 +5889,7 @@ func TestNetwork_Forward(t *testing.T) {
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6022,7 +6022,7 @@ func TestNetwork_Forward(t *testing.T) {
t.Fatal(err)
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6175,7 +6175,7 @@ func TestNetwork_Forward(t *testing.T) {
}
clr := clients.NewClientRegistry(&log.Logger, "prjA", nil, evm.NewJsonRpcErrorExtractor())
fsCfg := &common.FailsafeConfig{}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6313,7 +6313,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6458,7 +6458,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6586,7 +6586,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6738,7 +6738,7 @@ func TestNetwork_Forward(t *testing.T) {
MaxAttempts: 2,
},
}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -6883,7 +6883,7 @@ func TestNetwork_Forward(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -7019,7 +7019,7 @@ func TestNetwork_Forward(t *testing.T) {
defer cancel()
fsCfg := &common.FailsafeConfig{}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -7142,7 +7142,7 @@ func TestNetwork_Forward(t *testing.T) {
defer cancel()
fsCfg := &common.FailsafeConfig{}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -7264,7 +7264,7 @@ func TestNetwork_Forward(t *testing.T) {
defer cancel()
fsCfg := &common.FailsafeConfig{}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -7380,7 +7380,7 @@ func TestNetwork_Forward(t *testing.T) {
metricsTracker.Bootstrap(ctx)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &logger)
assert.NoError(t, err)
@@ -7566,7 +7566,7 @@ func TestNetwork_Forward(t *testing.T) {
defer cancel()
fsCfg := &common.FailsafeConfig{}
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
@@ -9236,7 +9236,7 @@ func TestNetwork_EvmGetLogs(t *testing.T) {
defer cancel()
// Build network with tight best-effort budgets to force fallback
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
pr, err := thirdparty.NewProvidersRegistry(&log.Logger, vr, []*common.ProviderConfig{}, nil)
@@ -10170,7 +10170,7 @@ func TestNetwork_ThunderingHerdProtection(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 5*time.Second)
pollerInterval := 2000 * time.Millisecond
@@ -10372,7 +10372,7 @@ func TestNetwork_ThunderingHerdProtection(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
upCfg := &common.UpstreamConfig{
@@ -10557,7 +10557,7 @@ func TestNetwork_ThunderingHerdProtection(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rlr, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rlr, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
mt := health.NewTracker(&log.Logger, "prjA", 2*time.Second)
upCfg := &common.UpstreamConfig{
@@ -10655,7 +10655,7 @@ func TestNetwork_ThunderingHerdProtection(t *testing.T) {
func setupTestNetworkSimple(t *testing.T, ctx context.Context, upstreamConfig *common.UpstreamConfig, networkConfig *common.NetworkConfig) *Network {
t.Helper()
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
if upstreamConfig == nil {
@@ -10764,7 +10764,7 @@ func setupTestNetworkWithFullAndArchiveNodeUpstreams(
) *Network {
t.Helper()
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
up1 := &common.UpstreamConfig{
@@ -10929,7 +10929,7 @@ func TestNetwork_HighestLatestBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
@@ -11082,7 +11082,7 @@ func TestNetwork_HighestLatestBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
@@ -11240,7 +11240,7 @@ func TestNetwork_HighestLatestBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
@@ -11367,7 +11367,7 @@ func TestNetwork_HighestLatestBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
@@ -11501,7 +11501,7 @@ func TestNetwork_HighestFinalizedBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
@@ -11632,7 +11632,7 @@ func TestNetwork_HighestFinalizedBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
@@ -11761,7 +11761,7 @@ func TestNetwork_HighestFinalizedBlockNumber(t *testing.T) {
Reply(200).
JSON([]byte(`{"result":"0x7b"}`))
- rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
vr := thirdparty.NewVendorsRegistry()
diff --git a/erpc/policy_evaluator_test.go b/erpc/policy_evaluator_test.go
index c6a95a881..599d32c22 100644
--- a/erpc/policy_evaluator_test.go
+++ b/erpc/policy_evaluator_test.go
@@ -1724,7 +1724,7 @@ func TestPolicyEvaluator(t *testing.T) {
}
func createTestNetwork(t *testing.T, ctx context.Context) (*Network, *upstream.Upstream, *upstream.Upstream, *upstream.Upstream) {
- rlr, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{
+ rlr, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{
Budgets: []*common.RateLimitBudgetConfig{},
}, &log.Logger)
if err != nil {
diff --git a/erpc/projects_test.go b/erpc/projects_test.go
index 48e966b84..d55297f90 100644
--- a/erpc/projects_test.go
+++ b/erpc/projects_test.go
@@ -25,7 +25,7 @@ func TestProject_Forward(t *testing.T) {
util.SetupMocksForEvmStatePoller()
defer util.AssertNoPendingMocks(t, 0)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{
Store: &common.RateLimitStoreConfig{
Driver: "memory",
@@ -136,7 +136,7 @@ func TestProject_TimeoutScenarios(t *testing.T) {
// Create a rate limiters registry (not specifically needed for this test,
// but it's part of the usual setup.)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{},
&log.Logger,
)
@@ -248,7 +248,7 @@ func TestProject_TimeoutScenarios(t *testing.T) {
util.SetupMocksForEvmStatePoller()
defer util.AssertNoPendingMocks(t, 0)
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{},
&log.Logger,
)
@@ -416,7 +416,7 @@ func TestProject_LazyLoadNetworkDefaults(t *testing.T) {
}
// Build ProjectsRegistry with no existing EvmJsonRpcCache or RateLimiter
- rateLimiters, _ := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimiters, _ := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
ssr, err := data.NewSharedStateRegistry(ctx, &log.Logger, &common.SharedStateConfig{
Connector: &common.ConnectorConfig{
Driver: "memory",
@@ -512,7 +512,7 @@ func TestProject_NetworkAlias(t *testing.T) {
panic(err)
}
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(),
&common.RateLimiterConfig{},
&log.Logger,
)
diff --git a/erpc/upstream_selection_test.go b/erpc/upstream_selection_test.go
index f4d717474..936b45437 100644
--- a/erpc/upstream_selection_test.go
+++ b/erpc/upstream_selection_test.go
@@ -809,7 +809,7 @@ func setupTestNetworkWithFourUpstreams(t *testing.T, ctx context.Context, failsa
func setupTestNetworkWithConfig(t *testing.T, ctx context.Context, upstreamConfigs []*common.UpstreamConfig, failsafeConfig *common.FailsafeConfig) *Network {
t.Helper()
- rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(&common.RateLimiterConfig{}, &log.Logger)
+ rateLimitersRegistry, err := upstream.NewRateLimitersRegistry(context.Background(), &common.RateLimiterConfig{}, &log.Logger)
require.NoError(t, err)
metricsTracker := health.NewTracker(&log.Logger, "test", time.Minute)
diff --git a/telemetry/metrics.go b/telemetry/metrics.go
index 430453253..fd646d2f4 100644
--- a/telemetry/metrics.go
+++ b/telemetry/metrics.go
@@ -410,6 +410,106 @@ var (
Help: "eth_getLogs requested block-range sizes.",
Buckets: EvmGetLogsRangeHistogramBuckets,
}, []string{"project", "network", "category", "user", "finality"})
+
+ // Multicall3 aggregation metrics
+ MetricMulticall3AggregationTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_aggregation_total",
+ Help: "Total number of multicall3 aggregation attempts.",
+ }, []string{"project", "network", "outcome"})
+
+ MetricMulticall3FallbackTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_fallback_total",
+ Help: "Total number of multicall3 fallbacks to individual requests.",
+ }, []string{"project", "network", "reason"})
+
+ MetricMulticall3CacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_cache_hits_total",
+ Help: "Total number of per-call cache hits in multicall3 batch aggregation.",
+ }, []string{"project", "network"})
+
+ // Network-level Multicall3 batching metrics
+ MetricMulticall3BatchSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "erpc",
+ Name: "multicall3_batch_size",
+ Help: "Number of unique calls per Multicall3 batch.",
+ Buckets: []float64{1, 2, 5, 10, 15, 20, 30, 50},
+ }, []string{"project", "network"})
+
+ MetricMulticall3BatchWaitMs = promauto.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "erpc",
+ Name: "multicall3_batch_wait_ms",
+ Help: "Time requests waited in batch before flush (milliseconds).",
+ Buckets: []float64{1, 2, 5, 10, 15, 20, 25, 30, 50},
+ }, []string{"project", "network"})
+
+ MetricMulticall3QueueLen = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "erpc",
+ Name: "multicall3_queue_len",
+ Help: "Current number of requests queued for batching.",
+ }, []string{"project", "network"})
+
+ MetricMulticall3QueueOverflowTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_queue_overflow_total",
+ Help: "Total number of requests that bypassed batching due to queue overflow.",
+ }, []string{"project", "network", "reason"})
+
+ MetricMulticall3DedupeTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_dedupe_total",
+ Help: "Total number of deduplicated requests within batches.",
+ }, []string{"project", "network"})
+
+ MetricMulticall3CacheWriteErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_cache_write_errors_total",
+ Help: "Total number of per-call cache write errors in multicall3 batch responses.",
+ }, []string{"project", "network"})
+
+ MetricMulticall3CacheReadErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_cache_read_errors_total",
+ Help: "Total number of cache read errors during multicall3 pre-aggregation cache check.",
+ }, []string{"project", "network"})
+
+ MetricMulticall3FallbackRequestsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_fallback_requests_total",
+ Help: "Total number of individual requests during multicall3 fallback, labeled by outcome.",
+ }, []string{"project", "network", "outcome"})
+
+ MetricMulticall3AbandonedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_abandoned_total",
+ Help: "Total number of multicall3 batch results not delivered because caller context was cancelled.",
+ }, []string{"project", "network"})
+
+ MetricMulticall3PanicTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_panic_total",
+ Help: "Total number of panics recovered in multicall3 batch processing.",
+ }, []string{"project", "network", "location"})
+
+ MetricMulticall3CacheWriteDroppedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_cache_write_dropped_total",
+ Help: "Total number of multicall3 per-call cache writes dropped due to backpressure.",
+ }, []string{"project", "network"})
+
+ MetricMulticall3RuntimeBypassTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_runtime_bypass_total",
+ Help: "Total number of contracts auto-detected as requiring bypass (revert via multicall3 but succeed individually).",
+ }, []string{"project", "network"})
+
+ MetricMulticall3AutoDetectRetryTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "erpc",
+ Name: "multicall3_auto_detect_retry_total",
+ Help: "Total number of auto-detect retry attempts for reverted calls.",
+ }, []string{"project", "network", "outcome"})
)
var DefaultHistogramBuckets = []float64{
diff --git a/test/integration/multicall3_integration_test.go b/test/integration/multicall3_integration_test.go
new file mode 100644
index 000000000..f616b50df
--- /dev/null
+++ b/test/integration/multicall3_integration_test.go
@@ -0,0 +1,1540 @@
+// Package integration contains integration tests that run against a real eRPC instance.
+//
+// These tests are skipped by default unless ERPC_INTEGRATION_TEST_ENDPOINT is set.
+//
+// Environment variables:
+// - ERPC_INTEGRATION_TEST_ENDPOINT: eRPC endpoint URL (required)
+// - ERPC_INTEGRATION_TEST_METRICS: Prometheus metrics endpoint (optional, enables metric verification)
+// - ERPC_INTEGRATION_TEST_AUTH: Auth headers in "Header: value" format (optional, use ";" for multiple)
+//
+// Usage:
+//
+// # Run against local eRPC (no auth)
+// ERPC_INTEGRATION_TEST_ENDPOINT=http://localhost:4000/main/evm/1 \
+// go test -v ./test/integration/...
+//
+// # Run against local eRPC with auth and metrics
+// ERPC_INTEGRATION_TEST_ENDPOINT=http://localhost:4000/main/evm/1 \
+// ERPC_INTEGRATION_TEST_METRICS=http://localhost:4001/metrics \
+// ERPC_INTEGRATION_TEST_AUTH="X-ERPC-Secret-Token: your-token" \
+// go test -v ./test/integration/...
+//
+// # Run specific test
+// ERPC_INTEGRATION_TEST_ENDPOINT=http://localhost:4000/main/evm/1 \
+// ERPC_INTEGRATION_TEST_AUTH="X-ERPC-Secret-Token: your-token" \
+// go test -v -run TestMulticall3Integration_BatchEthCalls ./test/integration/...
+package integration
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ // Well-known contract addresses for testing (Ethereum mainnet)
+ // These are used because they're stable and have predictable behavior
+ wethAddress = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
+ usdcAddress = "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
+
+ // Function selectors
+ decimalsSelector = "0x313ce567" // decimals()
+ symbolSelector = "0x95d89b41" // symbol()
+ totalSupplySelector = "0x18160ddd" // totalSupply()
+)
+
+type jsonRPCRequest struct {
+ JSONRPC string `json:"jsonrpc"`
+ ID interface{} `json:"id"`
+ Method string `json:"method"`
+ Params []interface{} `json:"params"`
+}
+
+type jsonRPCResponse struct {
+ JSONRPC string `json:"jsonrpc"`
+ ID interface{} `json:"id"`
+ Result json.RawMessage `json:"result,omitempty"`
+ Error *json.RawMessage `json:"error,omitempty"`
+}
+
+type ethCallParams struct {
+ To string `json:"to"`
+ Data string `json:"data"`
+}
+
+func getTestEndpoint(t *testing.T) string {
+ endpoint := os.Getenv("ERPC_INTEGRATION_TEST_ENDPOINT")
+ if endpoint == "" {
+ t.Skip("ERPC_INTEGRATION_TEST_ENDPOINT not set, skipping integration test")
+ }
+ return endpoint
+}
+
+func getMetricsEndpoint() string {
+ return os.Getenv("ERPC_INTEGRATION_TEST_METRICS")
+}
+
+// getAuthHeaders returns authentication headers if configured
+// Format: "Header-Name: value" or multiple headers separated by ";"
+func getAuthHeaders() map[string]string {
+ headers := make(map[string]string)
+ authHeader := os.Getenv("ERPC_INTEGRATION_TEST_AUTH")
+ if authHeader == "" {
+ return headers
+ }
+
+ // Support multiple headers separated by ";"
+ parts := strings.Split(authHeader, ";")
+ for _, part := range parts {
+ part = strings.TrimSpace(part)
+ if idx := strings.Index(part, ":"); idx > 0 {
+ key := strings.TrimSpace(part[:idx])
+ value := strings.TrimSpace(part[idx+1:])
+ headers[key] = value
+ }
+ }
+ return headers
+}
+
+func makeRequest(t *testing.T, endpoint string, payload interface{}) []byte {
+ t.Helper()
+
+ body, err := json.Marshal(payload)
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
+ require.NoError(t, err)
+ req.Header.Set("Content-Type", "application/json")
+
+ // Add auth headers if configured
+ for key, value := range getAuthHeaders() {
+ req.Header.Set(key, value)
+ }
+
+ client := &http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ respBody, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ return respBody
+}
+
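+// getMetricValue matches samples by metric-name prefix and sums the last field
+// of every matching line, i.e. it aggregates the value across all label
+// combinations. A matched line in the Prometheus text format looks roughly like
+// (label values here are placeholders):
+//
+//	erpc_multicall3_aggregation_total{project="main",network="evm:1",outcome="success"} 42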
+func getMetricValue(metricsEndpoint, metricName string) (float64, error) {
+ resp, err := http.Get(metricsEndpoint)
+ if err != nil {
+ return 0, err
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return 0, err
+ }
+
+ var total float64
+ for _, line := range strings.Split(string(body), "\n") {
+ if strings.HasPrefix(line, metricName) && !strings.HasPrefix(line, "#") {
+ // Parse the metric value (last field)
+ fields := strings.Fields(line)
+ if len(fields) >= 2 {
+ var val float64
+ fmt.Sscanf(fields[len(fields)-1], "%f", &val)
+ total += val
+ }
+ }
+ }
+ return total, nil
+}
+
+// TestMulticall3Integration_Connectivity verifies basic endpoint connectivity
+func TestMulticall3Integration_Connectivity(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_chainId",
+ Params: []interface{}{},
+ }
+
+ respBody := makeRequest(t, endpoint, req)
+
+ var resp jsonRPCResponse
+ err := json.Unmarshal(respBody, &resp)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Nil(t, resp.Error, "Request returned error: %s", string(respBody))
+ require.NotNil(t, resp.Result, "No result in response")
+
+ var chainId string
+ err = json.Unmarshal(resp.Result, &chainId)
+ require.NoError(t, err)
+ assert.NotEmpty(t, chainId, "chainId should not be empty")
+
+ t.Logf("Connected to chain: %s", chainId)
+}
+
+// TestMulticall3Integration_SingleEthCall verifies single eth_call works (baseline)
+func TestMulticall3Integration_SingleEthCall(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_call",
+ Params: []interface{}{
+ ethCallParams{To: wethAddress, Data: decimalsSelector},
+ "latest",
+ },
+ }
+
+ respBody := makeRequest(t, endpoint, req)
+
+ var resp jsonRPCResponse
+ err := json.Unmarshal(respBody, &resp)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Nil(t, resp.Error, "Request returned error: %s", string(respBody))
+ require.NotNil(t, resp.Result, "No result in response")
+
+ var result string
+ err = json.Unmarshal(resp.Result, &result)
+ require.NoError(t, err)
+ assert.NotEmpty(t, result, "result should not be empty")
+
+ t.Logf("Single eth_call result (decimals): %s", result)
+}
+
+// TestMulticall3Integration_BatchEthCalls verifies JSON-RPC batch with multiple eth_calls
+// These should be aggregated into a single multicall3 call
+func TestMulticall3Integration_BatchEthCalls(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ // Capture baseline metrics if available
+ var baselineAggregation float64
+ if metricsEndpoint != "" {
+ var err error
+ baselineAggregation, err = getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ if err != nil {
+ t.Logf("Warning: could not get baseline metrics: %v", err)
+ }
+ }
+
+ // Send batch with 3 eth_calls to the same contract
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 3, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: totalSupplySelector}, "latest"}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse batch response: %s", string(respBody))
+ require.Len(t, responses, 3, "Expected 3 responses in batch")
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d returned error", i)
+ assert.NotNil(t, resp.Result, "Response %d has no result", i)
+ }
+
+ t.Logf("Batch eth_call responses: all 3 successful")
+
+ // Check if metrics increased (indicates multicall3 was used)
+ if metricsEndpoint != "" {
+ time.Sleep(1 * time.Second) // Allow metrics to update
+
+ newAggregation, err := getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ if err == nil {
+ diff := newAggregation - baselineAggregation
+ t.Logf("Multicall3 aggregation metric: %.0f -> %.0f (+%.0f)", baselineAggregation, newAggregation, diff)
+ if diff > 0 {
+ t.Logf("✓ Multicall3 aggregation confirmed via metrics")
+ } else {
+ t.Logf("⚠ No increase in aggregation metric - multicall3 may not be enabled or requests fell back")
+ }
+ }
+ }
+}
+
+// TestMulticall3Integration_ConcurrentRequests verifies concurrent requests are batched together
+func TestMulticall3Integration_ConcurrentRequests(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ // Capture baseline metrics if available
+ var baselineAggregation float64
+ if metricsEndpoint != "" {
+ baselineAggregation, _ = getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ }
+
+ // Send 10 concurrent requests - they should be batched within the window
+ const numRequests = 10
+ var wg sync.WaitGroup
+ results := make(chan bool, numRequests)
+
+ for i := 0; i < numRequests; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: id,
+ Method: "eth_call",
+ Params: []interface{}{
+ ethCallParams{To: wethAddress, Data: decimalsSelector},
+ "latest",
+ },
+ }
+
+ respBody := makeRequest(t, endpoint, req)
+
+ var resp jsonRPCResponse
+ if err := json.Unmarshal(respBody, &resp); err != nil {
+ results <- false
+ return
+ }
+
+ results <- resp.Error == nil && resp.Result != nil
+ }(i)
+ }
+
+ wg.Wait()
+ close(results)
+
+ successCount := 0
+ for success := range results {
+ if success {
+ successCount++
+ }
+ }
+
+ assert.Equal(t, numRequests, successCount, "All concurrent requests should succeed")
+ t.Logf("Concurrent requests: %d/%d successful", successCount, numRequests)
+
+ // Check if metrics show batching occurred
+ if metricsEndpoint != "" {
+ time.Sleep(1 * time.Second)
+
+ newAggregation, err := getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ if err == nil {
+ diff := newAggregation - baselineAggregation
+ t.Logf("Multicall3 aggregation metric: %.0f -> %.0f (+%.0f)", baselineAggregation, newAggregation, diff)
+
+ // If multicall3 is working, we should see fewer aggregations than requests
+ // (since multiple requests get batched into one multicall3 call)
+ if diff > 0 && diff < float64(numRequests) {
+ t.Logf("✓ Batching confirmed: %d requests resulted in %.0f aggregations", numRequests, diff)
+ }
+ }
+ }
+}
+
+// TestMulticall3Integration_MixedBatch verifies mixed batch with eth_call and other methods
+func TestMulticall3Integration_MixedBatch(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_blockNumber", Params: []interface{}{}},
+ {JSONRPC: "2.0", ID: 3, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: totalSupplySelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 4, Method: "eth_chainId", Params: []interface{}{}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse mixed batch response: %s", string(respBody))
+ require.Len(t, responses, 4, "Expected 4 responses in mixed batch")
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d returned error", i)
+ assert.NotNil(t, resp.Result, "Response %d has no result", i)
+ }
+
+ t.Logf("Mixed batch: all 4 responses successful (2 eth_call, 1 eth_blockNumber, 1 eth_chainId)")
+}
+
+// TestMulticall3Integration_DifferentContracts verifies batching across different contracts
+func TestMulticall3Integration_DifferentContracts(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Batch calls to different contracts - should still be batched via multicall3
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: usdcAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 3, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 4, Method: "eth_call", Params: []interface{}{ethCallParams{To: usdcAddress, Data: symbolSelector}, "latest"}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 4, "Expected 4 responses")
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d returned error", i)
+ assert.NotNil(t, resp.Result, "Response %d has no result", i)
+ }
+
+ t.Logf("Different contracts batch: all 4 responses successful (WETH + USDC)")
+}
+
+// TestMulticall3Integration_ErrorHandling verifies error handling for failing calls
+func TestMulticall3Integration_ErrorHandling(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Send a batch with one valid and one invalid call
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: "0xdeadbeef"}, "latest"}}, // Invalid function
+ {JSONRPC: "2.0", ID: 3, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, "latest"}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 3, "Expected 3 responses")
+
+ // First and third should succeed
+ assert.Nil(t, responses[0].Error, "Response 0 should succeed")
+ assert.NotNil(t, responses[0].Result, "Response 0 should have result")
+
+ assert.Nil(t, responses[2].Error, "Response 2 should succeed")
+ assert.NotNil(t, responses[2].Result, "Response 2 should have result")
+
+ // Second may error or return empty - both are acceptable
+ if responses[1].Error != nil {
+ t.Logf("Invalid call returned error (expected): %s", string(*responses[1].Error))
+ } else if responses[1].Result != nil {
+ var result string
+ json.Unmarshal(responses[1].Result, &result)
+ t.Logf("Invalid call returned result: %s (may be empty or revert data)", result)
+ }
+
+ t.Logf("Error handling: valid calls succeeded, invalid call handled gracefully")
+}
+
+// TestMulticall3Integration_LargeBatch verifies handling of larger batches
+func TestMulticall3Integration_LargeBatch(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ // Capture baseline metrics
+ var baselineAggregation float64
+ if metricsEndpoint != "" {
+ baselineAggregation, _ = getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ }
+
+ // Send a batch with 20 calls (should trigger batching limits)
+ batch := make([]jsonRPCRequest, 20)
+ for i := 0; i < 20; i++ {
+ batch[i] = jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: i + 1,
+ Method: "eth_call",
+ Params: []interface{}{
+ ethCallParams{To: wethAddress, Data: decimalsSelector},
+ "latest",
+ },
+ }
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 20, "Expected 20 responses")
+
+ successCount := 0
+ for _, resp := range responses {
+ if resp.Error == nil && resp.Result != nil {
+ successCount++
+ }
+ }
+
+ assert.Equal(t, 20, successCount, "All 20 calls should succeed")
+ t.Logf("Large batch: %d/20 calls successful", successCount)
+
+ // Check metrics
+ if metricsEndpoint != "" {
+ time.Sleep(1 * time.Second)
+
+ newAggregation, err := getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ if err == nil {
+ diff := newAggregation - baselineAggregation
+ t.Logf("Multicall3 aggregation metric: %.0f -> %.0f (+%.0f)", baselineAggregation, newAggregation, diff)
+
+ // With default maxCalls=20, a batch of 20 should result in 1 aggregation
+ // (or possibly 2 if there's splitting)
+ if diff > 0 && diff <= 2 {
+ t.Logf("✓ Efficient batching: 20 requests resulted in %.0f multicall3 aggregation(s)", diff)
+ }
+ }
+ }
+}
+
+// TestMulticall3Integration_Deduplication verifies that duplicate requests are deduplicated
+func TestMulticall3Integration_Deduplication(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ // Capture baseline dedupe metric
+ var baselineDedupe float64
+ if metricsEndpoint != "" {
+ baselineDedupe, _ = getMetricValue(metricsEndpoint, "erpc_multicall3_dedupe_total")
+ }
+
+ // Send batch with DUPLICATE calls - same contract, same function, same block
+ // These should be deduplicated (only one actual call to the contract)
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}}, // Duplicate
+ {JSONRPC: "2.0", ID: 3, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}}, // Duplicate
+ {JSONRPC: "2.0", ID: 4, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, "latest"}}, // Different call
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 4, "Expected 4 responses")
+
+ // All responses should succeed
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d returned error", i)
+ assert.NotNil(t, resp.Result, "Response %d has no result", i)
+ }
+
+ // First 3 responses should be identical (same call)
+ var result1, result2, result3 string
+ json.Unmarshal(responses[0].Result, &result1)
+ json.Unmarshal(responses[1].Result, &result2)
+ json.Unmarshal(responses[2].Result, &result3)
+ assert.Equal(t, result1, result2, "Duplicate calls should return same result")
+ assert.Equal(t, result2, result3, "Duplicate calls should return same result")
+
+ t.Logf("Deduplication: 3 identical calls returned same result: %s", result1)
+
+ // Check if dedupe metric increased
+ if metricsEndpoint != "" {
+ time.Sleep(1 * time.Second)
+
+ newDedupe, err := getMetricValue(metricsEndpoint, "erpc_multicall3_dedupe_total")
+ if err == nil {
+ diff := newDedupe - baselineDedupe
+ t.Logf("Deduplication metric: %.0f -> %.0f (+%.0f)", baselineDedupe, newDedupe, diff)
+ if diff >= 2 {
+ t.Logf("✓ Deduplication confirmed: %0.f duplicate requests were deduplicated", diff)
+ }
+ }
+ }
+}
+
+// TestMulticall3Integration_BlockTagVariations tests batching with different block tags
+func TestMulticall3Integration_BlockTagVariations(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ t.Run("LatestBlock", func(t *testing.T) {
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, "latest"}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 2)
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d with 'latest' tag should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+ t.Logf("'latest' block tag: both calls succeeded")
+ })
+
+ t.Run("SpecificBlockNumber", func(t *testing.T) {
+ // First get the latest block number
+ blockNumReq := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_blockNumber",
+ Params: []interface{}{},
+ }
+ respBody := makeRequest(t, endpoint, blockNumReq)
+
+ var blockResp jsonRPCResponse
+ err := json.Unmarshal(respBody, &blockResp)
+ require.NoError(t, err)
+ require.NotNil(t, blockResp.Result)
+
+ var blockNum string
+ json.Unmarshal(blockResp.Result, &blockNum)
+
+ // Now send batch with specific block number
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, blockNum}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, blockNum}},
+ }
+
+ respBody = makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err = json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 2)
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d with specific block should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+ t.Logf("Specific block number (%s): both calls succeeded", blockNum)
+ })
+
+ t.Run("PendingBlock", func(t *testing.T) {
+ // By default, "pending" block tag should NOT be batched (allowPendingTagBatching=false)
+ // Each call should still succeed but may not be batched together
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "pending"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, "pending"}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 2)
+
+ // Calls should still succeed (individually forwarded)
+ successCount := 0
+ for _, resp := range responses {
+ if resp.Error == nil && resp.Result != nil {
+ successCount++
+ }
+ }
+
+ // Note: some networks don't support "pending" tag, so we just check responses are returned
+ t.Logf("'pending' block tag: %d/2 calls returned results (may not be batched)", successCount)
+ })
+}
+
+// TestMulticall3Integration_CacheHits verifies that repeated calls hit the cache
+func TestMulticall3Integration_CacheHits(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ // First, get a specific block number to ensure consistent caching
+ blockNumReq := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_blockNumber",
+ Params: []interface{}{},
+ }
+ respBody := makeRequest(t, endpoint, blockNumReq)
+
+ var blockResp jsonRPCResponse
+ err := json.Unmarshal(respBody, &blockResp)
+ require.NoError(t, err)
+ require.NotNil(t, blockResp.Result)
+
+ var blockNum string
+ json.Unmarshal(blockResp.Result, &blockNum)
+
+ // Capture baseline cache hit metric
+ var baselineCacheHits float64
+ if metricsEndpoint != "" {
+ baselineCacheHits, _ = getMetricValue(metricsEndpoint, "erpc_multicall3_cache_hits_total")
+ }
+
+ // Make the first request (should populate cache)
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, blockNum}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: symbolSelector}, blockNum}},
+ }
+
+ respBody = makeRequest(t, endpoint, batch)
+ var responses1 []jsonRPCResponse
+ err = json.Unmarshal(respBody, &responses1)
+ require.NoError(t, err)
+ require.Len(t, responses1, 2)
+
+ // Wait a bit for cache to be written
+ time.Sleep(500 * time.Millisecond)
+
+ // Make the same request again (should hit cache)
+ respBody = makeRequest(t, endpoint, batch)
+ var responses2 []jsonRPCResponse
+ err = json.Unmarshal(respBody, &responses2)
+ require.NoError(t, err)
+ require.Len(t, responses2, 2)
+
+ // Results should be identical
+ var result1a, result1b, result2a, result2b string
+ json.Unmarshal(responses1[0].Result, &result1a)
+ json.Unmarshal(responses2[0].Result, &result1b)
+ json.Unmarshal(responses1[1].Result, &result2a)
+ json.Unmarshal(responses2[1].Result, &result2b)
+
+ assert.Equal(t, result1a, result1b, "Cached result should match original")
+ assert.Equal(t, result2a, result2b, "Cached result should match original")
+
+ t.Logf("Cache test: results match for block %s", blockNum)
+
+ // Check if cache hit metric increased
+ if metricsEndpoint != "" {
+ time.Sleep(1 * time.Second)
+
+ newCacheHits, err := getMetricValue(metricsEndpoint, "erpc_multicall3_cache_hits_total")
+ if err == nil {
+ diff := newCacheHits - baselineCacheHits
+ t.Logf("Cache hits metric: %.0f -> %.0f (+%.0f)", baselineCacheHits, newCacheHits, diff)
+ if diff > 0 {
+ t.Logf("✓ Cache hits confirmed via metrics")
+ } else {
+ t.Logf("⚠ No cache hits detected (caching may be disabled or cache not populated)")
+ }
+ }
+ }
+}
+
+// TestMulticall3Integration_UseUpstreamDirective tests that UseUpstream header works with batching
+func TestMulticall3Integration_UseUpstreamDirective(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // This test verifies that the UseUpstream directive is accepted.
+ // Upstream selection is internal, so we can only verify that the request
+ // succeeds (or fails gracefully) when the header is present.
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_call",
+ Params: []interface{}{
+ ethCallParams{To: wethAddress, Data: decimalsSelector},
+ "latest",
+ },
+ }
+
+ body, err := json.Marshal(req)
+ require.NoError(t, err)
+
+ httpReq, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
+ require.NoError(t, err)
+ httpReq.Header.Set("Content-Type", "application/json")
+
+ // Add auth headers
+ for key, value := range getAuthHeaders() {
+ httpReq.Header.Set(key, value)
+ }
+
+ // Add UseUpstream directive (this may or may not match an upstream, but shouldn't error)
+ httpReq.Header.Set("X-ERPC-Use-Upstream", "alchemy") // Common provider name
+
+ client := &http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(httpReq)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ respBody, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ var jsonResp jsonRPCResponse
+ err = json.Unmarshal(respBody, &jsonResp)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+
+ // Request should succeed (either via specified upstream or fallback)
+ if jsonResp.Error != nil {
+ t.Logf("UseUpstream directive: request returned error (upstream may not exist): %s", string(*jsonResp.Error))
+ } else {
+ assert.NotNil(t, jsonResp.Result, "Request should have result")
+ t.Logf("UseUpstream directive: request succeeded")
+ }
+}
+
+// TestMulticall3Integration_HighConcurrency stress tests with high concurrency
+func TestMulticall3Integration_HighConcurrency(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ // Capture baseline metrics
+ var baselineAggregation, baselineOverflow float64
+ if metricsEndpoint != "" {
+ baselineAggregation, _ = getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ baselineOverflow, _ = getMetricValue(metricsEndpoint, "erpc_multicall3_queue_overflow_total")
+ }
+
+ // Send 50 concurrent requests
+ const numRequests = 50
+ var wg sync.WaitGroup
+ results := make(chan bool, numRequests)
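+ // Buffered to numRequests so worker goroutines never block when reporting their outcome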
+
+ for i := 0; i < numRequests; i++ {
+ wg.Add(1)
+ go func(id int) {
+ defer wg.Done()
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: id,
+ Method: "eth_call",
+ Params: []interface{}{
+ ethCallParams{To: wethAddress, Data: decimalsSelector},
+ "latest",
+ },
+ }
+
+ respBody := makeRequest(t, endpoint, req)
+
+ var resp jsonRPCResponse
+ if err := json.Unmarshal(respBody, &resp); err != nil {
+ results <- false
+ return
+ }
+
+ results <- resp.Error == nil && resp.Result != nil
+ }(i)
+ }
+
+ wg.Wait()
+ close(results)
+
+ successCount := 0
+ for success := range results {
+ if success {
+ successCount++
+ }
+ }
+
+ // All requests should succeed
+ assert.Equal(t, numRequests, successCount, "All high-concurrency requests should succeed")
+ t.Logf("High concurrency: %d/%d requests successful", successCount, numRequests)
+
+ // Check metrics
+ if metricsEndpoint != "" {
+ time.Sleep(1 * time.Second)
+
+ newAggregation, _ := getMetricValue(metricsEndpoint, "erpc_multicall3_aggregation_total")
+ newOverflow, _ := getMetricValue(metricsEndpoint, "erpc_multicall3_queue_overflow_total")
+
+ aggDiff := newAggregation - baselineAggregation
+ overflowDiff := newOverflow - baselineOverflow
+
+ t.Logf("Aggregation metric: %.0f -> %.0f (+%.0f)", baselineAggregation, newAggregation, aggDiff)
+ t.Logf("Queue overflow metric: %.0f -> %.0f (+%.0f)", baselineOverflow, newOverflow, overflowDiff)
+
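+ // Far fewer aggregations than requests suggests many eth_calls were folded into shared multicall3 batches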
+ if aggDiff > 0 && aggDiff < float64(numRequests)/2 {
+ t.Logf("✓ Efficient batching under high concurrency: %d requests → %.0f aggregations", numRequests, aggDiff)
+ }
+
+ if overflowDiff > 0 {
+ t.Logf("⚠ Some requests overflowed (%.0f) - this is expected under extreme load", overflowDiff)
+ }
+ }
+}
+
+// TestMulticall3Integration_DifferentBlockTags tests that calls with different block tags are NOT batched together
+func TestMulticall3Integration_DifferentBlockTags(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Get latest block number
+ blockNumReq := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_blockNumber",
+ Params: []interface{}{},
+ }
+ respBody := makeRequest(t, endpoint, blockNumReq)
+
+ var blockResp jsonRPCResponse
+ json.Unmarshal(respBody, &blockResp)
+ var blockNum string
+ json.Unmarshal(blockResp.Result, &blockNum)
+
+ // Send batch with DIFFERENT block tags - these should NOT be batched together
+ // (different batch keys due to different blockRef)
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, blockNum}},
+ }
+
+ respBody = makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 2)
+
+ // Both should succeed
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ // Results will usually match (same call, block refs close in time), but equality is not asserted
+ var result1, result2 string
+ json.Unmarshal(responses[0].Result, &result1)
+ json.Unmarshal(responses[1].Result, &result2)
+
+ t.Logf("Different block tags: 'latest' returned %s, '%s' returned %s", result1, blockNum, result2)
+ // Note: Results may differ if there was a state change between blocks
+}
+
+// TestMulticall3Integration_EmptyCalldata tests handling of calls with empty/minimal calldata
+func TestMulticall3Integration_EmptyCalldata(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Some contracts have fallback functions that accept empty calldata
+ // We're testing that the batching handles this gracefully
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: "0x"}, "latest"}}, // Empty calldata
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 2)
+
+ // First call should succeed
+ assert.Nil(t, responses[0].Error, "Normal call should succeed")
+ assert.NotNil(t, responses[0].Result, "Normal call should have result")
+
+ // Second call may succeed or fail depending on the contract's fallback function
+ if responses[1].Error != nil {
+ t.Logf("Empty calldata: returned error (expected for contracts without fallback)")
+ } else {
+ t.Logf("Empty calldata: returned result (contract has fallback function)")
+ }
+}
+
+// TestMulticall3Integration_LargeCalldata tests handling of calls with large calldata
+func TestMulticall3Integration_LargeCalldata(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Create a call with larger calldata (balanceOf with address padded)
+ // balanceOf(address) = 0x70a08231 + 32-byte address
+ largeCalldata := "0x70a08231000000000000000000000000" + strings.Repeat("ab", 20) // balanceOf(0xabab...ab)
+
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: largeCalldata}, "latest"}},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{ethCallParams{To: wethAddress, Data: decimalsSelector}, "latest"}},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 2)
+
+ // Both should return results (balanceOf returns 0 for unknown address, decimals returns 18)
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ t.Logf("Large calldata: both calls succeeded")
+}
+
+// =============================================================================
+// Bypass Tests - Verify calls that should NOT be batched
+// =============================================================================
+
+// TestMulticall3Integration_BypassWithValueField tests that calls with 'value' field bypass batching
+func TestMulticall3Integration_BypassWithValueField(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // eth_call with 'value' field should bypass multicall3 batching
+ // (multicall3 aggregate3 doesn't support value transfers)
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector, "value": "0x0"},
+ "latest",
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ // Both should still succeed (first bypasses batching, second may be batched)
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ t.Logf("Bypass with 'value' field: both calls succeeded (first bypassed batching)")
+}
+
+// TestMulticall3Integration_BypassWithFromField tests that calls with 'from' field bypass batching
+func TestMulticall3Integration_BypassWithFromField(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // eth_call with 'from' field should bypass multicall3 batching
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector, "from": "0x0000000000000000000000000000000000000001"},
+ "latest",
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ t.Logf("Bypass with 'from' field: both calls succeeded (first bypassed batching)")
+}
+
+// TestMulticall3Integration_BypassWithGasField tests that calls with 'gas' field bypass batching
+func TestMulticall3Integration_BypassWithGasField(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // eth_call with 'gas' field should bypass multicall3 batching
+ // Use a reasonable gas value (500k) to avoid "intrinsic gas too low" errors
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector, "gas": "0x7a120"}, // 500000 gas
+ "latest",
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ // First call (with gas field) may succeed or fail depending on gas amount
+ // The important thing is that it was processed (bypassed batching)
+ if responses[0].Error != nil {
+ t.Logf("Call with 'gas' field returned error (processed, bypassed batching)")
+ } else {
+ assert.NotNil(t, responses[0].Result, "Response 0 should have result")
+ }
+
+ // Second call (normal) should always succeed
+ assert.Nil(t, responses[1].Error, "Response 1 (normal call) should succeed")
+ assert.NotNil(t, responses[1].Result, "Response 1 should have result")
+
+ t.Logf("Bypass with 'gas' field: calls handled correctly (first bypassed batching)")
+}
+
+// TestMulticall3Integration_BypassWithStateOverride tests that calls with state override bypass batching
+func TestMulticall3Integration_BypassWithStateOverride(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // eth_call with state override (3rd param) should bypass multicall3 batching
+ stateOverride := map[string]interface{}{
+ wethAddress: map[string]interface{}{
+ "balance": "0x1000000000000000000",
+ },
+ }
+
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ "latest",
+ stateOverride, // State override as 3rd param
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ // First may error if upstream doesn't support state override, second should succeed
+ if responses[0].Error != nil {
+ t.Logf("State override call returned error (upstream may not support it)")
+ } else {
+ assert.NotNil(t, responses[0].Result, "Response 0 should have result")
+ }
+
+ assert.Nil(t, responses[1].Error, "Response 1 (normal call) should succeed")
+ assert.NotNil(t, responses[1].Result, "Response 1 should have result")
+
+ t.Logf("Bypass with state override: calls handled correctly (first bypassed batching)")
+}
+
+// TestMulticall3Integration_BypassRecursionGuard tests that calls to multicall3 contract bypass batching
+func TestMulticall3Integration_BypassRecursionGuard(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Multicall3 contract address (same on most EVM chains)
+ multicall3Address := "0xcA11bde05977b3631167028862bE2a173976CA11"
+
+ // Calling multicall3 directly should bypass the batching (recursion guard)
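+ // Wrapping a call to the multicall3 contract inside another aggregate3 call could recurse or alter semantics, hence the guard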
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": multicall3Address, "data": "0x252dba42"}, // aggregate() selector
+ "latest",
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ // First call (to multicall3) may revert due to invalid calldata, but shouldn't cause batching issues
+ if responses[0].Error != nil {
+ t.Logf("Call to multicall3 returned error (expected for empty aggregate call)")
+ } else {
+ t.Logf("Call to multicall3 returned result (bypassed batching)")
+ }
+
+ // Second call should succeed normally
+ assert.Nil(t, responses[1].Error, "Normal call should succeed")
+ assert.NotNil(t, responses[1].Result, "Normal call should have result")
+
+ t.Logf("Recursion guard: calls to multicall3 contract handled correctly")
+}
+
+// TestMulticall3Integration_BypassRequireCanonicalFalse tests that requireCanonical:false bypasses batching
+func TestMulticall3Integration_BypassRequireCanonicalFalse(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // First get a recent block hash
+ blockNumReq := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_getBlockByNumber",
+ Params: []interface{}{"latest", false},
+ }
+ respBody := makeRequest(t, endpoint, blockNumReq)
+
+ var blockResp jsonRPCResponse
+ err := json.Unmarshal(respBody, &blockResp)
+ require.NoError(t, err)
+ require.NotNil(t, blockResp.Result)
+
+ var block map[string]interface{}
+ json.Unmarshal(blockResp.Result, &block)
+ blockHash, ok := block["hash"].(string)
+ if !ok || blockHash == "" {
+ t.Skip("Could not get block hash for requireCanonical test")
+ }
+
+ // EIP-1898 block param with requireCanonical: false should bypass batching
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ map[string]interface{}{"blockHash": blockHash, "requireCanonical": false},
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ "latest",
+ }},
+ }
+
+ respBody = makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err = json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ // Both should succeed (first bypasses batching due to requireCanonical:false)
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ t.Logf("Bypass with requireCanonical:false: both calls succeeded")
+}
+
+// =============================================================================
+// Block Reference Variation Tests
+// =============================================================================
+
+// TestMulticall3Integration_BlockHashReference tests batching with block hash (EIP-1898)
+func TestMulticall3Integration_BlockHashReference(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // First get a recent block hash
+ blockReq := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_getBlockByNumber",
+ Params: []interface{}{"latest", false},
+ }
+ respBody := makeRequest(t, endpoint, blockReq)
+
+ var blockResp jsonRPCResponse
+ err := json.Unmarshal(respBody, &blockResp)
+ require.NoError(t, err)
+ require.NotNil(t, blockResp.Result)
+
+ var block map[string]interface{}
+ json.Unmarshal(blockResp.Result, &block)
+ blockHash, ok := block["hash"].(string)
+ if !ok || blockHash == "" {
+ t.Skip("Could not get block hash")
+ }
+
+ // EIP-1898 block param with blockHash
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ map[string]interface{}{"blockHash": blockHash},
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ map[string]interface{}{"blockHash": blockHash},
+ }},
+ }
+
+ respBody = makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err = json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d with block hash should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ t.Logf("Block hash reference (EIP-1898): both calls succeeded with hash %s", blockHash[:18]+"...")
+}
+
+// TestMulticall3Integration_FinalizedSafeEarliestTags tests batching with special block tags
+func TestMulticall3Integration_FinalizedSafeEarliestTags(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ testCases := []struct {
+ name string
+ blockTag string
+ }{
+ {"finalized", "finalized"},
+ {"safe", "safe"},
+ {"earliest", "earliest"},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ tc.blockTag,
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector},
+ tc.blockTag,
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ // Note: 'earliest' may fail on some contracts that didn't exist at genesis
+ // 'finalized' and 'safe' may not be supported by all nodes
+ successCount := 0
+ for _, resp := range responses {
+ if resp.Error == nil && resp.Result != nil {
+ successCount++
+ }
+ }
+
+ if successCount == 2 {
+ t.Logf("'%s' block tag: both calls succeeded", tc.blockTag)
+ } else if successCount > 0 {
+ t.Logf("'%s' block tag: %d/2 calls succeeded (some may not be supported)", tc.blockTag, successCount)
+ } else {
+ t.Logf("'%s' block tag: calls failed (tag may not be supported by upstream)", tc.blockTag)
+ }
+ })
+ }
+}
+
+// =============================================================================
+// Input Variations Tests
+// =============================================================================
+
+// TestMulticall3Integration_InputFieldAlternative tests that 'input' field works as alternative to 'data'
+func TestMulticall3Integration_InputFieldAlternative(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ // Some clients use 'input' instead of 'data' - both should work
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "input": decimalsSelector}, // 'input' instead of 'data'
+ "latest",
+ }},
+ {JSONRPC: "2.0", ID: 2, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": symbolSelector}, // 'data' for comparison
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err := json.Unmarshal(respBody, &responses)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+ require.Len(t, responses, 2)
+
+ for i, resp := range responses {
+ assert.Nil(t, resp.Error, "Response %d should succeed", i)
+ assert.NotNil(t, resp.Result, "Response %d should have result", i)
+ }
+
+ // Both should return valid results
+ var result1, result2 string
+ json.Unmarshal(responses[0].Result, &result1)
+ json.Unmarshal(responses[1].Result, &result2)
+
+ assert.NotEmpty(t, result1, "'input' field should return valid result")
+ assert.NotEmpty(t, result2, "'data' field should return valid result")
+
+ t.Logf("'input' field alternative: both 'input' and 'data' fields work correctly")
+}
+
+// =============================================================================
+// Directive Tests
+// =============================================================================
+
+// TestMulticall3Integration_SkipCacheReadDirective tests that skip-cache-read creates separate batch
+func TestMulticall3Integration_SkipCacheReadDirective(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_call",
+ Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ "latest",
+ },
+ }
+
+ body, err := json.Marshal(req)
+ require.NoError(t, err)
+
+ httpReq, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
+ require.NoError(t, err)
+ httpReq.Header.Set("Content-Type", "application/json")
+
+ // Add auth headers
+ for key, value := range getAuthHeaders() {
+ httpReq.Header.Set(key, value)
+ }
+
+ // Add skip-cache-read directive
+ httpReq.Header.Set("X-ERPC-Skip-Cache-Read", "true")
+
+ client := &http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(httpReq)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ respBody, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ var jsonResp jsonRPCResponse
+ err = json.Unmarshal(respBody, &jsonResp)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+
+ assert.Nil(t, jsonResp.Error, "Request with skip-cache-read should succeed")
+ assert.NotNil(t, jsonResp.Result, "Request should have result")
+
+ t.Logf("Skip-cache-read directive: request succeeded")
+}
+
+// TestMulticall3Integration_RetryEmptyDirective tests that retry-empty creates separate batch
+func TestMulticall3Integration_RetryEmptyDirective(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+
+ req := jsonRPCRequest{
+ JSONRPC: "2.0",
+ ID: 1,
+ Method: "eth_call",
+ Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ "latest",
+ },
+ }
+
+ body, err := json.Marshal(req)
+ require.NoError(t, err)
+
+ httpReq, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
+ require.NoError(t, err)
+ httpReq.Header.Set("Content-Type", "application/json")
+
+ // Add auth headers
+ for key, value := range getAuthHeaders() {
+ httpReq.Header.Set(key, value)
+ }
+
+ // Add retry-empty directive
+ httpReq.Header.Set("X-ERPC-Retry-Empty", "true")
+
+ client := &http.Client{Timeout: 30 * time.Second}
+ resp, err := client.Do(httpReq)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ respBody, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ var jsonResp jsonRPCResponse
+ err = json.Unmarshal(respBody, &jsonResp)
+ require.NoError(t, err, "Failed to parse response: %s", string(respBody))
+
+ assert.Nil(t, jsonResp.Error, "Request with retry-empty should succeed")
+ assert.NotNil(t, jsonResp.Result, "Request should have result")
+
+ t.Logf("Retry-empty directive: request succeeded")
+}
+
+// =============================================================================
+// Fallback Tests
+// =============================================================================
+
+// TestMulticall3Integration_FallbackMetricTracking verifies fallback metric is tracked
+func TestMulticall3Integration_FallbackMetricTracking(t *testing.T) {
+ endpoint := getTestEndpoint(t)
+ metricsEndpoint := getMetricsEndpoint()
+
+ if metricsEndpoint == "" {
+ t.Skip("ERPC_INTEGRATION_TEST_METRICS not set, skipping fallback metric test")
+ }
+
+ // Check current fallback metric
+ fallbackTotal, err := getMetricValue(metricsEndpoint, "erpc_multicall3_fallback_total")
+ require.NoError(t, err)
+
+ // We can't easily trigger a fallback in integration tests without a broken upstream,
+ // but we can verify the metric exists and is being tracked
+ t.Logf("Current fallback total: %.0f", fallbackTotal)
+
+ // Also check fallback requests metric
+ fallbackRequests, _ := getMetricValue(metricsEndpoint, "erpc_multicall3_fallback_requests_total")
+ t.Logf("Current fallback requests total: %.0f", fallbackRequests)
+
+ // Run a normal batch to ensure batching still works
+ batch := []jsonRPCRequest{
+ {JSONRPC: "2.0", ID: 1, Method: "eth_call", Params: []interface{}{
+ map[string]interface{}{"to": wethAddress, "data": decimalsSelector},
+ "latest",
+ }},
+ }
+
+ respBody := makeRequest(t, endpoint, batch)
+
+ var responses []jsonRPCResponse
+ err = json.Unmarshal(respBody, &responses)
+ require.NoError(t, err)
+ require.Len(t, responses, 1)
+ assert.Nil(t, responses[0].Error, "Normal call should succeed")
+
+ // Verify fallback metric didn't increase (no fallback needed)
+ time.Sleep(500 * time.Millisecond)
+ newFallbackTotal, _ := getMetricValue(metricsEndpoint, "erpc_multicall3_fallback_total")
+
+ if newFallbackTotal == fallbackTotal {
+ t.Logf("✓ No fallback triggered for normal batch (as expected)")
+ } else {
+ t.Logf("⚠ Fallback was triggered: %.0f -> %.0f", fallbackTotal, newFallbackTotal)
+ }
+}
+
+// TestMulticall3Integration_MetricsSummary prints a summary of multicall3 metrics
+func TestMulticall3Integration_MetricsSummary(t *testing.T) {
+ getTestEndpoint(t) // Ensure we have an endpoint configured
+ metricsEndpoint := getMetricsEndpoint()
+
+ if metricsEndpoint == "" {
+ t.Skip("ERPC_INTEGRATION_TEST_METRICS not set, skipping metrics summary")
+ }
+
+ metrics := []string{
+ "erpc_multicall3_aggregation_total",
+ "erpc_multicall3_fallback_total",
+ "erpc_multicall3_cache_hits_total",
+ "erpc_multicall3_queue_overflow_total",
+ "erpc_multicall3_dedupe_total",
+ "erpc_multicall3_panic_total",
+ "erpc_multicall3_abandoned_total",
+ "erpc_multicall3_cache_write_dropped_total",
+ }
+
+ t.Logf("\n=== Multicall3 Metrics Summary ===")
+ for _, metric := range metrics {
+ value, err := getMetricValue(metricsEndpoint, metric)
+ if err != nil {
+ t.Logf(" %s: error fetching", metric)
+ } else {
+ t.Logf(" %s: %.0f", metric, value)
+ }
+ }
+ t.Logf("===================================")
+}
diff --git a/typescript/config/lib/generated.d.ts b/typescript/config/lib/generated.d.ts
index 36a2c6bdc..6a61e13ea 100644
--- a/typescript/config/lib/generated.d.ts
+++ b/typescript/config/lib/generated.d.ts
@@ -472,6 +472,7 @@ export interface EvmAvailabilityBoundConfig {
export interface FailsafeConfig {
matchMethod?: string;
matchFinality?: DataFinalityState[];
+ matchUpstreamGroup?: string;
retry?: RetryPolicyConfig;
circuitBreaker?: CircuitBreakerPolicyConfig;
timeout?: TimeoutPolicyConfig;
diff --git a/typescript/config/src/generated.ts b/typescript/config/src/generated.ts
index ab58b155c..b50753b79 100644
--- a/typescript/config/src/generated.ts
+++ b/typescript/config/src/generated.ts
@@ -486,6 +486,7 @@ export interface EvmAvailabilityBoundConfig {
export interface FailsafeConfig {
matchMethod?: string;
matchFinality?: DataFinalityState[];
+ matchUpstreamGroup?: string;
retry?: RetryPolicyConfig;
circuitBreaker?: CircuitBreakerPolicyConfig;
timeout?: TimeoutPolicyConfig;
diff --git a/upstream/ratelimiter_budget.go b/upstream/ratelimiter_budget.go
index ae084225d..a82b16da1 100644
--- a/upstream/ratelimiter_budget.go
+++ b/upstream/ratelimiter_budget.go
@@ -24,7 +24,6 @@ type RateLimiterBudget struct {
Rules []*RateLimitRule
registry *RateLimitersRegistry
rulesMu sync.RWMutex
- cache limiter.RateLimitCache
maxTimeout time.Duration
}
@@ -73,11 +72,17 @@ type ruleResult struct {
allowed bool
}
+// getCache returns the current cache from the registry (thread-safe)
+func (b *RateLimiterBudget) getCache() limiter.RateLimitCache {
+ return b.registry.GetCache()
+}
+
// TryAcquirePermit evaluates all matching rules for the given method using Envoy's DoLimit.
// Rules are evaluated in parallel for lower latency. Returns true if allowed, false if rate limited.
func (b *RateLimiterBudget) TryAcquirePermit(ctx context.Context, projectId string, req *common.NormalizedRequest, method string, vendor string, upstreamId string, authLabel string, origin string) (bool, error) {
- if b.cache == nil {
- return true, nil
+ cache := b.getCache()
+ if cache == nil {
+ return true, nil // Fail-open when no cache is available
}
ctx, span := common.StartDetailSpan(ctx, "RateLimiter.TryAcquirePermit",
@@ -168,6 +173,11 @@ func (b *RateLimiterBudget) TryAcquirePermit(ctx context.Context, projectId stri
// evaluateRule checks a single rate limit rule against the cache.
// Returns true if allowed, false if over limit.
func (b *RateLimiterBudget) evaluateRule(ctx context.Context, rule *RateLimitRule, method, clientIP, userLabel, networkLabel string) bool {
+ cache := b.getCache()
+ if cache == nil {
+ return true // Fail-open when no cache is available
+ }
+
// Build descriptor entries
entries := []*pb_struct.RateLimitDescriptor_Entry{{Key: "method", Value: method}}
if rule.Config.PerIP && clientIP != "" && clientIP != "n/a" {
@@ -205,9 +215,9 @@ func (b *RateLimiterBudget) evaluateRule(ctx context.Context, rule *RateLimitRul
var statuses []*pb.RateLimitResponse_DescriptorStatus
var timedOut bool
if b.maxTimeout > 0 {
- statuses, timedOut = b.doLimitWithTimeout(ctx, rlReq, limits, method, userLabel, networkLabel)
+ statuses, timedOut = b.doLimitWithTimeout(ctx, cache, rlReq, limits, method, userLabel, networkLabel)
} else {
- statuses = b.cache.DoLimit(ctx, rlReq, limits)
+ statuses = cache.DoLimit(ctx, rlReq, limits)
}
if timedOut {
@@ -246,13 +256,14 @@ func (r *RateLimitRule) statsKeySuffix() string {
// Returns (statuses, timedOut). On timeout, returns (nil, true) and records fail-open metric.
func (b *RateLimiterBudget) doLimitWithTimeout(
ctx context.Context,
+ cache limiter.RateLimitCache,
rlReq *pb.RateLimitRequest,
limits []*config.RateLimit,
method, userLabel, networkLabel string,
) ([]*pb.RateLimitResponse_DescriptorStatus, bool) {
resultCh := make(chan []*pb.RateLimitResponse_DescriptorStatus, 1)
go func() {
- resultCh <- b.cache.DoLimit(ctx, rlReq, limits)
+ resultCh <- cache.DoLimit(ctx, rlReq, limits)
}()
timer := time.NewTimer(b.maxTimeout)
diff --git a/upstream/ratelimiter_budget_bench_test.go b/upstream/ratelimiter_budget_bench_test.go
index 9093f4d25..a2198b258 100644
--- a/upstream/ratelimiter_budget_bench_test.go
+++ b/upstream/ratelimiter_budget_bench_test.go
@@ -67,12 +67,15 @@ func buildBenchBudget(numRules int, perUser, perIP, perNetwork bool) *RateLimite
}
logger := zerolog.Nop()
+ registry := &RateLimitersRegistry{
+ statsManager: mgr,
+ envoyCache: cache,
+ }
return &RateLimiterBudget{
logger: &logger,
Id: "bench-budget",
Rules: rules,
- registry: &RateLimitersRegistry{statsManager: mgr},
- cache: cache,
+ registry: registry,
}
}
@@ -172,7 +175,7 @@ func BenchmarkTryAcquirePermit_NoRulesMatch(b *testing.B) {
// BenchmarkTryAcquirePermit_NilCache tests the nil cache fast path
func BenchmarkTryAcquirePermit_NilCache(b *testing.B) {
budget := buildBenchBudget(1, false, false, false)
- budget.cache = nil
+ budget.registry.envoyCache = nil // Simulate Redis not connected yet
ctx := context.Background()
b.ReportAllocs()
@@ -208,12 +211,15 @@ func buildBenchBudgetWithDelay(numRules int, delay time.Duration) *RateLimiterBu
}
logger := zerolog.Nop()
+ registry := &RateLimitersRegistry{
+ statsManager: mgr,
+ envoyCache: &delayedCache{inner: innerCache, delay: delay},
+ }
return &RateLimiterBudget{
logger: &logger,
Id: "bench-budget",
Rules: rules,
- registry: &RateLimitersRegistry{statsManager: mgr},
- cache: &delayedCache{inner: innerCache, delay: delay},
+ registry: registry,
}
}
diff --git a/upstream/ratelimiter_registry.go b/upstream/ratelimiter_registry.go
index b5c6aea40..f17d8de71 100644
--- a/upstream/ratelimiter_registry.go
+++ b/upstream/ratelimiter_registry.go
@@ -1,7 +1,10 @@
package upstream
import (
+ "context"
+ "fmt"
"math/rand"
+ "runtime/debug"
"strings"
"sync"
"time"
@@ -17,18 +20,23 @@ import (
"github.com/erpc/erpc/common"
"github.com/erpc/erpc/telemetry"
+ "github.com/erpc/erpc/util"
)
type RateLimitersRegistry struct {
+ appCtx context.Context
logger *zerolog.Logger
cfg *common.RateLimiterConfig
budgetsLimiters sync.Map
envoyCache limiter.RateLimitCache
statsManager stats.Manager
+ cacheMu sync.RWMutex
+ initializer *util.Initializer
}
-func NewRateLimitersRegistry(cfg *common.RateLimiterConfig, logger *zerolog.Logger) (*RateLimitersRegistry, error) {
+func NewRateLimitersRegistry(appCtx context.Context, cfg *common.RateLimiterConfig, logger *zerolog.Logger) (*RateLimitersRegistry, error) {
r := &RateLimitersRegistry{
+ appCtx: appCtx,
cfg: cfg,
logger: logger,
}
@@ -42,57 +50,109 @@ func (r *RateLimitersRegistry) bootstrap() error {
return nil
}
+ // Create a default stats manager (needed even if cache is nil)
+ store := gostats.NewStore(gostats.NewNullSink(), false)
+ r.statsManager = stats.NewStatManager(store, settings.NewSettings())
+
// Initialize shared cache if configured
if r.cfg.Store != nil && r.cfg.Store.Driver == "redis" && r.cfg.Store.Redis != nil {
- store := gostats.NewStore(gostats.NewNullSink(), false)
- mgr := stats.NewStatManager(store, settings.NewSettings())
- useTLS := r.cfg.Store.Redis.TLS != nil && r.cfg.Store.Redis.TLS.Enabled
- url := r.cfg.Store.Redis.URI
- if url == "" {
- url = r.cfg.Store.Redis.Addr
+ // Create initializer for background retry
+ r.initializer = util.NewInitializer(r.appCtx, r.logger, nil)
+
+ // Attempt Redis connection with panic recovery - don't block startup
+ connectTask := util.NewBootstrapTask("redis-ratelimiter-connect", r.connectRedisTask)
+ if err := r.initializer.ExecuteTasks(r.appCtx, connectTask); err != nil {
+ // Cache stays nil - rate limiting will fail-open until Redis connects
+ r.logger.Warn().Err(err).Msg("failed to initialize Redis rate limiter on first attempt (rate limiting will fail-open until connected, retrying in background)")
}
- poolSize := r.cfg.Store.Redis.ConnPoolSize
- client := redis.NewClientImpl(
- store.Scope("erpc_rl"),
- useTLS,
- r.cfg.Store.Redis.Username,
- "tcp",
- "single",
- url,
- poolSize,
- 5*time.Millisecond,
- 32,
- nil,
- false,
- nil,
- )
- r.envoyCache = redis.NewFixedRateLimitCacheImpl(
- client,
- nil,
- utils.NewTimeSourceImpl(),
- rand.New(rand.NewSource(time.Now().UnixNano())), // #nosec G404
- 5,
- nil,
- defaultNearLimitRatio(r.cfg.Store.NearLimitRatio),
- defaultCacheKeyPrefix(r.cfg.Store.CacheKeyPrefix),
- mgr,
- false,
- )
- r.statsManager = mgr
} else if r.cfg.Store != nil && r.cfg.Store.Driver == "memory" {
- store := gostats.NewStore(gostats.NewNullSink(), false)
- mgr := stats.NewStatManager(store, settings.NewSettings())
+ // Explicitly configured for memory
r.envoyCache = NewMemoryRateLimitCache(
utils.NewTimeSourceImpl(),
rand.New(rand.NewSource(time.Now().Unix())), // #nosec G404
0,
defaultNearLimitRatio(r.cfg.Store.NearLimitRatio),
defaultCacheKeyPrefix(r.cfg.Store.CacheKeyPrefix),
- mgr,
+ r.statsManager,
)
- r.statsManager = mgr
}
+ // Initialize budgets (cache may be nil for Redis until it connects)
+ r.initializeBudgets()
+
+ return nil
+}
+
+// connectRedisTask attempts to connect to Redis with panic recovery
+func (r *RateLimitersRegistry) connectRedisTask(ctx context.Context) (err error) {
+ // Recover from panics in the envoyproxy/ratelimit library
+ defer func() {
+ if rec := recover(); rec != nil {
+ telemetry.MetricUnexpectedPanicTotal.WithLabelValues(
+ "ratelimiter-redis-connect",
+ fmt.Sprintf("store:%s", r.cfg.Store.Redis.URI),
+ common.ErrorFingerprint(rec),
+ ).Inc()
+ r.logger.Error().
+ Interface("panic", rec).
+ Str("stack", string(debug.Stack())).
+ Msg("panic recovered during Redis rate limiter connection (rate limiting will fail-open)")
+ err = fmt.Errorf("panic during Redis connection: %v", rec)
+ }
+ }()
+
+ store := gostats.NewStore(gostats.NewNullSink(), false)
+ mgr := stats.NewStatManager(store, settings.NewSettings())
+ useTLS := r.cfg.Store.Redis.TLS != nil && r.cfg.Store.Redis.TLS.Enabled
+ url := r.cfg.Store.Redis.URI
+ if url == "" {
+ url = r.cfg.Store.Redis.Addr
+ }
+ poolSize := r.cfg.Store.Redis.ConnPoolSize
+
+ r.logger.Debug().Str("url", util.RedactEndpoint(url)).Bool("tls", useTLS).Int("poolSize", poolSize).Msg("attempting to connect to Redis for rate limiting")
+
+ client := redis.NewClientImpl(
+ store.Scope("erpc_rl"),
+ useTLS,
+ r.cfg.Store.Redis.Username,
+ "tcp",
+ "single",
+ url,
+ poolSize,
+ 5*time.Millisecond,
+ 32,
+ nil,
+ false,
+ nil,
+ )
+
+ cache := redis.NewFixedRateLimitCacheImpl(
+ client,
+ nil,
+ utils.NewTimeSourceImpl(),
+ rand.New(rand.NewSource(time.Now().UnixNano())), // #nosec G404
+ 5,
+ nil,
+ defaultNearLimitRatio(r.cfg.Store.NearLimitRatio),
+ defaultCacheKeyPrefix(r.cfg.Store.CacheKeyPrefix),
+ mgr,
+ false,
+ )
+
+ // Successfully connected - update the cache
+ // Note: statsManager is NOT updated here to avoid data races.
+ // The statsManager created in bootstrap() is sufficient and identical.
+ r.cacheMu.Lock()
+ r.envoyCache = cache
+ r.cacheMu.Unlock()
+
+ r.logger.Info().Str("url", util.RedactEndpoint(url)).Msg("successfully connected to Redis for rate limiting")
+ return nil
+}
+
+// initializeBudgets creates the rate limiter budgets
+func (r *RateLimitersRegistry) initializeBudgets() {
for _, budgetCfg := range r.cfg.Budgets {
lg := r.logger.With().Str("budget", budgetCfg.Id).Logger()
lg.Debug().Msgf("initializing rate limiter budget")
@@ -105,7 +165,6 @@ func (r *RateLimitersRegistry) bootstrap() error {
Rules: make([]*RateLimitRule, 0),
registry: r,
logger: &lg,
- cache: r.envoyCache,
maxTimeout: maxTimeout,
}
@@ -131,8 +190,13 @@ func (r *RateLimitersRegistry) bootstrap() error {
r.budgetsLimiters.Store(budgetCfg.Id, budget)
}
+}
- return nil
+// GetCache returns the current rate limit cache (thread-safe)
+func (r *RateLimitersRegistry) GetCache() limiter.RateLimitCache {
+ r.cacheMu.RLock()
+ defer r.cacheMu.RUnlock()
+ return r.envoyCache
}
func (r *RateLimitersRegistry) GetBudget(budgetId string) (*RateLimiterBudget, error) {
diff --git a/upstream/ratelimiter_test.go b/upstream/ratelimiter_test.go
index 5fb758605..b59c74487 100644
--- a/upstream/ratelimiter_test.go
+++ b/upstream/ratelimiter_test.go
@@ -16,7 +16,7 @@ func TestRateLimitersRegistry_New(t *testing.T) {
logger := zerolog.Nop()
t.Run("nil config", func(t *testing.T) {
- registry, err := NewRateLimitersRegistry(nil, &logger)
+ registry, err := NewRateLimitersRegistry(context.Background(), nil, &logger)
require.NoError(t, err)
assert.NotNil(t, registry)
})
@@ -37,7 +37,7 @@ func TestRateLimitersRegistry_New(t *testing.T) {
},
},
}
- registry, err := NewRateLimitersRegistry(cfg, &logger)
+ registry, err := NewRateLimitersRegistry(context.Background(), cfg, &logger)
require.NoError(t, err)
assert.NotNil(t, registry)
})
@@ -60,7 +60,7 @@ func TestRateLimitersRegistry_GetBudget(t *testing.T) {
},
},
}
- registry, err := NewRateLimitersRegistry(cfg, &logger)
+ registry, err := NewRateLimitersRegistry(context.Background(), cfg, &logger)
require.NoError(t, err)
t.Run("existing budget", func(t *testing.T) {
@@ -106,7 +106,7 @@ func TestRateLimiterBudget_GetRulesByMethod(t *testing.T) {
},
},
}
- registry, err := NewRateLimitersRegistry(cfg, &logger)
+ registry, err := NewRateLimitersRegistry(context.Background(), cfg, &logger)
require.NoError(t, err)
budget, err := registry.GetBudget("test-budget")
@@ -152,7 +152,7 @@ func TestRateLimiter_ConcurrentPermits(t *testing.T) {
},
},
}
- registry, err := NewRateLimitersRegistry(cfg, &logger)
+ registry, err := NewRateLimitersRegistry(context.Background(), cfg, &logger)
require.NoError(t, err)
budget, err := registry.GetBudget("test-budget")
@@ -203,7 +203,7 @@ func TestRateLimiter_ExceedCapacity(t *testing.T) {
},
}
- registry, err := NewRateLimitersRegistry(cfg, &logger)
+ registry, err := NewRateLimitersRegistry(context.Background(), cfg, &logger)
require.NoError(t, err)
budget, err := registry.GetBudget("test-budget")
diff --git a/upstream/registry.go b/upstream/registry.go
index 9557992cb..fdf26d594 100644
--- a/upstream/registry.go
+++ b/upstream/registry.go
@@ -606,7 +606,7 @@ func (u *UpstreamsRegistry) RefreshUpstreamNetworkMethodScores() error {
prev = 0
}
if math.IsNaN(instant) || math.IsInf(instant, 0) {
- u.logger.Trace().
+ u.logger.Warn().
Str("upstreamId", upsId).
Str("network", km.network).
Str("method", km.method).
diff --git a/upstream/registry_contention_bench_test.go b/upstream/registry_contention_bench_test.go
index 6dd5bcc72..bccac6e60 100644
--- a/upstream/registry_contention_bench_test.go
+++ b/upstream/registry_contention_bench_test.go
@@ -25,7 +25,7 @@ func buildRegistryForBench(b *testing.B, numNetworks, upstreamsPerNetwork int, m
vr := thirdparty.NewVendorsRegistry()
pr, _ := thirdparty.NewProvidersRegistry(&log.Logger, vr, nil, nil)
- rlr, _ := NewRateLimitersRegistry(nil, &log.Logger)
+ rlr, _ := NewRateLimitersRegistry(context.Background(), nil, &log.Logger)
mt := health.NewTracker(&log.Logger, "bench-prj", time.Minute)
reg := NewUpstreamsRegistry(