From 4a766d976910f575799ef99116a27be8256db18d Mon Sep 17 00:00:00 2001 From: Hubert Bugaj Date: Fri, 8 May 2026 19:19:30 +0200 Subject: [PATCH] fix(eth): scope receipt logs by message, cap event filter, max rpc size --- CHANGELOG.md | 4 + src/cli_shared/cli/config.rs | 13 +++ src/rpc/client.rs | 4 +- src/rpc/methods/chain.rs | 2 +- src/rpc/methods/eth.rs | 126 ++++++++++++++---------- src/rpc/methods/eth/errors.rs | 17 ++++ src/rpc/methods/eth/filter/event.rs | 2 + src/rpc/methods/eth/filter/mod.rs | 144 +++++++++++++++++++++++++--- src/rpc/mod.rs | 13 ++- 9 files changed, 254 insertions(+), 71 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 543e51d49dd6..ccdd0d8577dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,12 +29,16 @@ ### Added +- [#7024](https://github.com/ChainSafe/forest/pull/7024): `FOREST_RPC_MAX_RESPONSE_BODY_SIZE` environment variable. Sets the JSON-RPC server's maximum response body size in bytes (default 64 MiB). Operators serving log-heavy `eth_getTransactionReceipt`/`eth_getBlockReceipts` calls can raise this above 64 MiB. + ### Changed ### Removed ### Fixed +- [#7024](https://github.com/ChainSafe/forest/pull/7024): `eth_getTransactionReceipt` no longer fails when another transaction in the same tipset emits a large number of events. `max_filter_results` now caps only multi-tipset event queries; single-block calls (`eth_getLogs` with `blockHash`, `eth_getBlockReceipts`, `eth_getTransactionReceipt`) bypass it. Public RPC operators should apply rate and response-size limits at the proxy layer for these calls; a single response can be large when a block contains log-heavy transactions. Ports [filecoin-project/lotus#13617](https://github.com/filecoin-project/lotus/pull/13617). + ## Forest v0.33.3 "Dawn" Non-mandatory release for all node operators. It includes a few fixes to make the chain following logic more robust and eliminate a few non-critical warnings. 
diff --git a/src/cli_shared/cli/config.rs b/src/cli_shared/cli/config.rs index 1c02bc5f3f49..1c078d3b6aee 100644 --- a/src/cli_shared/cli/config.rs +++ b/src/cli_shared/cli/config.rs @@ -44,8 +44,21 @@ impl Default for DaemonConfig { #[derive(Deserialize, Serialize, PartialEq, Eq, Debug, Clone)] #[cfg_attr(test, derive(derive_quickcheck_arbitrary::Arbitrary))] pub struct EventsConfig { + /// Caps the events returned by event-filter queries used by the actor + /// events API and the Ethereum event and receipt APIs (`eth_getLogs`, + /// `eth_getFilterLogs`, `eth_getFilterChanges`). Set to `0` for no limit. + /// + /// The cap is a hard limit only when a query's events come from more than + /// one tipset. A range whose events all live in a single tipset may + /// exceed this value; queries scoped to a single tipset (`block_hash`, + /// `eth_getBlockReceipts`) bypass it entirely. `eth_getTransactionReceipt` + /// narrows to a single message and is also unaffected. + /// + /// Self-hosted nodes serving trusted callers can use `0` or a high value. + /// Public RPC operators should keep it bounded. #[cfg_attr(test, arbitrary(gen(|g| u32::arbitrary(g) as _)))] pub max_filter_results: usize, + /// Maximum block-range span (in epochs) accepted in event-filter queries. 
pub max_filter_height_range: ChainEpoch, } diff --git a/src/rpc/client.rs b/src/rpc/client.rs index 880337ac7568..08e8ac8fb1e0 100644 --- a/src/rpc/client.rs +++ b/src/rpc/client.rs @@ -211,7 +211,7 @@ impl UrlClient { jsonrpsee::ws_client::WsClientBuilder::new() .set_headers(headers) .max_request_size(MAX_REQUEST_BODY_SIZE) - .max_response_size(MAX_RESPONSE_BODY_SIZE) + .max_response_size(*MAX_RESPONSE_BODY_SIZE) .request_timeout(ONE_DAY) .build(&url) .await?, @@ -220,7 +220,7 @@ impl UrlClient { jsonrpsee::http_client::HttpClientBuilder::new() .set_headers(headers) .max_request_size(MAX_REQUEST_BODY_SIZE) - .max_response_size(MAX_RESPONSE_BODY_SIZE) + .max_response_size(*MAX_RESPONSE_BODY_SIZE) .request_timeout(ONE_DAY) .build(&url)?, ), diff --git a/src/rpc/methods/chain.rs b/src/rpc/methods/chain.rs index 8ec3ecea3055..b008faa8aad3 100644 --- a/src/rpc/methods/chain.rs +++ b/src/rpc/methods/chain.rs @@ -131,7 +131,7 @@ pub(crate) fn logs( let handle = tokio::spawn(async move { while let Ok(changes) = head_changes_rx.recv().await { for ts in changes.applies { - match eth_logs_with_filter(&ctx, &ts, filter.clone(), None).await { + match eth_logs_with_filter(&ctx, &ts, filter.clone()).await { Ok(logs) => { if !logs.is_empty() && let Err(e) = sender.send(logs) diff --git a/src/rpc/methods/eth.rs b/src/rpc/methods/eth.rs index 2c5c6a712712..ada9c4713eae 100644 --- a/src/rpc/methods/eth.rs +++ b/src/rpc/methods/eth.rs @@ -1237,6 +1237,7 @@ async fn new_eth_tx_receipt( ctx: &Ctx, tipset: &Tipset, tx: &ApiEthTx, + msg_cid: Cid, msg_receipt: &Receipt, ) -> anyhow::Result { let mut tx_receipt = EthTxReceipt { @@ -1282,7 +1283,7 @@ async fn new_eth_tx_receipt( if msg_receipt.events_root().is_some() { let logs = - eth_logs_for_block_and_transaction(ctx, tipset, &tx.block_hash, &tx.hash).await?; + eth_logs_for_block_and_transaction(ctx, tipset, &tx.block_hash, &msg_cid).await?; if !logs.is_empty() { tx_receipt.logs = logs; } @@ -1304,21 +1305,34 @@ pub async fn 
eth_logs_for_block_and_transaction, ts: &Tipset, block_hash: &EthHash, - tx_hash: &EthHash, + msg_cid: &Cid, ) -> anyhow::Result<Vec<EthLog>> { - let spec = EthFilterSpec { - block_hash: Some(*block_hash), - ..Default::default() - }; + // Refuse to serve events for tipsets at or after head (deferred execution). + let heaviest_epoch = ctx.chain_store().heaviest_tipset().epoch(); + if ts.epoch() >= heaviest_epoch { + return Err(EthErrors::EventsNotYetAvailable.into()); + } - eth_logs_with_filter(ctx, ts, Some(spec), Some(tx_hash)).await + let pf = ParsedFilter::new_with_tipset_and_msg( + ParsedFilterTipsets::Hash(*block_hash), + Some(*msg_cid), + ); + let mut events = vec![]; + EthEventHandler::collect_events( + ctx, + ts, + Some(&pf), + SkipEvent::OnUnresolvedAddress, + &mut events, + ) + .await?; + eth_filter_logs_from_events(ctx, &events) } pub async fn eth_logs_with_filter( ctx: &Ctx, ts: &Tipset, spec: Option<EthFilterSpec>, - tx_hash: Option<&EthHash>, ) -> anyhow::Result<Vec<EthLog>> { let mut events = vec![]; EthEventHandler::collect_events( @@ -1329,15 +1343,7 @@ pub async fn eth_logs_with_filter( &mut events, ) .await?; - - let logs = eth_filter_logs_from_events(ctx, &events)?; - Ok(match tx_hash { - Some(hash) => logs - .into_iter() - .filter(|log| &log.transaction_hash == hash) - .collect(), - None => logs, // no tx hash, keep all logs - }) + eth_filter_logs_from_events(ctx, &events) } fn get_signed_message(ctx: &Ctx, message_cid: Cid) -> Result { @@ -1453,7 +1459,7 @@ async fn get_block_receipts( i as u64, )?; - let receipt = new_eth_tx_receipt(ctx, &ts_ref, &tx, receipt).await?; + let receipt = new_eth_tx_receipt(ctx, &ts_ref, &tx, message.cid(), receipt).await?; eth_receipts.push(receipt); } Ok(eth_receipts) @@ -2853,7 +2859,8 @@ async fn get_eth_transaction_receipt( ) })?; - let tx_receipt = new_eth_tx_receipt(&ctx, &parent_ts, &tx, &message_lookup.receipt).await?; + let tx_receipt = + new_eth_tx_receipt(&ctx, &parent_ts, &tx, msg_cid, &message_lookup.receipt).await?; Ok(Some(tx_receipt)) 
} @@ -3060,20 +3067,6 @@ fn eth_tx_hash_from_message_cid( Ok(None) } -fn transform_events<F>(events: &[CollectedEvent], f: F) -> anyhow::Result<Vec<EthLog>> -where - F: Fn(&CollectedEvent) -> anyhow::Result<Option<EthLog>>, -{ - events - .iter() - .filter_map(|event| match f(event) { - Ok(Some(eth_log)) => Some(Ok(eth_log)), - Ok(None) => None, - Err(e) => Some(Err(e)), - }) - .collect() -} - fn eth_filter_logs_from_tipsets(events: &[CollectedEvent]) -> anyhow::Result> { events .iter() @@ -3108,25 +3101,55 @@ fn eth_filter_logs_from_events( ctx: &Ctx, events: &[CollectedEvent], ) -> anyhow::Result<Vec<EthLog>> { - transform_events(events, |event| { - let (data, topics) = if let Some((data, topics)) = eth_log_from_event(&event.entries) { - (data, topics) + use ahash::AHashMap as HashMap; + + let chain_id = ctx.state_manager.chain_config().eth_chain_id; + let mut tx_hash_by_msg: HashMap<Cid, EthHash> = HashMap::new(); + let mut block_hash_by_tipset: HashMap<TipsetKey, EthHash> = HashMap::new(); + let mut eth_addr_by_emitter: HashMap<Address, EthAddress> = HashMap::new(); + + let mut logs = Vec::with_capacity(events.len()); + for event in events { + let (data, topics) = match eth_log_from_event(&event.entries) { + Some(parts) => parts, + None => { + tracing::warn!("Ignoring event"); + continue; + } + }; + + let transaction_hash = if let Some(h) = tx_hash_by_msg.get(&event.msg_cid) { + *h } else { - tracing::warn!("Ignoring event"); - return Ok(None); + match eth_tx_hash_from_message_cid(ctx.store(), &event.msg_cid, chain_id)? { + Some(h) => { + tx_hash_by_msg.insert(event.msg_cid, h); + h + } + None => { + tracing::warn!("Ignoring event"); + continue; + } + } }; - let transaction_hash = if let Some(transaction_hash) = eth_tx_hash_from_message_cid( - ctx.store(), - &event.msg_cid, - ctx.state_manager.chain_config().eth_chain_id, - )? 
{ - transaction_hash + + let block_hash = if let Some(h) = block_hash_by_tipset.get(&event.tipset_key) { + *h } else { - tracing::warn!("Ignoring event"); - return Ok(None); + let h: EthHash = event.tipset_key.cid()?.into(); + block_hash_by_tipset.insert(event.tipset_key.clone(), h); + h }; - let address = EthAddress::from_filecoin_address(&event.emitter_addr)?; - Ok(Some(EthLog { + + let address = if let Some(a) = eth_addr_by_emitter.get(&event.emitter_addr) { + *a + } else { + let a = EthAddress::from_filecoin_address(&event.emitter_addr)?; + eth_addr_by_emitter.insert(event.emitter_addr, a); + a + }; + + logs.push(EthLog { address, data, topics, @@ -3134,10 +3157,11 @@ fn eth_filter_logs_from_events( log_index: event.event_idx.into(), transaction_index: event.msg_idx.into(), transaction_hash, - block_hash: event.tipset_key.cid()?.into(), + block_hash, block_number: (event.height as u64).into(), - })) - }) + }); + } + Ok(logs) } fn eth_filter_result_from_events( diff --git a/src/rpc/methods/eth/errors.rs b/src/rpc/methods/eth/errors.rs index 79d724e3124f..af456b8e8d94 100644 --- a/src/rpc/methods/eth/errors.rs +++ b/src/rpc/methods/eth/errors.rs @@ -24,6 +24,8 @@ pub enum EthErrors { given: i64, message: String, }, + #[error("events for the requested block are not yet available")] + EventsNotYetAvailable, } impl EthErrors { @@ -57,6 +59,7 @@ impl RpcErrorData for EthErrors { match self { EthErrors::ExecutionReverted { .. } => Some(EXECUTION_REVERTED_CODE), EthErrors::BlockRangeExceeded { .. } => Some(LIMIT_EXCEEDED_CODE), + EthErrors::EventsNotYetAvailable => None, } } @@ -64,6 +67,7 @@ impl RpcErrorData for EthErrors { match self { EthErrors::ExecutionReverted { message, .. } => Some(message.clone()), EthErrors::BlockRangeExceeded { message, .. 
} => Some(message.clone()), + EthErrors::EventsNotYetAvailable => Some(self.to_string()), } } @@ -73,6 +77,7 @@ impl RpcErrorData for EthErrors { Some(serde_json::Value::String(data.clone())) } EthErrors::BlockRangeExceeded { .. } => None, + EthErrors::EventsNotYetAvailable => None, } } } @@ -106,4 +111,16 @@ mod tests { "block range exceeds maximum of 2880 (got 5000)" ); } + + #[test] + fn test_events_not_yet_available_converts_to_server_error() { + let err = EthErrors::EventsNotYetAvailable; + let server_err: ServerError = err.into(); + + // No specific RPC error code is assigned; falls back to default. + assert_eq!( + server_err.message(), + "events for the requested block are not yet available" + ); + } } diff --git a/src/rpc/methods/eth/filter/event.rs b/src/rpc/methods/eth/filter/event.rs index 9a1940102de9..45b49ee2732a 100644 --- a/src/rpc/methods/eth/filter/event.rs +++ b/src/rpc/methods/eth/filter/event.rs @@ -33,6 +33,7 @@ impl From<&EventFilter> for ParsedFilter { tipsets: event_filter.tipsets.clone(), addresses: event_filter.addresses.clone(), keys: event_filter.keys_with_codec.clone(), + msg_cid: None, } } } @@ -102,6 +103,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(RangeInclusive::new(0, 100)), addresses: vec![Address::new_id(123)], keys: HashMap::new(), + msg_cid: None, }; // Test case 1: Install the EventFilter let filter = event_manager diff --git a/src/rpc/methods/eth/filter/mod.rs b/src/rpc/methods/eth/filter/mod.rs index 1940d8e212ed..70230d384840 100644 --- a/src/rpc/methods/eth/filter/mod.rs +++ b/src/rpc/methods/eth/filter/mod.rs @@ -75,6 +75,13 @@ pub trait Matcher { emitter_addr: &crate::shim::address::Address, entries: &[Entry], ) -> anyhow::Result; + + /// Restricts a filter to events emitted by a single message. Returns `None` + /// when the filter applies to all messages in the matched tipset(s). + /// Defaults to `None`; only `ParsedFilter` overrides this. 
+ fn msg_cid_filter(&self) -> Option<&Cid> { + None + } } /// Trait for managing filters. Provides common functionality for installing and removing filters. @@ -83,6 +90,24 @@ pub trait FilterManager { fn remove(&self, filter_id: &FilterID) -> Option>; } +/// Decide whether to fire the cross-tipset event-filter cap. +/// +/// `max_filter_results == 0` disables the cap entirely. Single-tipset queries +/// (`tipsets_contributing <= 1`) always pass — the natural unit is the tipset. +/// Once two or more tipsets have contributed events, returns an error if the +/// running total exceeds `max_filter_results`. +fn ensure_filter_cap( + max_filter_results: usize, + tipsets_contributing: usize, + total_events: usize, +) -> anyhow::Result<()> { + ensure!( + max_filter_results == 0 || tipsets_contributing <= 1 || total_events <= max_filter_results, + "filter matches too many events across multiple tipsets (maximum {max_filter_results}); narrow the block range", + ); + Ok(()) +} + /// Handles Ethereum event filters, providing an interface for creating and managing filters. /// /// The `EthEventHandler` structure is the central point for managing Ethereum filters, @@ -279,13 +304,17 @@ impl EthEventHandler { }); } let max_filter_results = ctx.eth_event_handler.max_filter_results; + let mut tipsets_contributing = 0usize; while let Some(events) = tasks.try_next().await? 
{ - let remaining = max_filter_results.saturating_sub(collected_events.len()); - ensure!( - events.len() <= remaining, - "filter matches too many events (maximum {max_filter_results}), try a more restricted filter" - ); + if !events.is_empty() { + tipsets_contributing += 1; + } collected_events.extend(events); + ensure_filter_cap( + max_filter_results, + tipsets_contributing, + collected_events.len(), + )?; } Ok(()) } @@ -297,7 +326,7 @@ impl EthEventHandler { skip_event: SkipEvent, collected_events: &mut Vec, ) -> anyhow::Result<()> { - let max_filter_results = ctx.eth_event_handler.max_filter_results; + let msg_cid_filter = spec.and_then(|s| s.msg_cid_filter()).copied(); let height = tipset.epoch(); let tipset_key = tipset.key(); let ExecutedTipset { @@ -312,6 +341,17 @@ impl EthEventHandler { }, ) in executed_messages.iter().enumerate() { + if let Some(want) = msg_cid_filter + && message.cid() != want + { + // Update event_count to keep event_idx_base monotonic across the + // tipset, even though we skip these events: it mirrors the + // index a SQL-backed indexer would assign for the row. 
+ if let Some(events) = events { + event_count += events.len(); + } + continue; + } if let Some(events) = events { let event_idx_base = u64::try_from(event_count)?; event_count += events.len(); @@ -332,7 +372,6 @@ impl EthEventHandler { let resolved = if let Some(resolved) = resolved_opt { resolved } else if matches!(skip_event, SkipEvent::OnUnresolvedAddress) { - // Skip event continue; } else { id_addr @@ -368,10 +407,6 @@ impl EthEventHandler { msg_idx: msg_idx as u64, msg_cid: message.cid(), }; - ensure!( - collected_events.len() <= max_filter_results, - "filter matches too many events (maximum {max_filter_results} allowed), try a more restricted filter" - ); collected_events.push(ce); } } @@ -420,10 +455,9 @@ impl EthEventHandler { // we can't return events for the heaviest tipset as the transactions in that tipset will be executed // in the next non-null tipset (because of Filecoin's "deferred execution" model) let heaviest_epoch = ctx.chain_store().heaviest_tipset().epoch(); - ensure!( - *range.end() < heaviest_epoch, - "max_height requested is greater than the heaviest tipset" - ); + if *range.end() >= heaviest_epoch { + return Err(EthErrors::EventsNotYetAvailable.into()); + } let max_height = if *range.end() == -1 { // heaviest tipset doesn't have events because its messages haven't been executed yet heaviest_epoch - 1 @@ -502,6 +536,7 @@ impl EthFilterSpec { tipsets, addresses, keys, + msg_cid: None, }) } } @@ -681,6 +716,9 @@ pub struct ParsedFilter { pub(crate) tipsets: ParsedFilterTipsets, pub(crate) addresses: Vec
<Address>, pub(crate) keys: HashMap<String, Vec<ActorEventBlock>>, + /// When set, only events emitted by this message CID are returned. Mirrors + /// Lotus's `index.EventFilter.MsgCid`. Used by `eth_getTransactionReceipt`. + pub(crate) msg_cid: Option<Cid>, } impl ParsedFilter { @@ -689,8 +727,19 @@ impl ParsedFilter { tipsets, addresses: vec![], keys: HashMap::new(), + msg_cid: None, + } + } + + pub fn new_with_tipset_and_msg(tipsets: ParsedFilterTipsets, msg_cid: Option<Cid>) -> Self { + ParsedFilter { + tipsets, + addresses: vec![], + keys: HashMap::new(), + msg_cid, + } + } + pub fn from_actor_event_filter( chain_height: ChainEpoch, _max_filter_height_range: ChainEpoch, @@ -725,6 +774,7 @@ impl ParsedFilter { tipsets, addresses, keys, + msg_cid: None, }) } } @@ -755,6 +805,10 @@ impl Matcher for ParsedFilter { Ok(match_addr && match_fields) } + + fn msg_cid_filter(&self) -> Option<&Cid> { + self.msg_cid.as_ref() + } } impl Matcher for EventFilter { @@ -1468,6 +1522,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![], keys: Default::default(), + msg_cid: None, }; let addr0 = Address::from_str("t410f744ma4xsq3r3eczzktfj7goal67myzfkusna2hy").unwrap(); @@ -1511,6 +1566,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![addr0], keys: Default::default(), + msg_cid: None, }; assert!(filter0.matches(&addr0, &[]).unwrap()); @@ -1522,6 +1578,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![addr0, addr1], keys: Default::default(), + msg_cid: None, }; assert!(filter1.matches(&addr0, &[]).unwrap()); @@ -1567,6 +1624,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![], keys: Default::default(), + msg_cid: None, }; assert!(empty_filter.matches(&addr0, &entries0).unwrap()); @@ -1586,6 +1644,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![], keys, + msg_cid: None, }; assert!(filter1.matches(&addr0, &entries0).unwrap()); @@ -1605,6 +1664,7 @@ mod tests { tipsets: 
ParsedFilterTipsets::Range(0..=0), addresses: vec![], keys, + msg_cid: None, }; assert!(!filter2.matches(&addr0, &entries0).unwrap()); @@ -1624,6 +1684,7 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![], keys, + msg_cid: None, }; assert!(!filter2.matches(&addr0, &entries0).unwrap()); @@ -1652,8 +1713,61 @@ mod tests { tipsets: ParsedFilterTipsets::Range(0..=0), addresses: vec![], keys, + msg_cid: None, }; assert!(!filter3.matches(&addr0, &entries0).unwrap()); } + + #[test] + fn test_eth_filter_spec_msg_cid_filter_default_none() { + let spec = EthFilterSpec::default(); + assert!(spec.msg_cid_filter().is_none()); + } + + #[test] + fn test_parsed_filter_msg_cid_filter_returns_field() { + let pf_none = ParsedFilter::new_with_tipset(ParsedFilterTipsets::Range(0..=0)); + assert!(pf_none.msg_cid_filter().is_none()); + + let cid = Cid::from_str("bafy2bzaceaxm23epjsmh75yvzcecsrbavlmkcxnva66bkdebdcnyw3bjrc74u") + .unwrap(); + let pf_some = + ParsedFilter::new_with_tipset_and_msg(ParsedFilterTipsets::Range(0..=0), Some(cid)); + assert_eq!(pf_some.msg_cid_filter(), Some(&cid)); + } + + #[test] + fn test_ensure_filter_cap_disabled_when_max_zero() { + // max=0 means "no cap"; never errors regardless of state. + assert!(ensure_filter_cap(0, 0, 0).is_ok()); + assert!(ensure_filter_cap(0, 1, 1_000_000).is_ok()); + assert!(ensure_filter_cap(0, 5, 1_000_000).is_ok()); + } + + #[test] + fn test_ensure_filter_cap_single_tipset_bypasses() { + // tipsets_contributing <= 1: cap never fires regardless of total. + assert!(ensure_filter_cap(10, 0, 0).is_ok()); + assert!(ensure_filter_cap(10, 1, 5).is_ok()); + assert!(ensure_filter_cap(10, 1, 100).is_ok()); // exceeds max but only one tipset + } + + #[test] + fn test_ensure_filter_cap_multi_tipset_within_limit() { + // tipsets_contributing >= 2 and total <= max: ok. 
+ assert!(ensure_filter_cap(10, 2, 5).is_ok()); + assert!(ensure_filter_cap(10, 2, 10).is_ok()); // boundary + assert!(ensure_filter_cap(10, 5, 10).is_ok()); + } + + #[test] + fn test_ensure_filter_cap_multi_tipset_exceeds_limit() { + // tipsets_contributing >= 2 and total > max: error. + let err = ensure_filter_cap(10, 2, 11).unwrap_err(); + assert!(err.to_string().contains("filter matches too many events")); + assert!(err.to_string().contains("maximum 10")); + + assert!(ensure_filter_cap(100, 3, 101).is_err()); + } } diff --git a/src/rpc/mod.rs b/src/rpc/mod.rs index 1d56d72c5310..55801c29d154 100644 --- a/src/rpc/mod.rs +++ b/src/rpc/mod.rs @@ -432,6 +432,7 @@ use crate::rpc::metrics_layer::MetricsLayer; use crate::{chain_sync::network_context::SyncNetworkContext, key_management::KeyStore}; use crate::blocks::FullTipset; +use crate::utils::misc::env::env_or_default; use fvm_ipld_blockstore::Blockstore; use jsonrpsee::{ Methods, @@ -468,7 +469,15 @@ static DEFAULT_MAX_CONNECTIONS: LazyLock<u32> = LazyLock::new(|| { }); const MAX_REQUEST_BODY_SIZE: u32 = 64 * 1024 * 1024; -const MAX_RESPONSE_BODY_SIZE: u32 = MAX_REQUEST_BODY_SIZE; + +/// Maximum JSON-RPC response body size in bytes. Defaults to 64 MiB. +/// +/// `eth_getTransactionReceipt` and `eth_getBlockReceipts` can return very +/// large responses for log-heavy transactions (a single tx emitting hundreds +/// of thousands of events can exceed 64 MiB). Operators serving such queries +/// can raise this with `FOREST_RPC_MAX_RESPONSE_BODY_SIZE` (in bytes). +static MAX_RESPONSE_BODY_SIZE: LazyLock<u32> = + LazyLock::new(|| env_or_default("FOREST_RPC_MAX_RESPONSE_BODY_SIZE", MAX_REQUEST_BODY_SIZE)); /// This is where you store persistent data, or at least access to stateful /// data. 
@@ -567,7 +576,7 @@ where ServerConfig::builder() // Default size (10 MiB) is not enough for methods like `Filecoin.StateMinerActiveSectors` .max_request_body_size(MAX_REQUEST_BODY_SIZE) - .max_response_body_size(MAX_RESPONSE_BODY_SIZE) + .max_response_body_size(*MAX_RESPONSE_BODY_SIZE) .max_connections(*DEFAULT_MAX_CONNECTIONS) .set_id_provider(RandomHexStringIdProvider::new()) .build(),