From e40776fb5f916478341f5aaf6e61e0f61645f6f8 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Fri, 6 Mar 2026 13:36:39 +0530 Subject: [PATCH 01/14] feat: surge actual real time proving --- .env.sample.migration | 74 ++ CATALYST_MIGRATION.md | 1146 +++++++++++++++++ Cargo.lock | 36 + Cargo.toml | 2 + FETCH_REAL_TIME_PROOF.md | 532 ++++++++ PROTOCOL_MIGRATION_REAL_TIME_FORK.md | 451 +++++++ realtime/Cargo.toml | 45 + realtime/src/chain_monitor/mod.rs | 12 + realtime/src/l1/abi/Multicall.json | 44 + realtime/src/l1/abi/RealTimeInbox.json | 1 + realtime/src/l1/bindings.rs | 36 + realtime/src/l1/config.rs | 27 + realtime/src/l1/execution_layer.rs | 350 +++++ realtime/src/l1/mod.rs | 5 + realtime/src/l1/proposal_tx_builder.rs | 231 ++++ realtime/src/l1/protocol_config.rs | 30 + realtime/src/l2/abi/Anchor.json | 1 + realtime/src/l2/bindings.rs | 10 + realtime/src/l2/execution_layer.rs | 369 ++++++ realtime/src/l2/mod.rs | 3 + realtime/src/l2/taiko.rs | 327 +++++ realtime/src/lib.rs | 159 +++ realtime/src/node/mod.rs | 493 +++++++ .../node/proposal_manager/async_submitter.rs | 252 ++++ .../node/proposal_manager/batch_builder.rs | 327 +++++ .../node/proposal_manager/bridge_handler.rs | 255 ++++ .../node/proposal_manager/l2_block_payload.rs | 12 + realtime/src/node/proposal_manager/mod.rs | 421 ++++++ .../src/node/proposal_manager/proposal.rs | 109 ++ realtime/src/raiko/mod.rs | 130 ++ realtime/src/shared_abi/Bridge.json | 738 +++++++++++ realtime/src/shared_abi/bindings.rs | 17 + realtime/src/shared_abi/mod.rs | 1 + realtime/src/utils/config.rs | 63 + realtime/src/utils/mod.rs | 1 + 35 files changed, 6710 insertions(+) create mode 100644 .env.sample.migration create mode 100644 CATALYST_MIGRATION.md create mode 100644 FETCH_REAL_TIME_PROOF.md create mode 100644 PROTOCOL_MIGRATION_REAL_TIME_FORK.md create mode 100644 realtime/Cargo.toml create mode 100644 realtime/src/chain_monitor/mod.rs create mode 100644 realtime/src/l1/abi/Multicall.json create mode 100644 
realtime/src/l1/abi/RealTimeInbox.json create mode 100644 realtime/src/l1/bindings.rs create mode 100644 realtime/src/l1/config.rs create mode 100644 realtime/src/l1/execution_layer.rs create mode 100644 realtime/src/l1/mod.rs create mode 100644 realtime/src/l1/proposal_tx_builder.rs create mode 100644 realtime/src/l1/protocol_config.rs create mode 100644 realtime/src/l2/abi/Anchor.json create mode 100644 realtime/src/l2/bindings.rs create mode 100644 realtime/src/l2/execution_layer.rs create mode 100644 realtime/src/l2/mod.rs create mode 100644 realtime/src/l2/taiko.rs create mode 100644 realtime/src/lib.rs create mode 100644 realtime/src/node/mod.rs create mode 100644 realtime/src/node/proposal_manager/async_submitter.rs create mode 100644 realtime/src/node/proposal_manager/batch_builder.rs create mode 100644 realtime/src/node/proposal_manager/bridge_handler.rs create mode 100644 realtime/src/node/proposal_manager/l2_block_payload.rs create mode 100644 realtime/src/node/proposal_manager/mod.rs create mode 100644 realtime/src/node/proposal_manager/proposal.rs create mode 100644 realtime/src/raiko/mod.rs create mode 100644 realtime/src/shared_abi/Bridge.json create mode 100644 realtime/src/shared_abi/bindings.rs create mode 100644 realtime/src/shared_abi/mod.rs create mode 100644 realtime/src/utils/config.rs create mode 100644 realtime/src/utils/mod.rs diff --git a/.env.sample.migration b/.env.sample.migration new file mode 100644 index 00000000..ce900df8 --- /dev/null +++ b/.env.sample.migration @@ -0,0 +1,74 @@ +# ============================================================================= +# SHARED WITH SHASTA (existing env variables) +# ============================================================================= + +CATALYST_NODE_ECDSA_PRIVATE_KEY= +# PRECONFER_ADDRESS= +# WEB3SIGNER_L1_URL= +# WEB3SIGNER_L2_URL= + +L1_RPC_URLS= +L1_BEACON_URL= +BLOB_INDEXER_URL= + +TAIKO_GETH_RPC_URL=ws://127.0.0.1:1234 +TAIKO_GETH_AUTH_RPC_URL=http://127.0.0.1:1235 
+TAIKO_DRIVER_URL=http://127.0.0.1:1236 +JWT_SECRET_FILE_PATH=/tmp/jwtsecret + +L1_SLOT_DURATION_SEC=12 +L1_SLOTS_PER_EPOCH=32 +PRECONF_HEARTBEAT_MS=2000 + +TAIKO_ANCHOR_ADDRESS=0x1670010000000000000000000000000000010001 +TAIKO_BRIDGE_L2_ADDRESS=0x0000000000000000000000000000000000000000 + +BLOBS_PER_BATCH=3 +MAX_BLOCKS_PER_BATCH=1 +MAX_TIME_SHIFT_BETWEEN_BLOCKS_SEC=255 +MAX_ANCHOR_HEIGHT_OFFSET_REDUCTION_VALUE=10 + +MAX_BYTES_PER_TX_LIST=126976 +MIN_BYTES_PER_TX_LIST=8192 +THROTTLING_FACTOR=2 +PRECONF_MIN_TXS=3 +PRECONF_MAX_SKIPPED_L2_SLOTS=2 + +MIN_PRIORITY_FEE_PER_GAS_WEI=1000000000 +TX_FEES_INCREASE_PERCENTAGE=0 +MAX_ATTEMPTS_TO_SEND_TX=4 +MAX_ATTEMPTS_TO_WAIT_TX=5 +DELAY_BETWEEN_TX_ATTEMPTS_SEC=63 +EXTRA_GAS_PERCENTAGE=100 + +RPC_L2_EXECUTION_LAYER_TIMEOUT_MS=1000 +RPC_DRIVER_PRECONF_TIMEOUT_MS=60000 +RPC_DRIVER_STATUS_TIMEOUT_MS=1000 + +FUNDS_MONITOR_INTERVAL_SEC=60 +THRESHOLD_ETH=500000000000000000 +THRESHOLD_TAIKO=0 + +DISABLE_BRIDGING=true +AMOUNT_TO_BRIDGE_FROM_L2_TO_L1=1000000000000000000 +BRIDGE_RELAYER_FEE=3047459064000000 +BRIDGE_TRANSACTION_FEE=1000000000000000 + +FORK_SWITCH_TRANSITION_PERIOD_SEC=60 +PACAYA_TIMESTAMP_SEC=0 +SHASTA_TIMESTAMP_SEC=99999999999 +WHITELIST_MONITOR_INTERVAL_SEC=60 + +# ============================================================================= +# NEW FOR REALTIME FORK +# ============================================================================= + +REALTIME_INBOX_ADDRESS= +PROPOSER_MULTICALL_ADDRESS= +L1_BRIDGE_ADDRESS= + +RAIKO_URL=http://localhost:8080 +RAIKO_API_KEY= +RAIKO_PROOF_TYPE=sgx +RAIKO_L2_NETWORK=taiko_mainnet +RAIKO_L1_NETWORK=ethereum diff --git a/CATALYST_MIGRATION.md b/CATALYST_MIGRATION.md new file mode 100644 index 00000000..4551daac --- /dev/null +++ b/CATALYST_MIGRATION.md @@ -0,0 +1,1146 @@ +# Catalyst Real-Time Fork Migration Plan + +> Step-by-step plan to migrate Catalyst from the asynchronous Shasta proving model to the +> single-phase **RealTimeInbox** (atomic propose+prove) model. 
+ +--- + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. [Current Architecture (Shasta)](#2-current-architecture-shasta) +3. [Target Architecture (RealTime)](#3-target-architecture-realtime) +4. [Migration Strategy](#4-migration-strategy) +5. [Step 1 — Scaffold the `realtime` Crate](#step-1--scaffold-the-realtime-crate) +6. [Step 2 — Contract Bindings & ABIs](#step-2--contract-bindings--abis) +7. [Step 3 — Configuration & Environment](#step-3--configuration--environment) +8. [Step 4 — Protocol Config Adapter](#step-4--protocol-config-adapter) +9. [Step 5 — Proposal Struct Changes](#step-5--proposal-struct-changes) +10. [Step 6 — Raiko Proof Client](#step-6--raiko-proof-client) +11. [Step 7 — Proposal Transaction Builder](#step-7--proposal-transaction-builder) +12. [Step 8 — L1 Execution Layer](#step-8--l1-execution-layer) +13. [Step 9 — L2 Anchor Transaction](#step-9--l2-anchor-transaction) +14. [Step 10 — Node Main Loop](#step-10--node-main-loop) +15. [Step 11 — Batch Manager / Proposal Manager](#step-11--batch-manager--proposal-manager) +16. [Step 12 — Remove Dead Code](#step-12--remove-dead-code) +17. [Step 13 — Integration Testing](#step-13--integration-testing) +18. [Appendix A — File Mapping (Shasta → RealTime)](#appendix-a--file-mapping-shasta--realtime) +19. [Appendix B — Environment Variable Changes](#appendix-b--environment-variable-changes) +20. [Appendix C — Raiko API Quick Reference](#appendix-c--raiko-api-quick-reference) + +--- + +## 1. Executive Summary + +**Shasta** (current) uses a two-phase model: proposals are submitted to L1 and later proven +by an external prover. Catalyst preconfirms blocks, batches them, and submits via +`SurgeInbox.proposeWithProof()` where the "proof" is a signed checkpoint (161-byte signature). + +**RealTime** (target) collapses propose + prove into a single atomic transaction. +Before submitting to L1, the sequencer must: +1. Execute the L2 blocks locally. +2. 
Request a ZK proof from Raiko covering those blocks. +3. Submit the proposal + ZK proof to `RealTimeInbox.propose()` in one tx. + +### Key Differences + +| Aspect | Shasta | RealTime | +|---|---|---| +| L1 Contract | `Inbox` (SurgeInbox fork) | `RealTimeInbox` | +| Propose function | `proposeWithProof(data, input, proof, signalSlots)` | `propose(data, checkpoint, proof)` | +| Proof type | Signed checkpoint (161 bytes) | ZK proof from Raiko | +| Prove phase | Separate (external prover) | Embedded in propose tx | +| State tracking | Ring buffer, `CoreState`, proposal IDs | Single `lastProposalHash` | +| Bonds / forced inclusions | Yes | No | +| Batch size | Multiple proposals per proof | Exactly 1 proposal per proof | +| L2 Anchor | `anchorV4WithSignalSlots` | `anchorV4WithSignalSlots` (same) | +| Proposal identification | Sequential `id` | Hash-based (`proposalHash`) | + +--- + +## 2. Current Architecture (Shasta) + +### Data Flow + +``` +L2 Txs → preconfirm_block() → BatchBuilder → Proposal → ProposalTxBuilder + ↓ + build_propose_call() + ↓ + sign checkpoint (161-byte proof) + ↓ + Multicall { user_ops, propose, l1_calls } + ↓ + EIP-4844 blob tx → L1 +``` + +### Key Files + +| Component | Path | +|---|---| +| Entry point | `shasta/src/lib.rs` | +| Node loop | `shasta/src/node/mod.rs` | +| Proposal Manager | `shasta/src/node/proposal_manager/mod.rs` | +| Batch Builder | `shasta/src/node/proposal_manager/batch_builder.rs` | +| Proposal struct | `shasta/src/node/proposal_manager/proposal.rs` | +| TX Builder | `shasta/src/l1/proposal_tx_builder.rs` | +| L1 Execution Layer | `shasta/src/l1/execution_layer.rs` | +| L1 Bindings | `shasta/src/l1/bindings.rs` | +| L1 Config | `shasta/src/l1/config.rs` | +| Protocol Config | `shasta/src/l1/protocol_config.rs` | +| L2 Execution Layer | `shasta/src/l2/execution_layer.rs` | +| L2 Anchor Bindings | `shasta/src/l2/bindings.rs` | +| Forced Inclusion | `shasta/src/forced_inclusion/mod.rs` | +| Bridge Handler | 
`shasta/src/node/proposal_manager/bridge_handler.rs` | +| Utils / Config | `shasta/src/utils/config.rs` | + +### Current Proof Flow (Shasta) + +In `ProposalTxBuilder::build_proof_data()` (proposal_tx_builder.rs:148-162): +```rust +// 1. ABI-encode the checkpoint (blockNumber, blockHash, stateRoot) +let checkpoint_encoded = checkpoint.abi_encode(); // 96 bytes +// 2. Keccak hash and sign with hardcoded anvil key +let checkpoint_digest = keccak256(&checkpoint_encoded); +let signature = self.checkpoint_signer.sign_hash(&checkpoint_digest).await?; +// 3. Concatenate: [96-byte checkpoint || 65-byte signature] = 161 bytes +``` + +This is submitted as the `proof` parameter to `SurgeInbox.proposeWithProof()`. + +--- + +## 3. Target Architecture (RealTime) + +### Data Flow + +``` +L2 Txs → preconfirm_block() → ProposalManager → Proposal + ↓ + finalize proposal (checkpoint known) + ↓ + Request ZK proof from Raiko + (poll until ready) + ↓ + build_propose_call() + ↓ + Multicall { user_ops, propose, l1_calls } + ↓ + EIP-4844 blob tx → L1 +``` + +### RealTimeInbox Contract Interface + +```solidity +function propose( + bytes calldata _data, // abi.encode(ProposeInput) + ICheckpointStore.Checkpoint calldata _checkpoint, + bytes calldata _proof // ZK proof from Raiko +) external; + +function getLastProposalHash() external view returns (bytes32); +function getConfig() external view returns (Config memory); +// Config = { proofVerifier, signalService, basefeeSharingPctg } +``` + +### Proof Verification On-Chain + +``` +commitmentHash = keccak256(abi.encode( + proposalHash, // keccak256(abi.encode(Proposal)) + checkpoint.blockNumber, + checkpoint.blockHash, + checkpoint.stateRoot +)) + +verifyProof(0, commitmentHash, proof) +``` + +--- + +## 4. Migration Strategy + +The `realtime` crate will be a **separate crate** alongside `shasta`, sharing `common` and +workspace dependencies. Code will be copied from shasta and modified — not forked with feature +flags. 
+ +**Rationale**: The protocol changes are deep enough (different contract, different proof model, +removed features) that a clean separation avoids conditional compilation complexity and makes +each crate self-contained. + +### What to Keep From Shasta +- Multicall batching logic (user ops + propose + l1 calls) +- Bridge handler RPC (port 4545) +- Blob encoding (manifest compression via `taiko_protocol::shasta`) +- L2 anchor construction (`anchorV4WithSignalSlots` — unchanged) +- Slot clock, heartbeat, operator management +- Signal slot handling +- Metrics, watchdog, cancellation + +### What to Remove +- Forced inclusion subsystem (`forced_inclusion/` module) +- Bond management (`getBond`, `deposit`, `withdraw`) +- Proposal ID tracking (sequential IDs → hash-based) +- `CoreState` queries (`getCoreState`, `getInboxState`, `nextProposalId`) +- Ring buffer queries (`getProposalHash`) +- Proving window / liveness checks +- `activationTimestamp` warmup (replace with `getLastProposalHash` check) +- Verifier / handover window logic (no batched proving needed) +- `proposerChecker` / whitelist checks (anyone can propose) + +### What to Add +- Raiko HTTP client for proof generation +- Polling loop for proof readiness +- Proposal hash computation (local, for `parentProposalHash` tracking) +- `maxAnchorBlockNumber` / `maxAnchorBlockHash` fields + +--- + +## Step 1 — Scaffold the `realtime` Crate + +### Directory Structure + +``` +realtime/ +├── Cargo.toml +├── src/ +│ ├── lib.rs # create_realtime_node() +│ ├── raiko/ +│ │ └── mod.rs # Raiko HTTP client +│ ├── l1/ +│ │ ├── mod.rs +│ │ ├── bindings.rs # RealTimeInbox + Multicall bindings +│ │ ├── config.rs # Contract addresses +│ │ ├── execution_layer.rs # L1 interaction (slimmed) +│ │ ├── proposal_tx_builder.rs # Build propose tx with ZK proof +│ │ ├── protocol_config.rs # 3-field config from RealTimeInbox +│ │ └── abi/ +│ │ ├── RealTimeInbox.json # From realtime/RealtimeInbox.json +│ │ └── Multicall.json # Copied from shasta 
+│ ├── l2/ +│ │ ├── mod.rs +│ │ ├── bindings.rs # Anchor bindings (new ABI) +│ │ ├── execution_layer.rs # Mostly unchanged +│ │ ├── extra_data.rs # Copied (or removed if no proposal_id) +│ │ └── abi/ +│ │ └── Anchor.json # From realtime/Anchor.json +│ ├── node/ +│ │ ├── mod.rs # Simplified main loop +│ │ └── proposal_manager/ +│ │ ├── mod.rs # Slimmed ProposalManager +│ │ ├── proposal.rs # New Proposal struct +│ │ ├── batch_builder.rs # Simplified builder +│ │ ├── l2_block_payload.rs # Copied +│ │ └── bridge_handler.rs # Copied +│ ├── shared_abi/ +│ │ ├── mod.rs +│ │ ├── bindings.rs +│ │ └── Bridge.json # Copied from shasta +│ ├── chain_monitor/ +│ │ └── mod.rs # Listen for ProposedAndProved events +│ ├── metrics/ +│ │ └── mod.rs +│ └── utils/ +│ └── config.rs # RealtimeConfig +``` + +### Cargo.toml + +```toml +[package] +name = "realtime" +version = "0.1.0" +edition = "2021" + +[dependencies] +# Same workspace deps as shasta, plus: +reqwest = { version = "0.12", features = ["json"] } # For Raiko HTTP client +serde = { version = "1", features = ["derive"] } +serde_json = "1" + +# Workspace dependencies (same as shasta) +alloy = { workspace = true } +alloy-json-rpc = { workspace = true } +alloy-rlp = { workspace = true } +anyhow = { workspace = true } +common = { workspace = true } +taiko_alethia_reth = { workspace = true } +taiko_bindings = { workspace = true } +taiko_protocol = { workspace = true } +taiko_rpc = { workspace = true } +pacaya = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +jsonrpsee = { workspace = true } +``` + +### Actions + +1. Create the directory structure above. +2. Copy `Cargo.toml` from shasta, rename package to `realtime`, add `reqwest`. +3. Register `realtime` in the workspace `Cargo.toml`. +4. Copy bridge handler, l2_block_payload, shared_abi verbatim — these are unchanged. 
+ +--- + +## Step 2 — Contract Bindings & ABIs + +### 2.1 Move ABIs + +``` +realtime/RealtimeInbox.json → realtime/src/l1/abi/RealTimeInbox.json +realtime/Anchor.json → realtime/src/l2/abi/Anchor.json +shasta/src/l1/abi/Multicall.json → realtime/src/l1/abi/Multicall.json (copy) +``` + +### 2.2 L1 Bindings (`realtime/src/l1/bindings.rs`) + +```rust +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + RealTimeInbox, + "src/l1/abi/RealTimeInbox.json" +); + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug)] + Multicall, + "src/l1/abi/Multicall.json" +); +``` + +**Key changes vs shasta bindings:** +- `SurgeInbox` → `RealTimeInbox` +- Generated types will include: + - `RealTimeInbox::proposeCall` (data, checkpoint, proof) + - `IRealTimeInbox::Config` { proofVerifier, signalService, basefeeSharingPctg } + - `IRealTimeInbox::ProposeInput` { blobReference, signalSlots, maxAnchorBlockNumber } + - `ProposedAndProved` event + +### 2.3 L2 Bindings (`realtime/src/l2/bindings.rs`) + +Copy from shasta but point to the new Anchor ABI: + +```rust +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + Anchor, + "src/l2/abi/Anchor.json" +); +``` + +The new Anchor ABI includes `anchorV4WithSignalSlots` (same as shasta) plus the new `anchorV5`. +For the initial migration, continue using `anchorV4WithSignalSlots`. 
+ +### 2.4 Type Mapping + +| Shasta Type | RealTime Type | Notes | +|---|---|---| +| `IInbox::ProposeInput` { deadline, blobReference, numForcedInclusions } | `IRealTimeInbox::ProposeInput` { blobReference, signalSlots, maxAnchorBlockNumber } | No deadline, no forced inclusions; signal slots and max anchor block are first-class | +| `IInbox::Config` (17 fields) | `IRealTimeInbox::Config` (3 fields) | Only proofVerifier, signalService, basefeeSharingPctg | +| `IInbox::CoreState` | N/A (removed) | Replaced by `getLastProposalHash()` | +| `ICheckpointStore::Checkpoint` | `ICheckpointStore::Checkpoint` | Unchanged | + +--- + +## Step 3 — Configuration & Environment + +### 3.1 RealTime Config (`realtime/src/utils/config.rs`) + +```rust +use alloy::primitives::Address; + +pub struct RealtimeConfig { + pub realtime_inbox: Address, // REALTIME_INBOX_ADDRESS + pub proposer_multicall: Address, // PROPOSER_MULTICALL_ADDRESS (same) + pub bridge: Address, // L1_BRIDGE_ADDRESS (same) + pub raiko_url: String, // RAIKO_URL + pub raiko_api_key: Option, // RAIKO_API_KEY (optional) + pub proof_type: String, // RAIKO_PROOF_TYPE (e.g. 
"sgx", "sp1", "native") + pub raiko_network: String, // RAIKO_L2_NETWORK + pub raiko_l1_network: String, // RAIKO_L1_NETWORK +} + +impl RealtimeConfig { + pub fn read_env_variables() -> Result { + Ok(Self { + realtime_inbox: std::env::var("REALTIME_INBOX_ADDRESS")?.parse()?, + proposer_multicall: std::env::var("PROPOSER_MULTICALL_ADDRESS")?.parse()?, + bridge: std::env::var("L1_BRIDGE_ADDRESS")?.parse()?, + raiko_url: std::env::var("RAIKO_URL") + .unwrap_or_else(|_| "http://localhost:8080".to_string()), + raiko_api_key: std::env::var("RAIKO_API_KEY").ok(), + proof_type: std::env::var("RAIKO_PROOF_TYPE") + .unwrap_or_else(|_| "sgx".to_string()), + raiko_network: std::env::var("RAIKO_L2_NETWORK") + .unwrap_or_else(|_| "taiko_mainnet".to_string()), + raiko_l1_network: std::env::var("RAIKO_L1_NETWORK") + .unwrap_or_else(|_| "ethereum".to_string()), + }) + } +} +``` + +### 3.2 Contract Addresses (`realtime/src/l1/config.rs`) + +```rust +pub struct ContractAddresses { + pub realtime_inbox: Address, // Was: shasta_inbox + pub proposer_multicall: Address, // Same + pub bridge: Address, // Same + // REMOVED: proposer_checker (anyone can propose in RealTime) +} +``` + +--- + +## Step 4 — Protocol Config Adapter + +### `realtime/src/l1/protocol_config.rs` + +```rust +// RealTimeInbox.getConfig() returns only 3 fields +use crate::l1::bindings::IRealTimeInbox::Config; + +#[derive(Clone, Default)] +pub struct ProtocolConfig { + pub basefee_sharing_pctg: u8, + pub proof_verifier: Address, + pub signal_service: Address, +} + +impl From<&Config> for ProtocolConfig { + fn from(config: &Config) -> Self { + Self { + basefee_sharing_pctg: config.basefeeSharingPctg, + proof_verifier: config.proofVerifier, + signal_service: config.signalService, + } + } +} +``` + +**Removed**: `max_anchor_offset` is no longer read from contract config. Use a constant +or derive from the `blockhash()` 256-block limit. 
+ +--- + +## Step 5 — Proposal Struct Changes + +### `realtime/src/node/proposal_manager/proposal.rs` + +```rust +use alloy::primitives::{Address, B256, FixedBytes}; + +#[derive(Default, Clone)] +pub struct Proposal { + // REMOVED: pub id: u64 — no sequential IDs + pub l2_blocks: Vec, + pub total_bytes: u64, + pub coinbase: Address, + + // CHANGED: anchor → maxAnchor + pub max_anchor_block_number: u64, // Was: anchor_block_id + pub max_anchor_block_hash: B256, // Was: anchor_block_hash (now read from blockhash()) + // REMOVED: anchor_block_timestamp_sec — not needed + // REMOVED: anchor_state_root — not in RealTime proposal + + // REMOVED: num_forced_inclusion — no forced inclusions + + // Proof fields + pub checkpoint: Checkpoint, // Same as shasta + pub parent_proposal_hash: B256, // NEW: hash chain tracking + + // Surge POC fields (carried over) + pub user_ops: Vec, + pub signal_slots: Vec>, + pub l1_calls: Vec, + + // NEW: ZK proof (populated after Raiko call) + pub zk_proof: Option>, +} +``` + +### Proposal Hash Computation + +The proposal hash must be computed locally to track `parentProposalHash`: + +```rust +impl Proposal { + /// Compute the proposalHash as the on-chain contract does: + /// keccak256(abi.encode( + /// parentProposalHash, + /// maxAnchorBlockNumber, // padded to 32 bytes + /// maxAnchorBlockHash, + /// basefeeSharingPctg, // padded to 32 bytes + /// sources[], // dynamic array + /// signalSlotsHash + /// )) + pub fn compute_proposal_hash(&self, basefee_sharing_pctg: u8) -> B256 { + use alloy::sol_types::SolValue; + + let signal_slots_hash = if self.signal_slots.is_empty() { + B256::ZERO + } else { + alloy::primitives::keccak256(self.signal_slots.abi_encode()) + }; + + // Build the sources array (DerivationSource[]) + // ... 
(from blob sidecar data) + + let encoded = ( + self.parent_proposal_hash, + alloy::primitives::U256::from(self.max_anchor_block_number), + self.max_anchor_block_hash, + alloy::primitives::U256::from(basefee_sharing_pctg), + // sources encoding... + signal_slots_hash, + ).abi_encode(); + + alloy::primitives::keccak256(encoded) + } +} +``` + +--- + +## Step 6 — Raiko Proof Client + +This is the **biggest new component**. It does not exist in shasta. + +### `realtime/src/raiko/mod.rs` + +```rust +use anyhow::Error; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use std::time::Duration; +use tracing::{debug, info, warn}; + +#[derive(Clone)] +pub struct RaikoClient { + client: Client, + base_url: String, + api_key: Option, + proof_type: String, + l2_network: String, + l1_network: String, + prover_address: String, + poll_interval: Duration, + max_retries: u32, +} + +#[derive(Serialize)] +pub struct RaikoProofRequest { + pub l2_block_numbers: Vec, + pub proof_type: String, + pub max_anchor_block_number: u64, + pub parent_proposal_hash: String, // "0x..." + pub basefee_sharing_pctg: u8, + #[serde(skip_serializing_if = "Option::is_none")] + pub network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub l1_network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub prover: Option, + pub signal_slots: Vec, // "0x..." 
hex strings + pub sources: Vec, // DerivationSource[] + pub checkpoint: Option, + pub blob_proof_type: String, +} + +#[derive(Serialize, Deserialize)] +pub struct RaikoCheckpoint { + pub block_number: u64, + pub block_hash: String, + pub state_root: String, +} + +#[derive(Deserialize)] +pub struct RaikoResponse { + pub status: String, // "ok" or "error" + #[serde(default)] + pub proof_type: Option, + #[serde(default)] + pub data: Option, + #[serde(default)] + pub error: Option, + #[serde(default)] + pub message: Option, +} + +#[derive(Deserialize)] +#[serde(untagged)] +pub enum RaikoData { + Proof { proof: String }, + Status { status: String }, +} + +impl RaikoClient { + pub fn new(config: &RealtimeConfig, prover_address: String) -> Self { /* ... */ } + + /// Request a proof and poll until ready. + /// Returns the raw proof bytes. + pub async fn get_proof(&self, request: RaikoProofRequest) -> Result, Error> { + let url = format!("{}/v3/proof/batch/realtime", self.base_url); + + for attempt in 0..self.max_retries { + let mut req = self.client.post(&url) + .json(&request); + + if let Some(ref key) = self.api_key { + req = req.header("X-API-KEY", key); + } + + let resp = req.send().await?; + let body: RaikoResponse = resp.json().await?; + + if body.status == "error" { + return Err(anyhow::anyhow!( + "Raiko proof failed: {}", + body.message.unwrap_or_default() + )); + } + + match body.data { + Some(RaikoData::Proof { proof }) => { + info!("ZK proof received (attempt {})", attempt + 1); + // Decode hex proof to bytes + let proof_bytes = hex::decode(proof.trim_start_matches("0x"))?; + return Ok(proof_bytes); + } + Some(RaikoData::Status { ref status }) if status == "ZKAnyNotDrawn" => { + warn!("Raiko: ZK prover not drawn for this request"); + return Err(anyhow::anyhow!("ZK prover not drawn")); + } + Some(RaikoData::Status { ref status }) => { + debug!("Raiko status: {}, polling... 
(attempt {})", status, attempt + 1); + tokio::time::sleep(self.poll_interval).await; + } + None => { + return Err(anyhow::anyhow!("Raiko: unexpected empty response")); + } + } + } + + Err(anyhow::anyhow!("Raiko: proof not ready after {} attempts", self.max_retries)) + } +} +``` + +### Integration Point + +The Raiko client is called **after** a batch is finalized (all L2 blocks executed, checkpoint +known) and **before** the L1 transaction is built. This is the critical new step in the pipeline. + +--- + +## Step 7 — Proposal Transaction Builder + +### `realtime/src/l1/proposal_tx_builder.rs` + +Major changes from shasta's `ProposalTxBuilder`: + +1. **Remove `checkpoint_signer`** — no more signed checkpoint proofs. +2. **Remove `build_proof_data()`** — replaced by Raiko ZK proof. +3. **Change `build_propose_call()`** to use `RealTimeInbox.propose()` with 3 params. + +```rust +pub struct ProposalTxBuilder { + provider: DynProvider, + extra_gas_percentage: u64, + raiko_client: RaikoClient, + // REMOVED: checkpoint_signer +} + +impl ProposalTxBuilder { + async fn build_propose_call( + &self, + batch: &Proposal, + inbox_address: Address, + ) -> Result<(Multicall::Call, BlobTransactionSidecar), Error> { + // 1. Build blob sidecar (same as shasta) + let (sidecar, _manifest_data) = self.build_blob_sidecar(batch)?; + + // 2. Build ProposeInput (NEW structure) + // RealTimeInbox ProposeInput = { blobReference, signalSlots, maxAnchorBlockNumber } + let input = IRealTimeInbox::ProposeInput { + blobReference: BlobReference { + blobStartIndex: 0, + numBlobs: sidecar.blobs.len().try_into()?, + offset: U24::ZERO, + }, + signalSlots: batch.signal_slots.clone(), + maxAnchorBlockNumber: U48::from(batch.max_anchor_block_number), + }; + + // 3. Encode the input + let inbox = RealTimeInbox::new(inbox_address, self.provider.clone()); + let encoded_input = inbox.encodeProposeInput(input).call().await?; + + // 4. 
Use the ZK proof from Raiko (already obtained) + let proof = Bytes::from( + batch.zk_proof.as_ref() + .ok_or_else(|| anyhow::anyhow!("ZK proof not set on proposal"))? + .clone() + ); + + // 5. Build the propose call with 3 parameters: + // propose(bytes _data, Checkpoint _checkpoint, bytes _proof) + let call = inbox.propose( + encoded_input, // _data = abi.encode(ProposeInput) + batch.checkpoint.clone(), + proof, + ); + + Ok(( + Multicall::Call { + target: inbox_address, + value: U256::ZERO, + data: call.calldata().clone(), + }, + sidecar, + )) + } +} +``` + +### Multicall Composition (unchanged pattern) + +The multicall still follows the same pattern: +1. User ops (optional) +2. Propose call (with ZK proof instead of signed checkpoint) +3. L1 calls (optional) + +--- + +## Step 8 — L1 Execution Layer + +### `realtime/src/l1/execution_layer.rs` + +Key changes from shasta: + +```rust +pub struct ExecutionLayer { + common: ExecutionLayerCommon, + provider: DynProvider, + preconfer_address: Address, + pub transaction_monitor: TransactionMonitor, + contract_addresses: ContractAddresses, + // CHANGED: InboxInstance → RealTimeInboxInstance + realtime_inbox: RealTimeInbox::RealTimeInboxInstance, + // REMOVED: checkpoint_signer (no more signed proofs) +} +``` + +### Removed Methods + +- `get_inbox_state()` → removed (no CoreState) +- `get_inbox_next_proposal_id()` → removed (no sequential IDs) +- `get_activation_timestamp()` → removed (RealTimeInbox uses `activate()` differently) +- `get_forced_inclusion_*()` → removed (no forced inclusions) +- `get_preconfer_total_bonds()` → removed (no bonds) +- `is_operator_whitelisted()` → removed (anyone can propose) + +### New Methods + +```rust +impl ExecutionLayer { + /// Get the last proposal hash from RealTimeInbox + pub async fn get_last_proposal_hash(&self) -> Result { + let hash = self.realtime_inbox + .getLastProposalHash() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getLastProposalHash: {e}"))?; + 
Ok(hash) + } + + /// Fetch the 3-field config from RealTimeInbox + pub async fn fetch_protocol_config(&self) -> Result { + let config = self.realtime_inbox + .getConfig() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getConfig: {e}"))?; + Ok(ProtocolConfig::from(&config.config_)) + } +} +``` + +### Warmup Changes + +Replace the `activationTimestamp` wait loop with `getLastProposalHash` check: + +```rust +async fn warmup(&mut self) -> Result<(), Error> { + // Wait for RealTimeInbox activation (lastProposalHash != 0) + loop { + let hash = self.ethereum_l1.execution_layer + .get_last_proposal_hash().await?; + if hash != B256::ZERO { + info!("RealTimeInbox is active, lastProposalHash: {}", hash); + break; + } + warn!("RealTimeInbox not yet activated. Waiting..."); + sleep(Duration::from_secs(12)).await; + } + Ok(()) +} +``` + +--- + +## Step 9 — L2 Anchor Transaction + +### No Changes Required + +The L2 execution layer already uses `anchorV4WithSignalSlots`, which is the correct anchor +function for the RealTime fork. The Anchor contract ABI from `realtime/Anchor.json` includes +this function. + +The only change is the ABI file path in the bindings — point to the new `Anchor.json`. + +### Anchor Call (unchanged logic) + +```rust +// realtime/src/l2/execution_layer.rs +// Same as shasta/src/l2/execution_layer.rs:105-107 +let call_builder = self + .shasta_anchor + .anchorV4WithSignalSlots(anchor_block_params.0, anchor_block_params.1); +``` + +### Note on `anchorV5` + +The new Anchor ABI includes `anchorV5` with `ProposalParams` and `BlockParams`. This is for +future use. The initial migration should continue using `anchorV4WithSignalSlots`. 
+ +--- + +## Step 10 — Node Main Loop + +### `realtime/src/node/mod.rs` + +The main loop is **simplified** because: +- No verifier (no separate proving window to monitor) +- No forced inclusion handling +- No proposer checker / whitelist validation +- No bond management +- Proof is obtained before submission (synchronous from node's perspective) + +### Simplified Loop + +```rust +async fn main_block_preconfirmation_step(&mut self) -> Result<(), Error> { + let (l2_slot_info, current_status, pending_tx_list) = + self.get_slot_info_and_status().await?; + + let transaction_in_progress = self.ethereum_l1.execution_layer + .is_transaction_in_progress().await?; + + // Preconfirmation phase + if current_status.is_preconfer() && current_status.is_driver_synced() { + // Head verification (same as shasta) + if !self.head_verifier.verify(...).await { /* ... */ } + + let l2_slot_context = L2SlotContext { /* ... */ }; + + if self.proposal_manager.should_new_block_be_created(&pending_tx_list, &l2_slot_context) { + if has_pending_txs || has_pending_user_ops { + let preconfed_block = self.proposal_manager + .preconfirm_block(pending_tx_list, &l2_slot_context).await?; + self.verify_preconfed_block(preconfed_block).await?; + } + } + } + + // Submission phase — NOW includes proof fetching + if current_status.is_submitter() && !transaction_in_progress { + // No verifier check needed — just submit if we have finalized batches + if self.proposal_manager.has_batches_ready_to_submit() { + self.proposal_manager.try_submit_oldest_batch().await?; + } + } + + // Cleanup (simplified — no verifier to clear) + if !current_status.is_submitter() && !current_status.is_preconfer() { + if self.proposal_manager.has_batches() { + self.proposal_manager.reset_builder().await?; + } + } + + Ok(()) +} +``` + +### Removed from Loop +- `check_for_missing_proposed_batches()` — no proposal IDs to compare +- `has_verified_unproposed_batches()` — no external verifier +- 
`check_and_handle_anchor_offset_for_unsafe_l2_blocks()` — simplified (use 256-block limit)
+- `get_next_proposal_id()` — no sequential IDs
+- Forced inclusion checks
+
+---
+
+## Step 11 — Batch Manager / Proposal Manager
+
+### Key Change: Proof Fetching Before Submission
+
+The batch submission flow now has an additional step between finalization and L1 submission:
+
+```
+finalize_current_batch()
+    ↓
+fetch_proof_from_raiko()  ← NEW
+    ↓
+send_batch_to_l1()
+```
+
+### `try_submit_oldest_batch()` (modified)
+
+```rust
+pub async fn try_submit_oldest_batch(&mut self) -> Result<(), Error> {
+    if let Some(batch) = self.proposals_to_send.front_mut() {
+        // Step 1: If proof not yet obtained, fetch from Raiko
+        if batch.zk_proof.is_none() {
+            let l2_block_numbers: Vec<u64> = batch.l2_blocks.iter()
+                .map(|b| /* get block number from checkpoint or sequential */)
+                .collect();
+
+            let request = RaikoProofRequest {
+                l2_block_numbers,
+                proof_type: self.raiko_client.proof_type.clone(),
+                max_anchor_block_number: batch.max_anchor_block_number,
+                parent_proposal_hash: format!("0x{}", hex::encode(batch.parent_proposal_hash)),
+                basefee_sharing_pctg: self.protocol_config.basefee_sharing_pctg,
+                signal_slots: batch.signal_slots.iter()
+                    .map(|s| format!("0x{}", hex::encode(s)))
+                    .collect(),
+                sources: vec![], // Build from blob data
+                checkpoint: Some(RaikoCheckpoint {
+                    block_number: batch.checkpoint.blockNumber.to::<u64>(),
+                    block_hash: format!("0x{}", hex::encode(batch.checkpoint.blockHash)),
+                    state_root: format!("0x{}", hex::encode(batch.checkpoint.stateRoot)),
+                }),
+                // ... 
other fields + }; + + let proof = self.raiko_client.get_proof(request).await?; + batch.zk_proof = Some(proof); + } + + // Step 2: Submit to L1 (same as shasta, but with ZK proof) + self.ethereum_l1.execution_layer + .send_batch_to_l1(batch.clone(), None, None) + .await?; + + self.proposals_to_send.pop_front(); + } + Ok(()) +} +``` + +### Proposal Hash Tracking + +Since RealTimeInbox uses `lastProposalHash` instead of sequential IDs, the manager must: + +1. **On startup**: Read `getLastProposalHash()` from L1 to initialize `parent_proposal_hash`. +2. **After each submission**: Compute and store the new proposal hash locally. +3. **Use `parent_proposal_hash`** when creating each new proposal. + +```rust +pub struct ProposalManager { + // ... + parent_proposal_hash: B256, // Tracks the chain head +} + +impl ProposalManager { + async fn create_new_batch(&mut self) -> Result<(), Error> { + // Read current L1 block for maxAnchorBlockNumber + let l1_block = self.ethereum_l1.execution_layer.common() + .get_latest_block_number().await?; + + // Ensure it's within 256 blocks (blockhash() limit) + let max_anchor = l1_block.saturating_sub(self.l1_height_lag); + + let max_anchor_hash = self.ethereum_l1.execution_layer.common() + .get_block_hash_by_number(max_anchor).await?; + + self.batch_builder.create_new_batch( + max_anchor, + max_anchor_hash, + self.parent_proposal_hash, + ); + + Ok(()) + } +} +``` + +--- + +## Step 12 — Remove Dead Code + +Files/modules from shasta that should NOT be copied to realtime: + +| Module | Reason | +|---|---| +| `forced_inclusion/mod.rs` | No forced inclusions in RealTime | +| `node/verifier.rs` | No separate proving window / handover verification | +| `node/l2_height_from_l1.rs` | Based on proposal ID lookups (replaced by hash tracking) | +| `l2/extra_data.rs` | Encodes `proposal_id` into block extra data (no IDs in RealTime) | + +Dependencies to remove from trait implementations: + +| Trait | Reason | +|---|---| +| `PreconferBondProvider` | No 
bonds | +| `WhitelistProvider` | No whitelist | + +--- + +## Step 13 — Integration Testing + +### 13.1 Unit Tests + +1. **Proposal hash computation** — verify local hash matches contract's `hashProposal()`. +2. **Commitment hash computation** — verify local commitment hash matches contract's `hashCommitment()`. +3. **Signal slots hash** — verify `bytes32(0)` for empty, `keccak256(abi.encode(slots))` for non-empty. +4. **ProposeInput encoding** — verify `encodeProposeInput()` output matches expectations. + +### 13.2 Integration Tests + +1. **Raiko client** — mock server returning Registered → WorkInProgress → proof. +2. **Full pipeline** — local anvil + mock Raiko: + - Preconfirm block → finalize → fetch proof → submit to RealTimeInbox. +3. **Multicall composition** — user op + propose + l1 call in one tx. +4. **Chain recovery** — restart node, read `getLastProposalHash()`, resume. + +### 13.3 E2E Test Script + +```bash +# 1. Deploy RealTimeInbox on local anvil +# 2. Activate with genesis hash +# 3. Start Raiko mock (return native proof) +# 4. Start realtime node +# 5. Send L2 transactions +# 6. Verify ProposedAndProved event emitted +# 7. 
Verify lastProposalHash updated +``` + +--- + +## Appendix A — File Mapping (Shasta → RealTime) + +| Shasta File | RealTime File | Action | +|---|---|---| +| `lib.rs` | `lib.rs` | Rewrite (simplified init) | +| `node/mod.rs` | `node/mod.rs` | Rewrite (simplified loop) | +| `node/verifier.rs` | — | Delete | +| `node/l2_height_from_l1.rs` | — | Delete | +| `node/proposal_manager/mod.rs` | `node/proposal_manager/mod.rs` | Heavy edit (add Raiko, remove FI) | +| `node/proposal_manager/proposal.rs` | `node/proposal_manager/proposal.rs` | Rewrite (new fields) | +| `node/proposal_manager/batch_builder.rs` | `node/proposal_manager/batch_builder.rs` | Edit (remove FI, ID tracking) | +| `node/proposal_manager/bridge_handler.rs` | `node/proposal_manager/bridge_handler.rs` | Copy verbatim | +| `node/proposal_manager/l2_block_payload.rs` | `node/proposal_manager/l2_block_payload.rs` | Copy (remove proposal_id if needed) | +| `l1/bindings.rs` | `l1/bindings.rs` | Rewrite (RealTimeInbox) | +| `l1/config.rs` | `l1/config.rs` | Edit (remove proposer_checker) | +| `l1/execution_layer.rs` | `l1/execution_layer.rs` | Heavy edit (new methods, remove old) | +| `l1/proposal_tx_builder.rs` | `l1/proposal_tx_builder.rs` | Rewrite (ZK proof, new propose call) | +| `l1/protocol_config.rs` | `l1/protocol_config.rs` | Rewrite (3-field config) | +| `l1/abi/SurgeInbox.json` | `l1/abi/RealTimeInbox.json` | Replace | +| `l1/abi/Multicall.json` | `l1/abi/Multicall.json` | Copy | +| `l2/execution_layer.rs` | `l2/execution_layer.rs` | Copy (minor path changes) | +| `l2/bindings.rs` | `l2/bindings.rs` | Copy (new Anchor ABI path) | +| `l2/extra_data.rs` | — | Delete (or adapt if extra_data still needed) | +| `l2/abi/Anchor.json` | `l2/abi/Anchor.json` | Replace with new ABI | +| `forced_inclusion/mod.rs` | — | Delete | +| `chain_monitor/mod.rs` | `chain_monitor/mod.rs` | Edit (listen for ProposedAndProved) | +| `shared_abi/*` | `shared_abi/*` | Copy verbatim | +| `utils/config.rs` | `utils/config.rs` | 
Rewrite (new env vars) | +| — | `raiko/mod.rs` | **New** | + +--- + +## Appendix B — Environment Variable Changes + +| Variable | Shasta | RealTime | Notes | +|---|---|---|---| +| `SHASTA_INBOX_ADDRESS` | Required | — | Removed | +| `REALTIME_INBOX_ADDRESS` | — | Required | **New** | +| `PROPOSER_MULTICALL_ADDRESS` | Required | Required | Same | +| `L1_BRIDGE_ADDRESS` | Required | Required | Same | +| `RAIKO_URL` | — | Required | **New** — e.g. `http://localhost:8080` | +| `RAIKO_API_KEY` | — | Optional | **New** — for authenticated Raiko | +| `RAIKO_PROOF_TYPE` | — | Optional | **New** — default `sgx` | +| `RAIKO_L2_NETWORK` | — | Optional | **New** — default `taiko_mainnet` | +| `RAIKO_L1_NETWORK` | — | Optional | **New** — default `ethereum` | + +--- + +## Appendix C — Raiko API Quick Reference + +### Endpoint + +``` +POST {RAIKO_URL}/v3/proof/batch/realtime +``` + +### Request (minimum required fields) + +```json +{ + "l2_block_numbers": [100, 101, 102], + "proof_type": "sgx", + "max_anchor_block_number": 19500000, + "parent_proposal_hash": "0x00...00", + "basefee_sharing_pctg": 0 +} +``` + +### Response States + +| Response | Meaning | Action | +|---|---|---| +| `data.proof` present | Proof ready | Use it | +| `data.status: "Registered"` | Queued | Poll (same request) | +| `data.status: "WorkInProgress"` | Generating | Poll (same request) | +| `data.status: "ZKAnyNotDrawn"` | Not selected | Don't retry | +| `status: "error"` | Failed | Check `message` | + +### Polling Model + +Re-submit the **identical** request body. The server deduplicates by request key. +Recommended interval: 5-30 seconds. + +See [FETCH_REAL_TIME_PROOF.md](FETCH_REAL_TIME_PROOF.md) for the full API specification. 
+ +--- + +## Summary Checklist + +- [ ] Scaffold `realtime/` crate with Cargo.toml +- [ ] Copy and place ABIs (RealTimeInbox, Anchor, Multicall, Bridge) +- [ ] Create L1/L2 bindings with new ABIs +- [ ] Implement `RealtimeConfig` with new env vars +- [ ] Implement `ProtocolConfig` (3-field) +- [ ] Rewrite `Proposal` struct (hash-based, max anchor, no ID) +- [ ] Implement `RaikoClient` with polling +- [ ] Rewrite `ProposalTxBuilder` (ZK proof, new propose signature) +- [ ] Rewrite `ExecutionLayer` (getLastProposalHash, remove bonds/FI) +- [ ] Copy L2 execution layer (anchor unchanged) +- [ ] Simplify node main loop (remove verifier, FI handling) +- [ ] Modify `ProposalManager` (add Raiko call before submission) +- [ ] Implement proposal hash tracking (parent chain) +- [ ] Remove dead code (forced inclusion, verifier, bonds) +- [ ] Update chain monitor for `ProposedAndProved` events +- [ ] Write unit tests for hash computation +- [ ] Write integration tests with mock Raiko +- [ ] E2E test with local anvil diff --git a/Cargo.lock b/Cargo.lock index 4cb8f04b..c258e12c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7190,6 +7190,42 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "realtime" +version = "1.26.12" +dependencies = [ + "alethia-reth-consensus 0.6.0 (git+https://github.com/taikoxyz/alethia-reth.git?rev=3e0fdad9b9fbf7f6550c7209185fd6623468452c)", + "alloy", + "alloy-json-rpc", + "alloy-rlp", + "anyhow", + "async-trait", + "bindings", + "chrono", + "common", + "dotenvy", + "flate2", + "futures-util", + "hex", + "http 1.4.0", + "jsonrpsee", + "jsonwebtoken", + "mockito", + "pacaya", + "prometheus", + "protocol", + "reqwest", + "rpc", + "serde", + "serde_json", + "sled", + "tokio", + "tokio-util", + "tracing", + "tracing-subscriber 0.3.22", + "warp", +] + [[package]] name = "recvmsg" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index a4185927..82c1df68 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "node", "pacaya", 
"permissionless", + "realtime", "shasta", "tools/p2p_node/p2p_boot_node", "urc", @@ -78,6 +79,7 @@ reqwest = { version = "0.12", default-features = true } serde = { version = "1.0", default-features = false, features = ["derive"] } sled = { version = "0.34", default-features = false } serde_json = { version = "1.0", default-features = false } +realtime = { path = "realtime" } shasta = { path = "shasta" } strum = { version = "0.27", features = ["derive"] } diff --git a/FETCH_REAL_TIME_PROOF.md b/FETCH_REAL_TIME_PROOF.md new file mode 100644 index 00000000..9f28c10b --- /dev/null +++ b/FETCH_REAL_TIME_PROOF.md @@ -0,0 +1,532 @@ +# Fetching a RealTime Proof from Raiko + +This document describes the complete mechanism for an external client to request and retrieve a **RealTime fork** proof from a Raiko prover server. + +--- + +## Table of Contents + +1. [Overview](#overview) +2. [Server Base URL & API Version](#server-base-url--api-version) +3. [Authentication](#authentication) +4. [Endpoint](#endpoint) +5. [Request Schema](#request-schema) +6. [Response Schema](#response-schema) +7. [Proof Lifecycle & Polling](#proof-lifecycle--polling) +8. [Task Reporting](#task-reporting) +9. [Error Handling](#error-handling) +10. [End-to-End Example](#end-to-end-example) + +--- + +## Overview + +The RealTime fork implements **atomic propose+prove**: one proposal per proof per transaction, with no aggregation. The client sends a single POST request containing the L2 block numbers, anchor info, signal slots, derivation sources, and prover configuration. The server performs a two-stage pipeline internally: + +1. **Guest input generation** — constructs the provable witness from L1/L2 state. +2. **Proof generation** — runs the selected prover backend (SGX, SP1, RISC0, etc.) on the witness. + +The response is **synchronous within a long-poll model**: the server returns immediately with either the completed proof or a status indicating work is in progress. 
If the proof is not ready, the client re-submits the same request to poll.
+
+---
+
+## Server Base URL & API Version
+
+The RealTime endpoint lives under the **V3 API**. The V3 routes are also merged at the root, so both paths work:
+
+```
+POST {BASE_URL}/v3/proof/batch/realtime
+POST {BASE_URL}/proof/batch/realtime
+```
+
+Where `{BASE_URL}` is the Raiko server address (e.g. `http://localhost:8080`).
+
+---
+
+## Authentication
+
+Authentication is **optional** and depends on server configuration.
+
+### API Key (primary mechanism)
+
+If the server is started with `RAIKO_KEYS` set, all requests require an `X-API-KEY` header.
+
+```
+X-API-KEY: raiko_<key>
+```
+
+Keys are configured server-side via the `RAIKO_KEYS` environment variable as a JSON map of `{ "name": "key_value" }` pairs:
+
+```bash
+RAIKO_KEYS='{"my-client":"raiko_abc123..."}'
+```
+
+**Rate limiting**: Default 600 requests/minute per key, configurable via `RAIKO_RATE_LIMIT`.
+
+**HTTP error codes for auth failures**:
+| Code | Meaning |
+|------|---------|
+| 401 | Missing, invalid, or inactive API key |
+| 429 | Rate limit exceeded |
+
+### JWT Bearer Token (fallback)
+
+If no API key store is configured but a JWT secret is set, the server falls back to `Authorization: Bearer <token>` validation.
+
+### No Auth
+
+If neither is configured, requests are accepted anonymously.
+
+---
+
+## Endpoint
+
+```
+POST /v3/proof/batch/realtime
+Content-Type: application/json
+X-API-KEY: <api-key>   (if auth enabled)
+```
+
+---
+
+## Request Schema
+
+```jsonc
+{
+  // === Required fields ===
+
+  // L2 block numbers covered by this proposal.
+  // Must be non-empty.
+  "l2_block_numbers": [100, 101, 102],
+
+  // Proof backend. One of: "native", "sp1", "risc0", "sgx", "sgxgeth", "tdx", "azure_tdx"
+  // Special values: "zk_any" (server picks ZK prover), "sgx_any" (server picks SGX variant)
+  "proof_type": "sgx",
+
+  // Highest L1 block number that the L2 derivation references. 
+ "max_anchor_block_number": 19500000, + + // Hash of the parent proposal, obtained from the on-chain getLastProposalHash(). + // Use 0x0...0 (32-byte zero) for the first proposal after genesis. + "parent_proposal_hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + + // Percentage of basefee paid to the coinbase address (0-100). + "basefee_sharing_pctg": 0, + + // === Optional fields (server defaults may apply) === + + // L2 network identifier. Must match a chain spec configured on the server. + "network": "taiko_mainnet", + + // L1 network identifier. + "l1_network": "ethereum", + + // EVM address of the prover (checksummed or lowercase hex). + "prover": "0xYourProverAddress", + + // Blob proof type. Defaults to "proof_of_equivalence". + "blob_proof_type": "proof_of_equivalence", + + // L1 signal slots to relay. Array of 32-byte hashes. Defaults to []. + // When non-empty, the on-chain signalSlotsHash = keccak256(abi.encode(signal_slots)). + // When empty, signalSlotsHash = bytes32(0). + "signal_slots": [], + + // Derivation sources for blob data. Defaults to []. + // Each source maps to the on-chain DerivationSource struct. + "sources": [ + { + "isForcedInclusion": false, + "blobSlice": { + "blobHashes": ["0x<32-byte-hash>"], + "offset": 0, + "timestamp": 1700000000 + } + } + ], + + // Previous finalized checkpoint. Null/omitted if none exists yet. + "checkpoint": { + "block_number": 99, + "block_hash": "0x<32-byte-hash>", + "state_root": "0x<32-byte-hash>" + }, + + // Prover-specific options. Keyed by prover backend name. + // Only the key matching your proof_type is used. + "sgx": null, + "sp1": null, + "risc0": null, + "native": null, + "sgxgeth": null, + "tdx": null, + "azure_tdx": null +} +``` + +### Field Details + +| Field | Type | Required | Description | +|---|---|---|---| +| `l2_block_numbers` | `u64[]` | Yes | L2 block numbers in this proposal. Must be non-empty. 
|
+| `proof_type` | `string` | Yes | Prover backend: `native`, `sp1`, `risc0`, `sgx`, `sgxgeth`, `tdx`, `azure_tdx`, `zk_any`, `sgx_any` |
+| `max_anchor_block_number` | `u64` | Yes | Highest L1 block the L2 derivation references |
+| `parent_proposal_hash` | `bytes32` | Yes | Hash of the parent proposal from `getLastProposalHash()` |
+| `basefee_sharing_pctg` | `u8` | Yes | Basefee sharing percentage (0-100) |
+| `network` | `string` | No* | L2 network name (server default used if omitted) |
+| `l1_network` | `string` | No* | L1 network name (server default used if omitted) |
+| `prover` | `string` | No* | Prover EVM address (server default used if omitted) |
+| `blob_proof_type` | `string` | No | Defaults to `"proof_of_equivalence"` |
+| `signal_slots` | `bytes32[]` | No | L1 signal slots to relay. Defaults to `[]` |
+| `sources` | `DerivationSource[]` | No | Blob derivation sources. Defaults to `[]` |
+| `checkpoint` | `object \| null` | No | Previous finalized L2 checkpoint |
+
+\* These fields are required for proof generation but can be omitted if the server has defaults configured via its config file or command-line options.
+
+### `zk_any` and `sgx_any` Proof Types
+
+When you set `proof_type` to `"zk_any"` or `"sgx_any"`, the server **draws** (selects) a concrete prover backend based on internal ballot logic. If no prover is drawn, the response returns status `ZKAnyNotDrawn` — this is not an error, it means the server decided not to prove this request. The client should handle this gracefully.
+
+---
+
+## Response Schema
+
+All responses use the V3 `Status` envelope, serialized with `"status"` as a discriminator tag.
+
+### Success — Proof Ready
+
+```json
+{
+  "status": "ok",
+  "proof_type": "sgx",
+  "data": {
+    "proof": "<proof data>"
+  }
+}
+```
+
+The `proof` field contains the proof bytes/string as produced by the prover backend. For SGX this is typically a quote; for ZK provers it's the serialized proof. 
+ +### Success — Work In Progress + +Returned when the proof is still being generated. **Re-submit the same request to poll.** + +```json +{ + "status": "ok", + "proof_type": "sgx", + "data": { + "status": "WorkInProgress" + } +} +``` + +### Success — Registered + +Returned when the task has been registered but not yet started (e.g., guest input generation is still running). + +```json +{ + "status": "ok", + "proof_type": "sgx", + "data": { + "status": "Registered" + } +} +``` + +### Success — ZK Any Not Drawn + +Returned when `proof_type` was `"zk_any"` or `"sgx_any"` and the server decided not to prove this request. + +```json +{ + "status": "ok", + "proof_type": "native", + "data": { + "status": "ZKAnyNotDrawn" + } +} +``` + +### Error + +```json +{ + "status": "error", + "error": "task_failed", + "message": "Human-readable error description" +} +``` + +### Response Status Summary + +| `status` | `data` variant | Meaning | Client Action | +|---|---|---|---| +| `"ok"` | `{ "proof": "..." }` | Proof is ready | Extract and use the proof | +| `"ok"` | `{ "status": "Registered" }` | Task queued, guest input generating | Wait and re-submit same request | +| `"ok"` | `{ "status": "WorkInProgress" }` | Proof being generated | Wait and re-submit same request | +| `"ok"` | `{ "status": "Cancelled" }` | Task was cancelled | Do not retry | +| `"ok"` | `{ "status": "ZKAnyNotDrawn" }` | `zk_any`/`sgx_any` not selected | Handle gracefully; no proof produced | +| `"error"` | N/A | Failure | Inspect `error` and `message` fields | + +--- + +## Proof Lifecycle & Polling + +The RealTime endpoint uses a **re-submit polling** model, not a separate status endpoint. 
The flow: + +``` +Client Server + | | + |-- POST /v3/proof/batch/realtime -------->| + | |-- Stage 1: generate guest input + |<-- { "status":"ok", data.status: | + | "Registered" } --------------------| + | | + | (wait 5-10 seconds) | + | | + |-- POST /v3/proof/batch/realtime -------->| (same request body) + | |-- Stage 1 complete, Stage 2: prove + |<-- { "status":"ok", data.status: | + | "WorkInProgress" } -----------------| + | | + | (wait 5-30 seconds) | + | | + |-- POST /v3/proof/batch/realtime -------->| (same request body) + | |-- Proof ready + |<-- { "status":"ok", | + | data.proof: "0x..." } --------------| +``` + +**Key points:** +- Re-submit the **identical** request body each time. The server deduplicates by request key. +- The server internally manages the two-stage pipeline (guest input → proof). +- There is no separate GET endpoint for RealTime proof status. +- Recommended polling interval: 5-30 seconds depending on proof type (native is fast, ZK provers are slow). + +--- + +## Task Reporting + +To check the status of all in-flight tasks (including RealTime), use the report endpoint: + +``` +GET /v3/proof/report +X-API-KEY: +``` + +Response is an array of task reports: + +```json +[ + { + "descriptor": { + "RealTimeGuestInput": { + "l2_block_numbers": [100, 101], + "l1_network": "ethereum", + "l2_network": "taiko_mainnet", + "parent_proposal_hash": "0x..." + } + }, + "status": "WorkInProgress" + }, + { + "descriptor": { + "RealTimeProof": { + "l2_block_numbers": [100, 101], + "l1_network": "ethereum", + "l2_network": "taiko_mainnet", + "parent_proposal_hash": "0x...", + "proof_system": "sgx", + "prover": "0x..." 
+ } + }, + "status": "Success" + } +] +``` + +RealTime tasks produce two descriptor types in reports: +- `RealTimeGuestInput` — the witness generation stage +- `RealTimeProof` — the actual proof generation stage + +--- + +## Error Handling + +### HTTP-Level Errors + +| HTTP Code | Cause | +|-----------|-------| +| 400 | Invalid request: missing required fields, bad field types, empty `l2_block_numbers` | +| 401 | Authentication failure (missing/invalid API key) | +| 429 | Rate limit exceeded | +| 500 | Internal server error (prover crash, I/O failure, etc.) | +| 503 | Server capacity full or system paused | + +### Application-Level Errors (in response JSON) + +These return HTTP 200 but with `"status": "error"`: + +```json +{ + "status": "error", + "error": "task_failed", + "message": "Task failed with status: AnyhowError(\"RPC timeout\")" +} +``` + +Common error messages: +- `"l2_block_numbers is empty"` — validation failure +- `"Missing network"` / `"Missing prover"` — required field not provided and no server default +- `"Invalid proof_type"` — unrecognized proof type string +- `"Feature not supported: "` — server not compiled with that prover backend + +--- + +## End-to-End Example + +### 1. Health Check + +```bash +curl http://localhost:8080/v3/health +# Expected: 200 OK +``` + +### 2. Submit RealTime Proof Request + +```bash +curl -X POST http://localhost:8080/v3/proof/batch/realtime \ + -H "Content-Type: application/json" \ + -H "X-API-KEY: raiko_your_key_here" \ + -d '{ + "l2_block_numbers": [100, 101, 102], + "proof_type": "sgx", + "network": "taiko_mainnet", + "l1_network": "ethereum", + "prover": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", + "max_anchor_block_number": 19500000, + "parent_proposal_hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "basefee_sharing_pctg": 0, + "signal_slots": [], + "sources": [], + "checkpoint": null, + "blob_proof_type": "proof_of_equivalence", + "sgx": null + }' +``` + +### 3. 
Poll Until Proof Ready + +```bash +# Response will be one of: +# { "status": "ok", "proof_type": "sgx", "data": { "status": "Registered" } } +# { "status": "ok", "proof_type": "sgx", "data": { "status": "WorkInProgress" } } +# { "status": "ok", "proof_type": "sgx", "data": { "proof": "0x..." } } + +# Re-submit the same request body until data.proof is present. +``` + +### 4. Pseudocode Client Loop + +```python +import requests, time + +RAIKO_URL = "http://localhost:8080/v3/proof/batch/realtime" +HEADERS = { + "Content-Type": "application/json", + "X-API-KEY": "raiko_your_key_here", +} + +payload = { + "l2_block_numbers": [100, 101, 102], + "proof_type": "sgx", + "network": "taiko_mainnet", + "l1_network": "ethereum", + "prover": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", + "max_anchor_block_number": 19500000, + "parent_proposal_hash": "0x" + "00" * 32, + "basefee_sharing_pctg": 0, + "signal_slots": [], + "sources": [], + "checkpoint": None, + "blob_proof_type": "proof_of_equivalence", +} + +while True: + resp = requests.post(RAIKO_URL, json=payload, headers=HEADERS) + body = resp.json() + + if body["status"] == "error": + raise Exception(f"Proof failed: {body['message']}") + + data = body["data"] + + # Proof returned + if "proof" in data: + proof = data["proof"] + print(f"Proof received: {proof[:40]}...") + break + + # zk_any/sgx_any not drawn — no proof will be produced + if data.get("status") == "ZKAnyNotDrawn": + print("Prover not drawn for this request") + break + + # Still in progress + status = data.get("status", "Unknown") + print(f"Status: {status}, polling again...") + time.sleep(10) +``` + +--- + +## Appendix: DerivationSource Schema + +The `sources` array contains objects matching the on-chain `DerivationSource` struct: + +```jsonc +{ + // Whether this source is from a forced inclusion. + // Always false for RealTimeInbox proposals. + "isForcedInclusion": false, + + // Blob slice referencing the transaction data. 
+ "blobSlice": { + // Array of 32-byte blob hashes (versioned hashes from the blob transaction). + "blobHashes": ["0x01<...>"], + // Byte offset within the blob where this source's data begins. + "offset": 0, + // Timestamp associated with the blob. + "timestamp": 1700000000 + } +} +``` + +## Appendix: Checkpoint Schema + +```jsonc +{ + // L2 block number of the checkpoint. + "block_number": 99, + // Block hash at that L2 block. + "block_hash": "0x<32-byte-hex>", + // State root at that L2 block. + "state_root": "0x<32-byte-hex>" +} +``` + +## Appendix: Supported Proof Types + +| Value | Backend | Description | +|---|---|---| +| `"native"` | Native | Block construction + equality check (no cryptographic proof) | +| `"sp1"` | SP1 | Succinct SP1 zero-knowledge prover | +| `"risc0"` | RISC0 | RISC Zero zero-knowledge prover | +| `"sgx"` | Intel SGX | Trusted execution environment proof | +| `"sgxgeth"` | SGX + Geth | SGX with Geth execution client | +| `"tdx"` | Intel TDX | Trust Domain Extensions proof | +| `"azure_tdx"` | Azure TDX | Azure Confidential VM (TDX) proof | +| `"zk_any"` | Server-selected ZK | Server picks between SP1/RISC0 via ballot | +| `"sgx_any"` | Server-selected SGX | Server picks between SGX/SGXGeth via ballot | diff --git a/PROTOCOL_MIGRATION_REAL_TIME_FORK.md b/PROTOCOL_MIGRATION_REAL_TIME_FORK.md new file mode 100644 index 00000000..18b852e4 --- /dev/null +++ b/PROTOCOL_MIGRATION_REAL_TIME_FORK.md @@ -0,0 +1,451 @@ +# Real-Time Inbox: Technical Reference + +> Documents the shift from the standard two-phase `Inbox` (Shasta) to the single-phase +> `RealTimeInbox` for real-time proving. + +--- + +## 1. Architectural Shift + +### Standard Inbox — Two-Phase Model + +``` +Phase 1: propose(lookahead, data) → Store proposal hash in ring buffer, emit Proposed +Phase 2: prove(data, proof) → Verify proof for a BATCH of proposals, finalize state +``` + +Proposals accumulate on-chain. 
A prover later submits a single proof covering a contiguous range +of proposals `[N..M]`. The contract maintains `CoreState`, a ring buffer of proposal hashes, +forced inclusion queues, and bond balances. + +### RealTimeInbox — Atomic Single-Phase Model + +``` +propose(data, checkpoint, proof) → Build proposal + Verify proof + Finalize (one tx) +``` + +Each proposal is proven immediately in the same transaction. Only `bytes32 lastProposalHash` is +persisted. No batching — exactly one proposal per proof. No bonds, forced inclusions, proposer +checks, prover whitelist, or ring buffer. + +The prover must execute L2 blocks and generate the ZK proof **before** submitting the +transaction. + +--- + +## 2. Type Changes + +### 2.1 Config + +**Inbox** `IInbox.Config` — 17 fields: + +```solidity +struct Config { + address proofVerifier; + address proposerChecker; // REMOVED + address proverWhitelist; // REMOVED + address signalService; + address bondToken; // REMOVED + uint64 minBond; // REMOVED + uint64 livenessBond; // REMOVED + uint48 withdrawalDelay; // REMOVED + uint48 provingWindow; // REMOVED + uint48 permissionlessProvingDelay; // REMOVED + uint48 maxProofSubmissionDelay; // REMOVED + uint48 ringBufferSize; // REMOVED + uint8 basefeeSharingPctg; + uint16 forcedInclusionDelay; // REMOVED + uint64 forcedInclusionFeeInGwei; // REMOVED + uint64 forcedInclusionFeeDoubleThreshold; // REMOVED + uint8 permissionlessInclusionMultiplier; // REMOVED +} +``` + +**RealTimeInbox** `IRealTimeInbox.Config` — 3 fields: + +```solidity +struct Config { + address proofVerifier; // SurgeVerifier address + address signalService; // SignalService address + uint8 basefeeSharingPctg; // % of basefee paid to coinbase +} +``` + +### 2.2 ProposeInput + +**Inbox** `IInbox.ProposeInput`: + +```solidity +struct ProposeInput { + uint48 deadline; // REMOVED + LibBlobs.BlobReference blobReference; + uint16 numForcedInclusions; // REMOVED +} +``` + +**RealTimeInbox** `IRealTimeInbox.ProposeInput`: + 
+```solidity +struct ProposeInput { + LibBlobs.BlobReference blobReference; + bytes32[] signalSlots; // NEW — L1 signal slots to relay + uint48 maxAnchorBlockNumber; // NEW — highest L1 anchor block +} +``` + +- `signalSlots` is now a first-class input. Each slot is verified via + `_signalService.isSignalSent(slot)` and hashed into the proposal. +- `maxAnchorBlockNumber` must satisfy `blockhash(maxAnchorBlockNumber) != 0` + (within last 256 L1 blocks). The corresponding `maxAnchorBlockHash` is read on-chain via + `blockhash()` and included in the proposal. These new max anchor block values will be used + for verifying anchor linkage — the L2 node uses them to verify that anchor transactions + reference a valid, recent L1 block. + +### 2.3 Proposal + +**Inbox** `IInbox.Proposal` — stored in ring buffer: + +```solidity +struct Proposal { + uint48 id; // REMOVED + uint48 timestamp; // REMOVED + uint48 endOfSubmissionWindowTimestamp; // REMOVED + address proposer; // REMOVED + bytes32 parentProposalHash; + uint48 originBlockNumber; // REMOVED + bytes32 originBlockHash; // REMOVED + uint8 basefeeSharingPctg; + DerivationSource[] sources; + bytes32 signalSlotsHash; +} +``` + +**RealTimeInbox** `IRealTimeInbox.Proposal` — **transient, never stored** (only hashed): + +```solidity +struct Proposal { + bytes32 parentProposalHash; // Hash of parent (from lastProposalHash) + uint48 maxAnchorBlockNumber; // NEW — highest L1 anchor block number + bytes32 maxAnchorBlockHash; // NEW — blockhash(maxAnchorBlockNumber) + uint8 basefeeSharingPctg; + IInbox.DerivationSource[] sources; // Reuses IInbox.DerivationSource + bytes32 signalSlotsHash; +} +``` + +- No sequential `id` — proposals identified by hash only. +- No `timestamp`, `proposer`, or `endOfSubmissionWindowTimestamp`. +- `originBlockNumber`/`originBlockHash` replaced by `maxAnchorBlockNumber`/`maxAnchorBlockHash`. 
+ The semantics shift from "L1 block the proposal was made in" to "highest L1 block the L2 + derivation can reference." The new max anchor block values will be used for verifying anchor + linkage — the L2 execution layer uses `maxAnchorBlockNumber` and `maxAnchorBlockHash` to + validate that anchor transactions in L2 blocks correctly reference an L1 block at or before + this height, ensuring L1-L2 state consistency. + +### 2.4 Commitment (Critical for Provers) + +**Inbox** `IInbox.Commitment` — covers a batch: + +```solidity +struct Commitment { + uint48 firstProposalId; + bytes32 firstProposalParentBlockHash; + bytes32 lastProposalHash; + address actualProver; + uint48 endBlockNumber; + bytes32 endStateRoot; + Transition[] transitions; // Per-proposal: { proposer, timestamp, blockHash } +} +``` + +**RealTimeInbox** `IRealTimeInbox.Commitment` — covers exactly one proposal: + +```solidity +struct Commitment { + bytes32 proposalHash; + ICheckpointStore.Checkpoint checkpoint; // { blockNumber, blockHash, stateRoot } +} +``` + +No batch support. No `actualProver`, no `Transition[]`. The checkpoint contains the finalized L2 +state for the single proposal. + +### 2.5 Removed Types + +| Type | Purpose | +| -------------------- | ----------------------------------------------------------------- | +| `CoreState` | Tracked nextProposalId, lastFinalizedProposalId, timestamps, etc. 
| +| `Transition` | Per-proposal transition data in batch proofs | +| `ProveInput` | Wrapper for Commitment in `prove()` | +| `ProvedEventPayload` | Event payload struct | + +### 2.6 Shared Types (Unchanged) + +```solidity +// IInbox — reused by RealTimeInbox +struct DerivationSource { + bool isForcedInclusion; // Always false in RealTimeInbox + LibBlobs.BlobSlice blobSlice; +} + +// LibBlobs +struct BlobReference { uint16 blobStartIndex; uint16 numBlobs; uint24 offset; } +struct BlobSlice { bytes32[] blobHashes; uint24 offset; uint48 timestamp; } + +// ICheckpointStore +struct Checkpoint { uint48 blockNumber; bytes32 blockHash; bytes32 stateRoot; } +``` + +--- + +## 3. Function Signatures + +### Activation + +```solidity +// Inbox +function activate(bytes32 _lastPacayaBlockHash) external onlyOwner; +// Sets up CoreState, stores genesis proposal hash in ring buffer slot 0 + +// RealTimeInbox +function activate(bytes32 _genesisProposalHash) external onlyOwner; +// Sets lastProposalHash = _genesisProposalHash. Can only be called once. +``` + +### Propose + +```solidity +// Inbox — proposal only, no proof +function propose(bytes calldata _lookahead, bytes calldata _data) external; + +// RealTimeInbox — atomic propose + prove +function propose( + bytes calldata _data, // abi.encode(IRealTimeInbox.ProposeInput) + ICheckpointStore.Checkpoint calldata _checkpoint, + bytes calldata _proof +) external; +``` + +### Prove (Removed) + +```solidity +// Inbox +function prove(bytes calldata _data, bytes calldata _proof) external; + +// RealTimeInbox — does not exist. Proving is embedded in propose(). 
+``` + +### Removed Function Groups + +- **Bond management**: `deposit`, `depositTo`, `withdraw`, `requestWithdrawal`, `cancelWithdrawal`, `getBond` +- **Forced inclusions**: `saveForcedInclusion`, `getCurrentForcedInclusionFee`, `getForcedInclusions`, `getForcedInclusionState` + +### State Queries + +```solidity +// Inbox +function getCoreState() external view returns (CoreState memory); +function getProposalHash(uint256 _proposalId) external view returns (bytes32); + +// RealTimeInbox — replaces both with: +function getLastProposalHash() external view returns (bytes32); +``` + +### Encoding Helpers + +RealTimeInbox uses plain `abi.encode`/`abi.decode` (no `LibCodec` or `LibHashOptimized`): + +```solidity +function encodeProposeInput(ProposeInput calldata) public pure returns (bytes memory); +function decodeProposeInput(bytes calldata) public pure returns (ProposeInput memory); +function hashProposal(Proposal memory) public pure returns (bytes32); // keccak256(abi.encode(...)) +function hashCommitment(Commitment memory) public pure returns (bytes32); // keccak256(abi.encode(...)) +function hashSignalSlots(bytes32[] memory) public pure returns (bytes32); // keccak256(abi.encode(...)) +``` + +--- + +## 4. On-Chain State + +**Inbox**: + +```solidity +uint48 public activationTimestamp; +CoreState internal _coreState; // 2 slots +mapping(uint256 bufferSlot => bytes32 proposalHash) _proposalHashes; // ring buffer +LibForcedInclusion.Storage _forcedInclusionStorage; // 2 slots +LibBonds.Storage _bondStorage; +``` + +**RealTimeInbox**: + +```solidity +bytes32 public lastProposalHash; // 1 slot — the chain head +``` + +--- + +## 5. 
Events
+
+**Inbox** emits separate events for proposing and proving:
+
+```solidity
+event Proposed(
+    uint48 indexed id, address indexed proposer,
+    bytes32 parentProposalHash, uint48 endOfSubmissionWindowTimestamp,
+    uint8 basefeeSharingPctg, DerivationSource[] sources, bytes32 signalSlotsHash
+);
+
+event Proved(
+    uint48 firstProposalId, uint48 firstNewProposalId,
+    uint48 lastProposalId, address indexed actualProver
+);
+```
+
+**RealTimeInbox** emits a single combined event:
+
+```solidity
+event ProposedAndProved(
+    bytes32 indexed proposalHash,
+    bytes32 parentProposalHash,
+    uint48 maxAnchorBlockNumber,
+    uint8 basefeeSharingPctg,
+    IInbox.DerivationSource[] sources,
+    bytes32[] signalSlots,
+    ICheckpointStore.Checkpoint checkpoint
+);
+```
+
+- Indexed by `proposalHash` instead of sequential `id`.
+- Emits the raw `signalSlots` array (the proposal itself commits only to their hash).
+- Includes the finalized `Checkpoint` directly.
+- No `proposer` or `actualProver` field.
+
+---
+
+## 6. Proof Verification
+
+Both contracts call `IProofVerifier.verifyProof(uint256, bytes32, bytes)`. The interface is
+unchanged.
+ +**Inbox**: + +``` +proposalAge = block.timestamp - transitions[offset].timestamp +commitmentHash = LibHashOptimized.hashCommitment(commitment) +verifyProof(proposalAge, commitmentHash, proof) +``` + +**RealTimeInbox**: + +``` +proposalAge = 0 // always 0 +commitmentHash = keccak256(abi.encode(commitment)) // plain abi.encode +verifyProof(0, commitmentHash, proof) +``` + +### Commitment Hash Reconstruction + +For off-chain reconstruction of the commitment hash: + +``` +proposalHash = keccak256(abi.encode( + bytes32 parentProposalHash, + uint48 maxAnchorBlockNumber, // padded to 32 bytes by abi.encode + bytes32 maxAnchorBlockHash, + uint8 basefeeSharingPctg, // padded to 32 bytes by abi.encode + IInbox.DerivationSource[] sources, // dynamic array encoding + bytes32 signalSlotsHash +)) + +commitmentHash = keccak256(abi.encode( + bytes32 proposalHash, + uint48 checkpoint.blockNumber, // padded to 32 bytes by abi.encode + bytes32 checkpoint.blockHash, + bytes32 checkpoint.stateRoot +)) +``` + +### Signal Slots Hash + +```solidity +signalSlotsHash = bytes32(0) // if empty +signalSlotsHash = keccak256(abi.encode(signalSlots)) // if non-empty (bytes32[]) +``` + +--- + +## 7. L2 Anchor Integration — Signal Slot Relay + +`signalSlots` provided in `ProposeInput` must be relayed to L2 so that nodes can verify L1→L2 +cross-chain messages without a separate proof. The relay happens through the L2 anchor +transaction of the **first block** in the batch. 
+ +### Anchor Function + +The standard `anchorV4` is replaced by `anchorV4WithSignalSlots`: + +```solidity +// Anchor.sol (L2) + +// Standard — no signal relay +function anchorV4(ICheckpointStore.Checkpoint calldata _checkpoint) external; + +// Real-time inbox — relays signal slots in the first block's anchor tx +function anchorV4WithSignalSlots( + ICheckpointStore.Checkpoint calldata _checkpoint, + bytes32[] calldata _signalSlots +) external; +``` + +### Placement Rule + +Only the **first block** of a batch carries all signal slots. Subsequent blocks in the same +batch call `anchorV4WithSignalSlots` with an empty `_signalSlots` array (or `anchorV4`). + +``` +Batch (from one propose() call) +├── Block 0 — anchorV4WithSignalSlots(checkpoint, signalSlots) ← all slots here +├── Block 1 — anchorV4WithSignalSlots(checkpoint, []) +└── Block N — anchorV4WithSignalSlots(checkpoint, []) +``` + +### What the Anchor Does with Signal Slots + +```solidity +if (_signalSlots.length > 0) { + ISignalService(address(checkpointStore)).setSignalsReceived(_signalSlots); +} +``` + +Each slot is marked as received in the `SignalService`, making L1 signals immediately +consumable on L2 without a merkle proof — consistent with the real-time proving model where +L1 state is already finalized before the L2 block is executed. + +### Relationship to `signalSlotsHash` + +The same `signalSlots` array that is passed to `anchorV4WithSignalSlots` on L2 is also hashed +into the proposal on L1: + +``` +L1 propose(): signalSlotsHash = keccak256(abi.encode(signalSlots)) → committed in proposalHash +L2 anchor(): anchorV4WithSignalSlots(checkpoint, signalSlots) → signals set in SignalService +``` + +The ZK proof covers both sides, ensuring the same set of slots is committed on L1 and +activated on L2. + +--- + +## 8. 
Removed Features Summary + +| Feature | Impact | +| ---------------------------------- | ----------------------------------------- | +| Batch proving | One proposal per proof; no `Transition[]` | +| Ring buffer | No historical proposal hash queries | +| Bonds | No economic security from proposer stakes | +| Forced inclusions | No censorship resistance mechanism | +| Proposer checker / lookahead | Anyone can propose | +| Prover whitelist | Anyone can prove | +| Proving window / liveness slashing | No deadlines or slashing | +| One-per-block limit | Multiple proposals per L1 block allowed | +| Transaction deadline | No `deadline` field in input | diff --git a/realtime/Cargo.toml b/realtime/Cargo.toml new file mode 100644 index 00000000..a9a9d7ed --- /dev/null +++ b/realtime/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "realtime" +version.workspace = true +edition.workspace = true +repository.workspace = true +license.workspace = true +publish = false + +[dependencies] +alloy = { workspace = true } +alloy-json-rpc = { workspace = true } +alloy-rlp = { workspace = true } +anyhow = { workspace = true } +async-trait = { workspace = true } +chrono = { workspace = true } +common = { workspace = true } +dotenvy = { workspace = true } +flate2 = { workspace = true } +futures-util = { workspace = true } +hex = { workspace = true } +http = { workspace = true } +jsonrpsee = { workspace = true } +jsonwebtoken = { workspace = true } +pacaya = { workspace = true } +prometheus = { workspace = true } +reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sled = { workspace = true } +taiko_alethia_reth = { workspace = true } +taiko_bindings = { workspace = true } +taiko_protocol = { workspace = true } +taiko_rpc = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +warp = { workspace = true } + +[dev-dependencies] +mockito = { 
workspace = true } +tokio = { workspace = true, features = ["full", "test-util"] } + +[lints] +workspace = true diff --git a/realtime/src/chain_monitor/mod.rs b/realtime/src/chain_monitor/mod.rs new file mode 100644 index 00000000..7fa2b710 --- /dev/null +++ b/realtime/src/chain_monitor/mod.rs @@ -0,0 +1,12 @@ +use crate::l1::bindings::RealTimeInbox; +use common::chain_monitor::ChainMonitor; +use tracing::info; + +pub type RealtimeChainMonitor = ChainMonitor; + +pub fn print_proposed_and_proved_info(event: &RealTimeInbox::ProposedAndProved) { + info!( + "ProposedAndProved event → proposalHash = {}, parentProposalHash = {}, maxAnchorBlockNumber = {}", + event.proposalHash, event.parentProposalHash, event.maxAnchorBlockNumber + ); +} diff --git a/realtime/src/l1/abi/Multicall.json b/realtime/src/l1/abi/Multicall.json new file mode 100644 index 00000000..53b423ce --- /dev/null +++ b/realtime/src/l1/abi/Multicall.json @@ -0,0 +1,44 @@ +{ + "abi": [ + { + "type": "receive", + "stateMutability": "payable" + }, + { + "type": "function", + "name": "multicall", + "inputs": [ + { + "name": "calls", + "type": "tuple[]", + "internalType": "struct Multicall.Call[]", + "components": [ + { + "name": "target", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "results", + "type": "bytes[]", + "internalType": "bytes[]" + } + ], + "stateMutability": "payable" + } + ] +} \ No newline at end of file diff --git a/realtime/src/l1/abi/RealTimeInbox.json b/realtime/src/l1/abi/RealTimeInbox.json new file mode 100644 index 00000000..0d444719 --- /dev/null +++ b/realtime/src/l1/abi/RealTimeInbox.json @@ -0,0 +1 @@ 
+{"abi":[{"type":"function","name":"activate","inputs":[{"name":"_genesisProposalHash","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"getConfig","inputs":[],"outputs":[{"name":"config_","type":"tuple","internalType":"struct IRealTimeInbox.Config","components":[{"name":"proofVerifier","type":"address","internalType":"address"},{"name":"signalService","type":"address","internalType":"address"},{"name":"basefeeSharingPctg","type":"uint8","internalType":"uint8"}]}],"stateMutability":"view"},{"type":"function","name":"getLastProposalHash","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"propose","inputs":[{"name":"_data","type":"bytes","internalType":"bytes"},{"name":"_checkpoint","type":"tuple","internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"Activated","inputs":[{"name":"genesisProposalHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"ProposedAndProved","inputs":[{"name":"proposalHash","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"parentProposalHash","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"maxAnchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"basefeeSharingPctg","type":"uint8","indexed":false,"internalType":"uint8"},{"name":"sources","type":"tuple[]","indexed":false,"internalType":"struct IInbox.DerivationSource[]","components":[{"name":"isForcedInclusion","type":"bool","internalType":"bool"},{"name":"blobSlice","type":"tuple","internalType":"struct 
LibBlobs.BlobSlice","components":[{"name":"blobHashes","type":"bytes32[]","internalType":"bytes32[]"},{"name":"offset","type":"uint24","internalType":"uint24"},{"name":"timestamp","type":"uint48","internalType":"uint48"}]}]},{"name":"signalSlots","type":"bytes32[]","indexed":false,"internalType":"bytes32[]"},{"name":"checkpoint","type":"tuple","indexed":false,"internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"anonymous":false}]} \ No newline at end of file diff --git a/realtime/src/l1/bindings.rs b/realtime/src/l1/bindings.rs new file mode 100644 index 00000000..dbee3b8d --- /dev/null +++ b/realtime/src/l1/bindings.rs @@ -0,0 +1,36 @@ +#![allow(clippy::too_many_arguments)] + +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug, Default)] + RealTimeInbox, + "src/l1/abi/RealTimeInbox.json" +); + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug)] + Multicall, + "src/l1/abi/Multicall.json" +); + +// Define ProposeInput and BlobReference manually since the RealTimeInbox ABI +// only exposes propose(bytes _data, ...) where _data is abi.encode(ProposeInput). +// These types are internal to the contract but needed for encoding. +sol! 
{
+    struct BlobReference {
+        uint16 blobStartIndex;
+        uint16 numBlobs;
+        uint24 offset;
+    }
+
+    struct ProposeInput {
+        BlobReference blobReference;
+        bytes32[] signalSlots;
+        uint48 maxAnchorBlockNumber;
+    }
+}
diff --git a/realtime/src/l1/config.rs b/realtime/src/l1/config.rs
new file mode 100644
index 00000000..3ac97e12
--- /dev/null
+++ b/realtime/src/l1/config.rs
@@ -0,0 +1,27 @@
+use crate::utils::config::RealtimeConfig;
+use alloy::primitives::Address;
+
+#[derive(Clone)]
+pub struct ContractAddresses {
+    pub realtime_inbox: Address,
+    pub proposer_multicall: Address,
+    pub bridge: Address,
+}
+
+pub struct EthereumL1Config {
+    pub realtime_inbox: Address,
+    pub proposer_multicall: Address,
+    pub bridge: Address,
+}
+
+impl TryFrom<RealtimeConfig> for EthereumL1Config {
+    type Error = anyhow::Error;
+
+    fn try_from(config: RealtimeConfig) -> Result<Self, Self::Error> {
+        Ok(EthereumL1Config {
+            realtime_inbox: config.realtime_inbox,
+            proposer_multicall: config.proposer_multicall,
+            bridge: config.bridge,
+        })
+    }
+}
diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs
new file mode 100644
index 00000000..514ff289
--- /dev/null
+++ b/realtime/src/l1/execution_layer.rs
@@ -0,0 +1,350 @@
+use super::config::EthereumL1Config;
+use super::proposal_tx_builder::ProposalTxBuilder;
+use super::protocol_config::ProtocolConfig;
+use crate::node::proposal_manager::proposal::Proposal;
+use crate::raiko::RaikoClient;
+use crate::shared_abi::bindings::{Bridge::MessageSent, IBridge::Message, SignalSent};
+use crate::{l1::config::ContractAddresses, node::proposal_manager::bridge_handler::UserOp};
+use alloy::{
+    eips::{BlockId, BlockNumberOrTag},
+    primitives::{Address, B256, FixedBytes},
+    providers::{DynProvider, ext::DebugApi},
+    rpc::types::{
+        TransactionRequest,
+        trace::geth::{
+            GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
+            GethDebugTracingOptions,
+        },
+    },
+    sol_types::SolEvent,
+};
+use anyhow::{Error, anyhow};
+use common::{
+    
l1::{ + traits::{ELTrait, PreconferProvider}, + transaction_error::TransactionError, + }, + metrics::Metrics, + shared::{ + alloy_tools, execution_layer::ExecutionLayer as ExecutionLayerCommon, + transaction_monitor::TransactionMonitor, + }, +}; +use crate::l1::bindings::RealTimeInbox::{self, RealTimeInboxInstance}; +use pacaya::l1::traits::{OperatorError, PreconfOperator}; +use std::sync::Arc; +use tokio::sync::mpsc::Sender; +use tracing::info; + +pub struct ExecutionLayer { + common: ExecutionLayerCommon, + provider: DynProvider, + preconfer_address: Address, + pub transaction_monitor: TransactionMonitor, + contract_addresses: ContractAddresses, + realtime_inbox: RealTimeInboxInstance, + raiko_client: RaikoClient, +} + +impl ELTrait for ExecutionLayer { + type Config = EthereumL1Config; + async fn new( + common_config: common::l1::config::EthereumL1Config, + specific_config: Self::Config, + transaction_error_channel: Sender, + metrics: Arc, + ) -> Result { + let provider = alloy_tools::construct_alloy_provider( + &common_config.signer, + common_config + .execution_rpc_urls + .first() + .ok_or_else(|| anyhow!("L1 RPC URL is required"))?, + ) + .await?; + let common = ExecutionLayerCommon::new(provider.clone()).await?; + + let transaction_monitor = TransactionMonitor::new( + provider.clone(), + &common_config, + transaction_error_channel, + metrics.clone(), + common.chain_id(), + ) + .await + .map_err(|e| Error::msg(format!("Failed to create TransactionMonitor: {e}")))?; + + let realtime_inbox = + RealTimeInbox::new(specific_config.realtime_inbox, provider.clone()); + + let config = realtime_inbox + .getConfig() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getConfig for RealTimeInbox: {e}"))?; + + tracing::info!( + "RealTimeInbox: {}, proofVerifier: {}, signalService: {}", + specific_config.realtime_inbox, + config.proofVerifier, + config.signalService, + ); + + let contract_addresses = ContractAddresses { + realtime_inbox: 
specific_config.realtime_inbox, + proposer_multicall: specific_config.proposer_multicall, + bridge: specific_config.bridge, + }; + + // Read Raiko config from environment + let realtime_config = crate::utils::config::RealtimeConfig::read_env_variables() + .map_err(|e| anyhow::anyhow!("Failed to read RealtimeConfig for Raiko: {e}"))?; + let raiko_client = RaikoClient::new(&realtime_config); + + Ok(Self { + common, + provider, + preconfer_address: common_config.signer.get_address(), + transaction_monitor, + contract_addresses, + realtime_inbox, + raiko_client, + }) + } + + fn common(&self) -> &ExecutionLayerCommon { + &self.common + } +} + +use common::config::ConfigTrait; + +impl PreconferProvider for ExecutionLayer { + async fn get_preconfer_wallet_eth(&self) -> Result { + self.common() + .get_account_balance(self.preconfer_address) + .await + } + + async fn get_preconfer_nonce_pending(&self) -> Result { + self.common() + .get_account_nonce(self.preconfer_address, BlockNumberOrTag::Pending) + .await + } + + async fn get_preconfer_nonce_latest(&self) -> Result { + self.common() + .get_account_nonce(self.preconfer_address, BlockNumberOrTag::Latest) + .await + } + + fn get_preconfer_alloy_address(&self) -> Address { + self.preconfer_address + } +} + +impl PreconfOperator for ExecutionLayer { + fn get_preconfer_address(&self) -> Address { + self.preconfer_address + } + + async fn get_operators_for_current_and_next_epoch( + &self, + _current_epoch_timestamp: u64, + ) -> Result<(Address, Address), OperatorError> { + // RealTime: anyone can propose, but we still use operator tracking for slot management. + // Return self as both current and next operator. 
+ Ok((self.preconfer_address, self.preconfer_address)) + } + + async fn is_preconf_router_specified_in_taiko_wrapper(&self) -> Result { + Ok(true) + } + + async fn get_l2_height_from_taiko_inbox(&self) -> Result { + Ok(0) + } + + async fn get_handover_window_slots(&self) -> Result { + Err(anyhow::anyhow!( + "Not implemented for RealTime execution layer" + )) + } +} + +impl ExecutionLayer { + pub fn get_raiko_client(&self) -> &RaikoClient { + &self.raiko_client + } + + pub async fn send_batch_to_l1( + &self, + batch: Proposal, + tx_hash_notifier: Option>, + tx_result_notifier: Option>, + ) -> Result<(), Error> { + info!( + "📦 Proposing with {} blocks | user_ops: {:?} | signal_slots: {:?} | l1_calls: {:?} | zk_proof: {}", + batch.l2_blocks.len(), + batch.user_ops, + batch.signal_slots, + batch.l1_calls, + batch.zk_proof.is_some(), + ); + + let builder = ProposalTxBuilder::new(self.provider.clone(), 10); + + let tx = builder + .build_propose_tx( + batch, + self.preconfer_address, + self.contract_addresses.clone(), + ) + .await?; + + let pending_nonce = self.get_preconfer_nonce_pending().await?; + self.transaction_monitor + .monitor_new_transaction(tx, pending_nonce, tx_hash_notifier, tx_result_notifier) + .await + .map_err(|e| Error::msg(format!("Sending batch to L1 failed: {e}")))?; + + Ok(()) + } + + pub async fn is_transaction_in_progress(&self) -> Result { + self.transaction_monitor.is_transaction_in_progress().await + } + + pub async fn fetch_protocol_config(&self) -> Result { + let config = self + .realtime_inbox + .getConfig() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call getConfig for RealTimeInbox: {e}"))?; + + info!( + "RealTimeInbox config: basefeeSharingPctg: {}", + config.basefeeSharingPctg, + ); + + Ok(ProtocolConfig::from(&config)) + } + + pub async fn get_last_proposal_hash(&self) -> Result { + let result = self + .realtime_inbox + .getLastProposalHash() + .call() + .await + .map_err(|e| anyhow::anyhow!("Failed to call 
getLastProposalHash: {e}"))?;
+
+        Ok(result)
+    }
+}
+
+// Surge: L1 EL ops for Bridge Handler
+
+use alloy::rpc::types::trace::geth::{CallFrame, CallLogFrame};
+
+fn collect_logs_recursive(frame: &CallFrame) -> Vec<CallLogFrame> {
+    let mut logs = frame.logs.clone();
+
+    for subcall in &frame.calls {
+        logs.extend(collect_logs_recursive(subcall));
+    }
+
+    logs
+}
+
+pub trait L1BridgeHandlerOps {
+    async fn find_message_and_signal_slot(
+        &self,
+        user_op: UserOp,
+    ) -> Result<Option<(Message, FixedBytes<32>)>, anyhow::Error>;
+}
+
+impl L1BridgeHandlerOps for ExecutionLayer {
+    async fn find_message_and_signal_slot(
+        &self,
+        user_op_data: UserOp,
+    ) -> Result<Option<(Message, FixedBytes<32>)>, anyhow::Error> {
+        let tx_request = TransactionRequest::default()
+            .from(self.preconfer_address)
+            .to(user_op_data.submitter)
+            .input(user_op_data.calldata.into());
+
+        let mut tracer_config = serde_json::Map::new();
+        tracer_config.insert("withLog".to_string(), serde_json::Value::Bool(true));
+        tracer_config.insert("onlyTopCall".to_string(), serde_json::Value::Bool(false));
+
+        let tracing_options = GethDebugTracingOptions {
+            tracer: Some(GethDebugTracerType::BuiltInTracer(
+                GethDebugBuiltInTracerType::CallTracer,
+            )),
+            tracer_config: serde_json::Value::Object(tracer_config).into(),
+            ..Default::default()
+        };
+
+        let call_options = GethDebugTracingCallOptions {
+            tracing_options,
+            ..Default::default()
+        };
+
+        let trace_result = self
+            .provider
+            .debug_trace_call(
+                tx_request,
+                BlockId::Number(BlockNumberOrTag::Latest),
+                call_options,
+            )
+            .await
+            .map_err(|e| anyhow!("Failed to simulate executeBatch on L1: {e}"))?;
+
+        tracing::debug!("Received trace result: {:?}", trace_result);
+
+        let mut message: Option<Message> = None;
+        let mut slot: Option<FixedBytes<32>> = None;
+
+        if let alloy::rpc::types::trace::geth::GethTrace::CallTracer(call_frame) = trace_result {
+            let all_logs = collect_logs_recursive(&call_frame);
+            tracing::debug!("Collected {} logs from call trace", all_logs.len());
+
+            for log in all_logs {
+                if let Some(topics) = &log.topics {
+                    if 
!topics.is_empty() { + if topics[0] == MessageSent::SIGNATURE_HASH { + let log_data = alloy::primitives::LogData::new_unchecked( + topics.clone(), + log.data.clone().unwrap_or_default(), + ); + let decoded = MessageSent::decode_log_data(&log_data).map_err(|e| { + anyhow!("Failed to decode MessageSent event L1: {e}") + })?; + + message = Some(decoded.message); + } else if topics[0] == SignalSent::SIGNATURE_HASH { + let log_data = alloy::primitives::LogData::new_unchecked( + topics.clone(), + log.data.clone().unwrap_or_default(), + ); + let decoded = SignalSent::decode_log_data(&log_data).map_err(|e| { + anyhow!("Failed to decode SignalSent event L1: {e}") + })?; + + slot = Some(decoded.slot); + } + } + } + } + } + + tracing::debug!("{:?} {:?}", message, slot); + + if let (Some(message), Some(slot)) = (message, slot) { + return Ok(Some((message, slot))); + } + + Ok(None) + } +} diff --git a/realtime/src/l1/mod.rs b/realtime/src/l1/mod.rs new file mode 100644 index 00000000..7bcc9c57 --- /dev/null +++ b/realtime/src/l1/mod.rs @@ -0,0 +1,5 @@ +pub mod bindings; +pub mod config; +pub mod execution_layer; +pub mod proposal_tx_builder; +pub mod protocol_config; diff --git a/realtime/src/l1/proposal_tx_builder.rs b/realtime/src/l1/proposal_tx_builder.rs new file mode 100644 index 00000000..740fb708 --- /dev/null +++ b/realtime/src/l1/proposal_tx_builder.rs @@ -0,0 +1,231 @@ +use crate::l1::{ + bindings::{BlobReference, Multicall, ProposeInput, RealTimeInbox}, + config::ContractAddresses, +}; +use crate::node::proposal_manager::{ + bridge_handler::{L1Call, UserOp}, + proposal::Proposal, +}; +use crate::shared_abi::bindings::Bridge; +use alloy::{ + consensus::SidecarBuilder, + eips::eip4844::BlobTransactionSidecar, + network::TransactionBuilder4844, + primitives::{ + Address, Bytes, U256, + aliases::{U24, U48}, + }, + providers::{DynProvider, Provider}, + rpc::types::TransactionRequest, + sol_types::SolValue, +}; +use alloy_json_rpc::RpcError; +use anyhow::Error; +use 
common::l1::{fees_per_gas::FeesPerGas, tools, transaction_error::TransactionError}; +use taiko_protocol::shasta::{ + BlobCoder, + manifest::{BlockManifest, DerivationSourceManifest}, +}; +use tracing::{info, warn}; + +pub struct ProposalTxBuilder { + provider: DynProvider, + extra_gas_percentage: u64, +} + +impl ProposalTxBuilder { + pub fn new(provider: DynProvider, extra_gas_percentage: u64) -> Self { + Self { + provider, + extra_gas_percentage, + } + } + + #[allow(clippy::too_many_arguments)] + pub async fn build_propose_tx( + &self, + batch: Proposal, + from: Address, + contract_addresses: ContractAddresses, + ) -> Result { + let tx_blob = self + .build_propose_blob(batch, from, contract_addresses) + .await?; + let tx_blob_gas = match self.provider.estimate_gas(tx_blob.clone()).await { + Ok(gas) => gas, + Err(e) => { + warn!( + "Build proposeBatch: Failed to estimate gas for blob transaction: {}", + e + ); + match e { + RpcError::ErrorResp(err) => { + return Err(anyhow::anyhow!( + tools::convert_error_payload(&err.to_string()) + .unwrap_or(TransactionError::EstimationFailed) + )); + } + _ => return Ok(tx_blob), + } + } + }; + let tx_blob_gas = tx_blob_gas + tx_blob_gas * self.extra_gas_percentage / 100; + + let fees_per_gas = match FeesPerGas::get_fees_per_gas(&self.provider).await { + Ok(fees_per_gas) => fees_per_gas, + Err(e) => { + warn!("Build proposeBatch: Failed to get fees per gas: {}", e); + return Ok(tx_blob); + } + }; + + let tx_blob = fees_per_gas.update_eip4844(tx_blob, tx_blob_gas); + + Ok(tx_blob) + } + + #[allow(clippy::too_many_arguments)] + pub async fn build_propose_blob( + &self, + batch: Proposal, + from: Address, + contract_addresses: ContractAddresses, + ) -> Result { + let mut multicalls: Vec = vec![]; + + // Add user op to multicall + if !batch.user_ops.is_empty() { + if let Some(user_op) = batch.user_ops.first() { + let user_op_call = self.build_user_op_call(user_op.clone()); + info!("Added user op to Multicall: {:?}", &user_op_call); + 
multicalls.push(user_op_call);
+            }
+        }
+
+        // Add the proposal to the multicall
+        let (propose_call, blob_sidecar) = self
+            .build_propose_call(&batch, contract_addresses.realtime_inbox)
+            .await?;
+        info!("Added proposal to Multicall: {:?}", &propose_call);
+        multicalls.push(propose_call.clone());
+
+        // Add L1 calls
+        if !batch.l1_calls.is_empty() {
+            if let Some(l1_call) = batch.l1_calls.first() {
+                let l1_call_call = self.build_l1_call_call(
+                    l1_call.clone(),
+                    contract_addresses.bridge,
+                );
+                info!("Added L1 call to Multicall: {:?}", &l1_call_call);
+                multicalls.push(l1_call_call.clone());
+            }
+        }
+
+        let multicall = Multicall::new(contract_addresses.proposer_multicall, &self.provider);
+        let call = multicall.multicall(multicalls);
+
+        let tx = TransactionRequest::default()
+            .to(contract_addresses.proposer_multicall)
+            .from(from)
+            .input(call.calldata().clone().into())
+            .with_blob_sidecar(blob_sidecar);
+
+        Ok(tx)
+    }
+
+    fn build_user_op_call(&self, user_op_data: UserOp) -> Multicall::Call {
+        Multicall::Call {
+            target: user_op_data.submitter,
+            value: U256::ZERO,
+            data: user_op_data.calldata,
+        }
+    }
+
+    async fn build_propose_call(
+        &self,
+        batch: &Proposal,
+        inbox_address: Address,
+    ) -> Result<(Multicall::Call, BlobTransactionSidecar), anyhow::Error> {
+        let mut block_manifests = <Vec<BlockManifest>>::with_capacity(batch.l2_blocks.len());
+        for l2_block in &batch.l2_blocks {
+            block_manifests.push(BlockManifest {
+                timestamp: l2_block.timestamp_sec,
+                coinbase: l2_block.coinbase,
+                anchor_block_number: l2_block.anchor_block_number,
+                gas_limit: l2_block.gas_limit_without_anchor,
+                transactions: l2_block
+                    .prebuilt_tx_list
+                    .tx_list
+                    .iter()
+                    .map(|tx| tx.clone().into())
+                    .collect(),
+            });
+        }
+
+        let manifest = DerivationSourceManifest {
+            blocks: block_manifests,
+        };
+
+        let manifest_data = manifest
+            .encode_and_compress()
+            .map_err(|e| Error::msg(format!("Can't encode and compress manifest: {e}")))?;
+
+        let sidecar_builder: SidecarBuilder<BlobCoder> =
+            
SidecarBuilder::from_slice(&manifest_data); + let sidecar: BlobTransactionSidecar = sidecar_builder.build()?; + + let inbox = RealTimeInbox::new(inbox_address, self.provider.clone()); + + let proof = Bytes::from( + batch + .zk_proof + .as_ref() + .ok_or_else(|| anyhow::anyhow!("ZK proof not set on proposal"))? + .clone(), + ); + + // Build ProposeInput and ABI-encode it as the _data parameter + let blob_reference = BlobReference { + blobStartIndex: 0, + numBlobs: sidecar.blobs.len().try_into()?, + offset: U24::ZERO, + }; + + let propose_input = ProposeInput { + blobReference: blob_reference, + signalSlots: batch.signal_slots.clone(), + maxAnchorBlockNumber: U48::from(batch.max_anchor_block_number), + }; + + let encoded_input = Bytes::from(propose_input.abi_encode()); + + // Convert L1 Checkpoint type for the propose call + let checkpoint = crate::l1::bindings::ICheckpointStore::Checkpoint { + blockNumber: batch.checkpoint.blockNumber, + blockHash: batch.checkpoint.blockHash, + stateRoot: batch.checkpoint.stateRoot, + }; + + let call = inbox.propose(encoded_input, checkpoint, proof); + + Ok(( + Multicall::Call { + target: inbox_address, + value: U256::ZERO, + data: call.calldata().clone(), + }, + sidecar, + )) + } + + fn build_l1_call_call(&self, l1_call: L1Call, bridge_address: Address) -> Multicall::Call { + let bridge = Bridge::new(bridge_address, &self.provider); + let call = bridge.processMessage(l1_call.message_from_l2, l1_call.signal_slot_proof); + + Multicall::Call { + target: bridge_address, + value: U256::ZERO, + data: call.calldata().clone(), + } + } +} diff --git a/realtime/src/l1/protocol_config.rs b/realtime/src/l1/protocol_config.rs new file mode 100644 index 00000000..91b04387 --- /dev/null +++ b/realtime/src/l1/protocol_config.rs @@ -0,0 +1,30 @@ +use crate::l1::bindings::IRealTimeInbox::Config; +use alloy::primitives::Address; + +#[derive(Clone, Default)] +pub struct ProtocolConfig { + pub basefee_sharing_pctg: u8, + pub proof_verifier: Address, + 
pub signal_service: Address, +} + +impl From<&Config> for ProtocolConfig { + fn from(config: &Config) -> Self { + Self { + basefee_sharing_pctg: config.basefeeSharingPctg, + proof_verifier: config.proofVerifier, + signal_service: config.signalService, + } + } +} + +impl ProtocolConfig { + pub fn get_basefee_sharing_pctg(&self) -> u8 { + self.basefee_sharing_pctg + } + + /// Use the EVM blockhash() 256-block limit as the max anchor height offset. + pub fn get_max_anchor_height_offset(&self) -> u64 { + 256 + } +} diff --git a/realtime/src/l2/abi/Anchor.json b/realtime/src/l2/abi/Anchor.json new file mode 100644 index 00000000..b1ecd4b7 --- /dev/null +++ b/realtime/src/l2/abi/Anchor.json @@ -0,0 +1 @@ +{"abi":[{"type":"constructor","inputs":[{"name":"_checkpointStore","type":"address","internalType":"contract ICheckpointStore"},{"name":"_l1ChainId","type":"uint64","internalType":"uint64"}],"stateMutability":"nonpayable"},{"type":"function","name":"ANCHOR_GAS_LIMIT","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"GOLDEN_TOUCH_ADDRESS","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"acceptOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"anchorV4","inputs":[{"name":"_checkpoint","type":"tuple","internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"anchorV4WithSignalSlots","inputs":[{"name":"_checkpoint","type":"tuple","internalType":"struct 
ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]},{"name":"_signalSlots","type":"bytes32[]","internalType":"bytes32[]"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"anchorV5","inputs":[{"name":"_proposalParams","type":"tuple","internalType":"struct Anchor.ProposalParams","components":[{"name":"submissionWindowEnd","type":"uint48","internalType":"uint48"}]},{"name":"_blockParams","type":"tuple","internalType":"struct Anchor.BlockParams","components":[{"name":"anchorBlockNumber","type":"uint48","internalType":"uint48"},{"name":"anchorBlockHash","type":"bytes32","internalType":"bytes32"},{"name":"anchorStateRoot","type":"bytes32","internalType":"bytes32"},{"name":"rawTxListHash","type":"bytes32","internalType":"bytes32"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"blockHashes","inputs":[{"name":"blockNumber","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"blockHash","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"checkpointStore","inputs":[],"outputs":[{"name":"","type":"address","internalType":"contract ICheckpointStore"}],"stateMutability":"view"},{"type":"function","name":"getBlockState","inputs":[],"outputs":[{"name":"","type":"tuple","internalType":"struct Anchor.BlockState","components":[{"name":"anchorBlockNumber","type":"uint48","internalType":"uint48"},{"name":"ancestorsHash","type":"bytes32","internalType":"bytes32"}]}],"stateMutability":"view"},{"type":"function","name":"getPreconfMetadata","inputs":[{"name":"_blockNumber","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"tuple","internalType":"struct 
Anchor.PreconfMetadata","components":[{"name":"anchorBlockNumber","type":"uint48","internalType":"uint48"},{"name":"submissionWindowEnd","type":"uint48","internalType":"uint48"},{"name":"parentSubmissionWindowEnd","type":"uint48","internalType":"uint48"},{"name":"rawTxListHash","type":"bytes32","internalType":"bytes32"},{"name":"parentRawTxListHash","type":"bytes32","internalType":"bytes32"}]}],"stateMutability":"view"},{"type":"function","name":"impl","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"inNonReentrant","inputs":[],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"init","inputs":[{"name":"_owner","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"l1ChainId","inputs":[],"outputs":[{"name":"","type":"uint64","internalType":"uint64"}],"stateMutability":"view"},{"type":"function","name":"owner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"pause","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"paused","inputs":[],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"pendingOwner","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"proxiableUUID","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"renounceOwnership","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"resolver","inputs":[],"outputs":[{"name":"","type":"address","internalType":"address"}],"stateMutability":"view"},{"type":"function","name":"transferOwnership","inputs":[{"name":"newOwner","type":"address","internalType":"addr
ess"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"unpause","inputs":[],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"upgradeTo","inputs":[{"name":"newImplementation","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"upgradeToAndCall","inputs":[{"name":"newImplementation","type":"address","internalType":"address"},{"name":"data","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"payable"},{"type":"function","name":"withdraw","inputs":[{"name":"_token","type":"address","internalType":"address"},{"name":"_to","type":"address","internalType":"address"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"AdminChanged","inputs":[{"name":"previousAdmin","type":"address","indexed":false,"internalType":"address"},{"name":"newAdmin","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Anchored","inputs":[{"name":"prevAnchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"anchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"ancestorsHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"BeaconUpgraded","inputs":[{"name":"beacon","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Initialized","inputs":[{"name":"version","type":"uint8","indexed":false,"internalType":"uint8"}],"anonymous":false},{"type":"event","name":"OwnershipTransferStarted","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"OwnershipTransferred","inputs":[{"name":"previousOwner","type":"address","indexed":true,"internalType":"address"},{"name":"newOwner","type":"address","indexed"
:true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Paused","inputs":[{"name":"account","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Unpaused","inputs":[{"name":"account","type":"address","indexed":false,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Upgraded","inputs":[{"name":"implementation","type":"address","indexed":true,"internalType":"address"}],"anonymous":false},{"type":"event","name":"Withdrawn","inputs":[{"name":"token","type":"address","indexed":false,"internalType":"address"},{"name":"to","type":"address","indexed":false,"internalType":"address"},{"name":"amount","type":"uint256","indexed":false,"internalType":"uint256"}],"anonymous":false},{"type":"error","name":"ACCESS_DENIED","inputs":[]},{"type":"error","name":"AncestorsHashMismatch","inputs":[]},{"type":"error","name":"ETH_TRANSFER_FAILED","inputs":[]},{"type":"error","name":"FUNC_NOT_IMPLEMENTED","inputs":[]},{"type":"error","name":"INVALID_PAUSE_STATUS","inputs":[]},{"type":"error","name":"InvalidAddress","inputs":[]},{"type":"error","name":"InvalidBlockNumber","inputs":[]},{"type":"error","name":"InvalidL1ChainId","inputs":[]},{"type":"error","name":"InvalidL2ChainId","inputs":[]},{"type":"error","name":"InvalidSender","inputs":[]},{"type":"error","name":"REENTRANT_CALL","inputs":[]},{"type":"error","name":"ZERO_ADDRESS","inputs":[]},{"type":"error","name":"ZERO_VALUE","inputs":[]}]} \ No newline at end of file diff --git a/realtime/src/l2/bindings.rs b/realtime/src/l2/bindings.rs new file mode 100644 index 00000000..690f8934 --- /dev/null +++ b/realtime/src/l2/bindings.rs @@ -0,0 +1,10 @@ +#![allow(clippy::too_many_arguments)] + +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + Anchor, + "src/l2/abi/Anchor.json" +); diff --git a/realtime/src/l2/execution_layer.rs b/realtime/src/l2/execution_layer.rs new file mode 100644 index 00000000..ccc17c4e --- /dev/null +++ 
b/realtime/src/l2/execution_layer.rs @@ -0,0 +1,369 @@ +use crate::l2::bindings::{Anchor, ICheckpointStore::Checkpoint}; +use crate::shared_abi::bindings::{ + Bridge::{self, MessageSent}, + IBridge::Message, + SignalSent, +}; +use alloy::{ + consensus::{ + SignableTransaction, Transaction as AnchorTransaction, TxEnvelope, + transaction::Recovered, + }, + primitives::{Address, B256, Bytes, FixedBytes}, + providers::{DynProvider, Provider}, + rpc::types::Transaction, + signers::{Signature, Signer as AlloySigner}, + sol_types::SolEvent, +}; +use anyhow::Error; +use common::shared::{ + alloy_tools, execution_layer::ExecutionLayer as ExecutionLayerCommon, + l2_slot_info_v2::L2SlotInfoV2, +}; +use common::{ + crypto::{GOLDEN_TOUCH_ADDRESS, GOLDEN_TOUCH_PRIVATE_KEY}, + signer::Signer, +}; +use pacaya::l2::config::TaikoConfig; +use std::sync::Arc; +use tracing::{debug, info, warn}; + +pub struct L2ExecutionLayer { + common: ExecutionLayerCommon, + pub provider: DynProvider, + anchor: Anchor::AnchorInstance, + pub bridge: Bridge::BridgeInstance, + pub signal_service: Address, + pub chain_id: u64, + pub config: TaikoConfig, + l2_call_signer: Arc, +} + +impl L2ExecutionLayer { + pub async fn new(taiko_config: TaikoConfig) -> Result { + let provider = + alloy_tools::create_alloy_provider_without_wallet(&taiko_config.taiko_geth_url).await?; + + let chain_id = provider + .get_chain_id() + .await + .map_err(|e| anyhow::anyhow!("Failed to get chain ID: {}", e))?; + info!("L2 Chain ID: {}", chain_id); + + let anchor = Anchor::new(taiko_config.taiko_anchor_address, provider.clone()); + + let chain_id_string = format!("{}", chain_id); + let zeros_needed = 38usize.saturating_sub(chain_id_string.len()); + let bridge_address: Address = + format!("0x{}{}01", chain_id_string, "0".repeat(zeros_needed)).parse()?; + let bridge = Bridge::new(bridge_address, provider.clone()); + + let signal_service: Address = + format!("0x{}{}05", chain_id_string, "0".repeat(zeros_needed)).parse()?; + + let 
common = ExecutionLayerCommon::new(provider.clone()).await?; + let l2_call_signer = taiko_config.signer.clone(); + + Ok(Self { + common, + provider, + anchor, + bridge, + signal_service, + chain_id, + l2_call_signer, + config: taiko_config, + }) + } + + pub fn common(&self) -> &ExecutionLayerCommon { + &self.common + } + + pub async fn construct_anchor_tx( + &self, + l2_slot_info: &L2SlotInfoV2, + anchor_block_params: (Checkpoint, Vec>), + ) -> Result { + debug!( + "Constructing anchor transaction for block number: {}", + l2_slot_info.parent_id() + 1 + ); + let nonce = self + .provider + .get_transaction_count(GOLDEN_TOUCH_ADDRESS) + .block_id((*l2_slot_info.parent_hash()).into()) + .await + .map_err(|e| { + self.common + .chain_error("Failed to get transaction count", Some(&e.to_string())) + })?; + + let call_builder = self + .anchor + .anchorV4WithSignalSlots(anchor_block_params.0, anchor_block_params.1) + .gas(1_000_000) + .max_fee_per_gas(u128::from(l2_slot_info.base_fee())) + .max_priority_fee_per_gas(0) + .nonce(nonce) + .chain_id(self.chain_id); + + let typed_tx = call_builder + .into_transaction_request() + .build_typed_tx() + .map_err(|_| anyhow::anyhow!("AnchorTX: Failed to build typed transaction"))?; + + let tx_eip1559 = typed_tx + .eip1559() + .ok_or_else(|| anyhow::anyhow!("AnchorTX: Failed to extract EIP-1559 transaction"))?; + + let signature = self.sign_hash_deterministic(tx_eip1559.signature_hash())?; + let sig_tx = tx_eip1559.clone().into_signed(signature); + + let tx_envelope = TxEnvelope::from(sig_tx); + + debug!("AnchorTX transaction hash: {}", tx_envelope.tx_hash()); + + let tx = Transaction { + inner: Recovered::new_unchecked(tx_envelope, GOLDEN_TOUCH_ADDRESS), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + Ok(tx) + } + + fn sign_hash_deterministic(&self, hash: B256) -> Result { + common::crypto::fixed_k_signer::sign_hash_deterministic(GOLDEN_TOUCH_PRIVATE_KEY, hash) + } + + pub async 
fn transfer_eth_from_l2_to_l1( + &self, + _amount: u128, + _dest_chain_id: u64, + _preconfer_address: Address, + _bridge_relayer_fee: u64, + ) -> Result<(), Error> { + warn!("Implement bridge transfer logic here"); + Ok(()) + } + + pub async fn get_last_synced_anchor_block_id_from_geth(&self) -> Result { + self.get_latest_anchor_transaction_input() + .await + .map_err(|e| anyhow::anyhow!("get_last_synced_anchor_block_id_from_geth: {e}")) + .and_then(|input| Self::decode_anchor_id_from_tx_data(&input)) + } + + async fn get_latest_anchor_transaction_input(&self) -> Result, Error> { + let block = self.common.get_latest_block_with_txs().await?; + let anchor_tx = match block.transactions.as_transactions() { + Some(txs) => txs.first().ok_or_else(|| { + anyhow::anyhow!( + "get_latest_anchor_transaction_input: Cannot get anchor transaction from block {}", + block.number() + ) + })?, + None => { + return Err(anyhow::anyhow!( + "No transactions in L2 block {}", + block.number() + )); + } + }; + + Ok(anchor_tx.input().to_vec()) + } + + pub fn decode_anchor_id_from_tx_data(data: &[u8]) -> Result { + let tx_data = + ::abi_decode_validate( + data, + ) + .map_err(|e| anyhow::anyhow!("Failed to decode anchor id from tx data: {}", e))?; + Ok(tx_data._checkpoint.blockNumber.to::()) + } + + pub fn get_anchor_tx_data(data: &[u8]) -> Result { + let tx_data = + ::abi_decode_validate( + data, + ) + .map_err(|e| anyhow::anyhow!("Failed to decode anchor tx data: {}", e))?; + Ok(tx_data) + } + + pub async fn get_head_l1_origin(&self) -> Result { + let response = self + .provider + .raw_request::<_, serde_json::Value>( + std::borrow::Cow::Borrowed("taiko_headL1Origin"), + (), + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to fetch taiko_headL1Origin: {}", e))?; + + let hex_str = response + .get("blockID") + .or_else(|| response.get("blockId")) + .and_then(serde_json::Value::as_str) + .ok_or_else(|| { + anyhow::anyhow!("Missing or invalid block id in taiko_headL1Origin response") + })?; 
+ + u64::from_str_radix(hex_str.trim_start_matches("0x"), 16) + .map_err(|e| anyhow::anyhow!("Failed to parse 'blockID' as u64: {}", e)) + } + + pub async fn get_last_synced_block_params_from_geth(&self) -> Result { + self.get_latest_anchor_transaction_input() + .await + .map_err(|e| anyhow::anyhow!("get_last_synced_block_params_from_geth: {e}")) + .and_then(|input| Self::decode_block_params_from_tx_data(&input)) + } + + pub fn decode_block_params_from_tx_data(data: &[u8]) -> Result { + let tx_data = + ::abi_decode_validate( + data, + ) + .map_err(|e| anyhow::anyhow!("Failed to decode block params from tx data: {}", e))?; + Ok(tx_data._checkpoint) + } +} + +// Surge: L2 EL ops for Bridge Handler + +pub trait L2BridgeHandlerOps { + async fn construct_l2_call_tx(&self, message: Message) -> Result; + async fn find_message_and_signal_slot( + &self, + block_id: u64, + ) -> Result)>, anyhow::Error>; +} + +impl L2BridgeHandlerOps for L2ExecutionLayer { + async fn construct_l2_call_tx(&self, message: Message) -> Result { + use alloy::signers::local::PrivateKeySigner; + use std::str::FromStr; + + debug!("Constructing bridge call transaction for L2 call"); + + let signer_address = self.l2_call_signer.get_address(); + + let nonce = self + .provider + .get_transaction_count(signer_address) + .await + .map_err(|e| anyhow::anyhow!("Failed to get nonce for bridge call: {}", e))?; + + let call_builder = self + .bridge + .processMessage(message, Bytes::new()) + .gas(1_000_000) + .max_fee_per_gas(1_000_000_000) + .max_priority_fee_per_gas(0) + .nonce(nonce) + .chain_id(self.chain_id); + + let typed_tx = call_builder + .into_transaction_request() + .build_typed_tx() + .map_err(|_| anyhow::anyhow!("L2 Call Tx: Failed to build typed transaction"))?; + + let tx_eip1559 = typed_tx + .eip1559() + .ok_or_else(|| anyhow::anyhow!("L2 Call Tx: Failed to extract EIP-1559 transaction"))? 
+ .clone(); + + let signature = match self.l2_call_signer.as_ref() { + Signer::Web3signer(web3signer, address) => { + let signature_bytes = web3signer.sign_transaction(&tx_eip1559, *address).await?; + Signature::try_from(signature_bytes.as_slice()) + .map_err(|e| anyhow::anyhow!("Failed to parse signature: {}", e))? + } + Signer::PrivateKey(private_key, _) => { + let signer = PrivateKeySigner::from_str(private_key.as_str())?; + AlloySigner::sign_hash(&signer, &tx_eip1559.signature_hash()).await? + } + }; + + let sig_tx = tx_eip1559.into_signed(signature); + let tx_envelope = TxEnvelope::from(sig_tx); + + debug!("L2 Call transaction hash: {}", tx_envelope.tx_hash()); + + let tx = Transaction { + inner: Recovered::new_unchecked(tx_envelope, signer_address), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + Ok(tx) + } + + async fn find_message_and_signal_slot( + &self, + block_id: u64, + ) -> Result)>, anyhow::Error> { + use alloy::rpc::types::Filter; + + let bridge_address = *self.bridge.address(); + let signal_service_address = self.signal_service; + + let filter = Filter::new().from_block(block_id).to_block(block_id); + + let bridge_filter = filter + .clone() + .address(bridge_address) + .event_signature(MessageSent::SIGNATURE_HASH); + + let bridge_logs = self + .provider + .get_logs(&bridge_filter) + .await + .map_err(|e| anyhow::anyhow!("Failed to get MessageSent logs from bridge: {e}"))?; + + let signal_filter = filter + .address(signal_service_address) + .event_signature(SignalSent::SIGNATURE_HASH); + + let signal_logs = self.provider.get_logs(&signal_filter).await.map_err(|e| { + anyhow::anyhow!("Failed to get SignalSent logs from signal service: {e}") + })?; + + if bridge_logs.is_empty() || signal_logs.is_empty() { + return Ok(None); + } + + let message = { + let log = bridge_logs + .first() + .ok_or_else(|| anyhow::anyhow!("No bridge logs"))?; + let log_data = alloy::primitives::LogData::new_unchecked( + 
log.topics().to_vec(), + log.data().data.clone(), + ); + MessageSent::decode_log_data(&log_data) + .map_err(|e| anyhow::anyhow!("Failed to decode MessageSent event: {e}"))? + .message + }; + + let slot = { + let log = signal_logs + .first() + .ok_or_else(|| anyhow::anyhow!("No signal logs"))?; + let log_data = alloy::primitives::LogData::new_unchecked( + log.topics().to_vec(), + log.data().data.clone(), + ); + SignalSent::decode_log_data(&log_data) + .map_err(|e| anyhow::anyhow!("Failed to decode SignalSent event: {e}"))? + .slot + }; + + Ok(Some((message, slot))) + } +} diff --git a/realtime/src/l2/mod.rs b/realtime/src/l2/mod.rs new file mode 100644 index 00000000..b0e580e0 --- /dev/null +++ b/realtime/src/l2/mod.rs @@ -0,0 +1,3 @@ +pub mod bindings; +pub mod execution_layer; +pub mod taiko; diff --git a/realtime/src/l2/taiko.rs b/realtime/src/l2/taiko.rs new file mode 100644 index 00000000..0d850ee7 --- /dev/null +++ b/realtime/src/l2/taiko.rs @@ -0,0 +1,327 @@ +#![allow(dead_code)] + +use super::execution_layer::L2ExecutionLayer; +use crate::l1::protocol_config::ProtocolConfig; +use crate::l2::bindings::{Anchor, ICheckpointStore::Checkpoint}; +use crate::node::proposal_manager::l2_block_payload::L2BlockV2Payload; +use alloy::primitives::FixedBytes; +use alloy::{ + consensus::BlockHeader, + eips::BlockNumberOrTag, + primitives::{Address, B256}, + rpc::types::Block, +}; +use anyhow::Error; +use common::shared::l2_slot_info_v2::L2SlotContext; +use common::{ + l1::slot_clock::SlotClock, + l2::{ + engine::L2Engine, + taiko_driver::{ + OperationType, TaikoDriver, TaikoDriverConfig, + models::{BuildPreconfBlockRequestBody, BuildPreconfBlockResponse, ExecutableData}, + }, + traits::Bridgeable, + }, + metrics::Metrics, + shared::{ + l2_slot_info_v2::L2SlotInfoV2, + l2_tx_lists::{self, PreBuiltTxList}, + }, +}; +use pacaya::l2::config::TaikoConfig; +use std::{sync::Arc, time::Duration}; +use taiko_alethia_reth::validation::ANCHOR_V3_V4_GAS_LIMIT; +use tracing::{debug, 
trace}; + +pub struct Taiko { + protocol_config: ProtocolConfig, + l2_execution_layer: Arc, + driver: Arc, + slot_clock: Arc, + coinbase: String, + l2_engine: L2Engine, +} + +impl Taiko { + pub async fn new( + slot_clock: Arc, + protocol_config: ProtocolConfig, + metrics: Arc, + taiko_config: TaikoConfig, + l2_engine: L2Engine, + ) -> Result { + let driver_config: TaikoDriverConfig = TaikoDriverConfig { + driver_url: taiko_config.driver_url.clone(), + rpc_driver_preconf_timeout: taiko_config.rpc_driver_preconf_timeout, + rpc_driver_status_timeout: taiko_config.rpc_driver_status_timeout, + jwt_secret_bytes: taiko_config.jwt_secret_bytes, + call_timeout: Duration::from_millis(taiko_config.preconf_heartbeat_ms / 2), + }; + Ok(Self { + protocol_config, + l2_execution_layer: Arc::new( + L2ExecutionLayer::new(taiko_config.clone()) + .await + .map_err(|e| anyhow::anyhow!("Failed to create L2ExecutionLayer: {}", e))?, + ), + driver: Arc::new(TaikoDriver::new(&driver_config, metrics).await?), + slot_clock, + coinbase: format!("0x{}", hex::encode(taiko_config.signer.get_address())), + l2_engine, + }) + } + + pub fn get_driver(&self) -> Arc { + self.driver.clone() + } + + pub fn l2_execution_layer(&self) -> Arc { + self.l2_execution_layer.clone() + } + + pub async fn get_pending_l2_tx_list_from_l2_engine( + &self, + base_fee: u64, + batches_ready_to_send: u64, + gas_limit: u64, + ) -> Result, Error> { + self.l2_engine + .get_pending_l2_tx_list(base_fee, batches_ready_to_send, gas_limit) + .await + } + + pub fn get_protocol_config(&self) -> &ProtocolConfig { + &self.protocol_config + } + + pub async fn get_latest_l2_block_id(&self) -> Result { + self.l2_execution_layer.common().get_latest_block_id().await + } + + pub async fn get_l2_block_by_number( + &self, + number: u64, + full_txs: bool, + ) -> Result { + self.l2_execution_layer + .common() + .get_block_by_number(number, full_txs) + .await + } + + pub async fn fetch_l2_blocks_until_latest( + &self, + start_block: u64, + 
full_txs: bool, + ) -> Result, Error> { + let start_time = std::time::Instant::now(); + let end_block = self.get_latest_l2_block_id().await?; + let mut blocks = Vec::with_capacity(usize::try_from(end_block - start_block + 1)?); + for block_number in start_block..=end_block { + let block = self.get_l2_block_by_number(block_number, full_txs).await?; + blocks.push(block); + } + debug!( + "Fetched L2 blocks from {} to {} in {} ms", + start_block, + end_block, + start_time.elapsed().as_millis() + ); + Ok(blocks) + } + + pub async fn get_transaction_by_hash( + &self, + hash: B256, + ) -> Result { + self.l2_execution_layer + .common() + .get_transaction_by_hash(hash) + .await + } + + pub async fn get_l2_block_hash(&self, number: u64) -> Result { + self.l2_execution_layer + .common() + .get_block_hash(number) + .await + } + + pub async fn get_l2_slot_info(&self) -> Result { + self.get_l2_slot_info_by_parent_block(BlockNumberOrTag::Latest) + .await + } + + pub async fn get_l2_slot_info_by_parent_block( + &self, + parent: BlockNumberOrTag, + ) -> Result { + let l2_slot_timestamp = self.slot_clock.get_l2_slot_begin_timestamp()?; + let parent_block = self + .l2_execution_layer + .common() + .get_block_header(parent) + .await?; + let parent_id = parent_block.header.number(); + let parent_hash = parent_block.header.hash; + let parent_gas_limit = parent_block.header.gas_limit(); + let parent_timestamp = parent_block.header.timestamp(); + + let parent_gas_limit_without_anchor = if parent_id != 0 { + parent_gas_limit + .checked_sub(ANCHOR_V3_V4_GAS_LIMIT) + .ok_or_else(|| { + anyhow::anyhow!( + "parent_gas_limit {} is less than ANCHOR_V3_V4_GAS_LIMIT {}", + parent_gas_limit, + ANCHOR_V3_V4_GAS_LIMIT + ) + })? 
+ } else { + parent_gas_limit + }; + + let base_fee: u64 = self.get_base_fee(parent_block).await?; + + trace!( + timestamp = %l2_slot_timestamp, + parent_hash = %parent_hash, + parent_gas_limit_without_anchor = %parent_gas_limit_without_anchor, + parent_timestamp = %parent_timestamp, + base_fee = %base_fee, + "L2 slot info" + ); + + Ok(L2SlotInfoV2::new( + base_fee, + l2_slot_timestamp, + parent_id, + parent_hash, + parent_gas_limit_without_anchor, + parent_timestamp, + )) + } + + async fn get_base_fee(&self, parent_block: Block) -> Result { + if parent_block.header.number() == 0 { + return Ok(taiko_alethia_reth::eip4396::SHASTA_INITIAL_BASE_FEE); + } + + let grandparent_number = parent_block.header.number() - 1; + let grandparent_timestamp = self + .l2_execution_layer + .common() + .get_block_header(BlockNumberOrTag::Number(grandparent_number)) + .await? + .header + .timestamp(); + + let timestamp_diff = parent_block + .header + .timestamp() + .checked_sub(grandparent_timestamp) + .ok_or_else(|| anyhow::anyhow!("Timestamp underflow occurred"))?; + + let base_fee = taiko_alethia_reth::eip4396::calculate_next_block_eip4396_base_fee( + &parent_block.header.inner, + timestamp_diff, + ); + + Ok(base_fee) + } + + #[allow(clippy::too_many_arguments)] + pub async fn advance_head_to_new_l2_block( + &self, + l2_block_payload: L2BlockV2Payload, + l2_slot_context: &L2SlotContext, + anchor_signal_slots: Vec>, + operation_type: OperationType, + ) -> Result { + tracing::debug!( + "Submitting new L2 block to the Taiko driver with {} txs", + l2_block_payload.tx_list.len() + ); + + let anchor_block_params = ( + Checkpoint { + blockNumber: l2_block_payload.anchor_block_id.try_into()?, + blockHash: l2_block_payload.anchor_block_hash, + stateRoot: l2_block_payload.anchor_state_root, + }, + anchor_signal_slots, + ); + + let anchor_tx = self + .l2_execution_layer + .construct_anchor_tx(&l2_slot_context.info, anchor_block_params) + .await + .map_err(|e| { + anyhow::anyhow!( + 
"advance_head_to_new_l2_block: Failed to construct anchor tx: {}", + e + ) + })?; + let tx_list = std::iter::once(anchor_tx) + .chain(l2_block_payload.tx_list.into_iter()) + .collect::>(); + + let tx_list_bytes = l2_tx_lists::encode_and_compress(&tx_list)?; + + let sharing_pctg = self.protocol_config.get_basefee_sharing_pctg(); + + // RealTime: extra data only contains basefee_sharing_pctg (1 byte) + let extra_data = format!("0x{:02x}", sharing_pctg); + + let executable_data = ExecutableData { + base_fee_per_gas: l2_slot_context.info.base_fee(), + block_number: l2_slot_context.info.parent_id() + 1, + extra_data, + fee_recipient: l2_block_payload.coinbase.to_string(), + gas_limit: l2_block_payload.gas_limit_without_anchor + ANCHOR_V3_V4_GAS_LIMIT, + parent_hash: format!("0x{}", hex::encode(l2_slot_context.info.parent_hash())), + timestamp: l2_block_payload.timestamp_sec, + transactions: format!("0x{}", hex::encode(tx_list_bytes)), + }; + + let request_body = BuildPreconfBlockRequestBody { + executable_data, + end_of_sequencing: l2_slot_context.end_of_sequencing, + is_forced_inclusion: false, + }; + + self.driver + .preconf_blocks(request_body, operation_type) + .await + } + + pub fn decode_anchor_id_from_tx_data(data: &[u8]) -> Result { + L2ExecutionLayer::decode_anchor_id_from_tx_data(data) + } + + pub fn get_anchor_tx_data(data: &[u8]) -> Result { + L2ExecutionLayer::get_anchor_tx_data(data) + } +} + +impl Bridgeable for Taiko { + async fn get_balance(&self, address: Address) -> Result { + self.l2_execution_layer + .common() + .get_account_balance(address) + .await + } + + async fn transfer_eth_from_l2_to_l1( + &self, + amount: u128, + dest_chain_id: u64, + address: Address, + bridge_relayer_fee: u64, + ) -> Result<(), Error> { + self.l2_execution_layer + .transfer_eth_from_l2_to_l1(amount, dest_chain_id, address, bridge_relayer_fee) + .await + } +} diff --git a/realtime/src/lib.rs b/realtime/src/lib.rs new file mode 100644 index 00000000..448bb299 --- /dev/null 
+++ b/realtime/src/lib.rs @@ -0,0 +1,159 @@ +mod chain_monitor; +mod l1; +mod l2; +mod node; +pub mod raiko; +mod shared_abi; +mod utils; + +use crate::utils::config::RealtimeConfig; +use anyhow::Error; +use common::{ + batch_builder::BatchBuilderConfig, + config::Config, + config::ConfigTrait, + fork_info::ForkInfo, + l1::{self as common_l1, traits::PreconferProvider}, + l2::engine::{L2Engine, L2EngineConfig}, + metrics, + utils::cancellation_token::CancellationToken, +}; +use l1::execution_layer::ExecutionLayer; +use node::Node; +use std::sync::Arc; +use tokio::sync::mpsc; +use tracing::info; + +pub async fn create_realtime_node( + config: Config, + metrics: Arc, + cancel_token: CancellationToken, + fork_info: ForkInfo, +) -> Result<(), Error> { + info!("Creating RealTime node"); + + if !config.disable_bridging { + return Err(anyhow::anyhow!( + "Bridging is not implemented. Exiting RealTime node creation." + )); + } + + let realtime_config = RealtimeConfig::read_env_variables() + .map_err(|e| anyhow::anyhow!("Failed to read RealTime configuration: {}", e))?; + info!("RealTime config: {}", realtime_config); + + let (transaction_error_sender, transaction_error_receiver) = mpsc::channel(100); + let ethereum_l1 = common_l1::ethereum_l1::EthereumL1::::new( + common_l1::config::EthereumL1Config::new(&config).await?, + l1::config::EthereumL1Config::try_from(realtime_config.clone())?, + transaction_error_sender, + metrics.clone(), + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create EthereumL1: {}", e))?; + + let ethereum_l1 = Arc::new(ethereum_l1); + + let taiko_config = pacaya::l2::config::TaikoConfig::new(&config) + .await + .map_err(|e| anyhow::anyhow!("Failed to create TaikoConfig: {}", e))?; + + let l2_engine = L2Engine::new(L2EngineConfig::new( + &config, + taiko_config.signer.get_address(), + )?) 
+ .map_err(|e| anyhow::anyhow!("Failed to create L2Engine: {}", e))?; + let protocol_config = ethereum_l1.execution_layer.fetch_protocol_config().await?; + + let taiko = crate::l2::taiko::Taiko::new( + ethereum_l1.slot_clock.clone(), + protocol_config.clone(), + metrics.clone(), + taiko_config, + l2_engine, + ) + .await?; + let taiko = Arc::new(taiko); + + let node_config = pacaya::node::config::NodeConfig { + preconf_heartbeat_ms: config.preconf_heartbeat_ms, + handover_window_slots: 8, + handover_start_buffer_ms: 500, + l1_height_lag: 8, + propose_forced_inclusion: false, + simulate_not_submitting_at_the_end_of_epoch: false, + }; + + let max_blocks_per_batch = if config.max_blocks_per_batch == 0 { + taiko_protocol::shasta::constants::PROPOSAL_MAX_BLOCKS.try_into()? + } else { + config.max_blocks_per_batch + }; + + // Use 256-block limit for anchor offset + let max_anchor_height_offset = 256u64; + + let batch_builder_config = BatchBuilderConfig { + max_bytes_size_of_batch: config.max_bytes_size_of_batch, + max_blocks_per_batch, + l1_slot_duration_sec: config.l1_slot_duration_sec, + max_time_shift_between_blocks_sec: config.max_time_shift_between_blocks_sec, + max_anchor_height_offset: max_anchor_height_offset + - config.max_anchor_height_offset_reduction, + default_coinbase: ethereum_l1.execution_layer.get_preconfer_alloy_address(), + preconf_min_txs: config.preconf_min_txs, + preconf_max_skipped_l2_slots: config.preconf_max_skipped_l2_slots, + }; + + // Initialize chain monitor for ProposedAndProved events + let chain_monitor = Arc::new( + chain_monitor::RealtimeChainMonitor::new( + config + .l1_rpc_urls + .first() + .expect("L1 RPC URL is required") + .clone(), + config.taiko_geth_rpc_url.clone(), + realtime_config.realtime_inbox, + cancel_token.clone(), + "ProposedAndProved", + chain_monitor::print_proposed_and_proved_info, + ) + .map_err(|e| anyhow::anyhow!("Failed to create RealtimeChainMonitor: {}", e))?, + ); + chain_monitor + .start() + .await + 
.map_err(|e| anyhow::anyhow!("Failed to start RealtimeChainMonitor: {}", e))?; + + // Read the last proposal hash from L1 + let parent_proposal_hash = ethereum_l1 + .execution_layer + .get_last_proposal_hash() + .await?; + info!("Initial parentProposalHash: {}", parent_proposal_hash); + + let raiko_client = raiko::RaikoClient::new(&realtime_config); + + let node = Node::new( + node_config, + cancel_token.clone(), + ethereum_l1.clone(), + taiko.clone(), + metrics.clone(), + batch_builder_config, + transaction_error_receiver, + fork_info, + parent_proposal_hash, + raiko_client, + protocol_config.basefee_sharing_pctg, + ) + .await + .map_err(|e| anyhow::anyhow!("Failed to create Node: {}", e))?; + + node.entrypoint() + .await + .map_err(|e| anyhow::anyhow!("Failed to start Node: {}", e))?; + + Ok(()) +} diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs new file mode 100644 index 00000000..e8ad59ea --- /dev/null +++ b/realtime/src/node/mod.rs @@ -0,0 +1,493 @@ +pub mod proposal_manager; +use anyhow::Error; +use common::{ + fork_info::ForkInfo, + l1::{ethereum_l1::EthereumL1, transaction_error::TransactionError}, + l2::taiko_driver::{TaikoDriver, models::BuildPreconfBlockResponse}, + metrics::Metrics, + shared::{l2_slot_info_v2::L2SlotContext, l2_tx_lists::PreBuiltTxList}, + utils::{self as common_utils, cancellation_token::CancellationToken}, +}; +use pacaya::node::operator::Status as OperatorStatus; +use pacaya::node::{config::NodeConfig, operator::Operator}; +use std::sync::Arc; +use tracing::{debug, error, info, warn}; + +use crate::l1::execution_layer::ExecutionLayer; +use crate::l2::taiko::Taiko; +use common::batch_builder::BatchBuilderConfig; +use common::l1::traits::PreconferProvider; +use common::shared::head_verifier::HeadVerifier; +use common::shared::l2_slot_info_v2::L2SlotInfoV2; +use proposal_manager::BatchManager; + +use tokio::{ + sync::mpsc::{Receiver, error::TryRecvError}, + time::{Duration, sleep}, +}; + +pub struct Node { + config: 
    /// Builds a RealTime `Node` and its collaborators.
    ///
    /// Wires the epoch `Operator` (who preconfirms / submits when), the
    /// liveness `Watchdog`, the L2 `HeadVerifier` and the `BatchManager`
    /// (batch building + async proof fetch + L1 submission), then eagerly
    /// builds the default KZG settings used for blob handling.
    #[allow(clippy::too_many_arguments)]
    pub async fn new(
        config: NodeConfig,
        cancel_token: CancellationToken,
        ethereum_l1: Arc<EthereumL1<ExecutionLayer>>,
        taiko: Arc<Taiko>,
        metrics: Arc<Metrics>,
        batch_builder_config: BatchBuilderConfig,
        transaction_error_channel: Receiver<TransactionError>,
        fork_info: ForkInfo,
        parent_proposal_hash: alloy::primitives::B256,
        raiko_client: crate::raiko::RaikoClient,
        basefee_sharing_pctg: u8,
    ) -> Result<Self, Error> {
        // Decides the node's role (preconfer/submitter) per slot.
        let operator = Operator::new(
            ethereum_l1.execution_layer.clone(),
            ethereum_l1.slot_clock.clone(),
            taiko.get_driver(),
            config.handover_window_slots,
            config.handover_start_buffer_ms,
            config.simulate_not_submitting_at_the_end_of_epoch,
            cancel_token.clone(),
            fork_info.clone(),
        )
        .map_err(|e| anyhow::anyhow!("Failed to create Operator: {}", e))?;
        // Watchdog armed with half an epoch's worth of L2 slots; incremented
        // on failed heartbeats, reset on success (see preconfirmation_loop).
        let watchdog = common_utils::watchdog::Watchdog::new(
            cancel_token.clone(),
            ethereum_l1.slot_clock.get_l2_slots_per_epoch() / 2,
        );
        let head_verifier = HeadVerifier::default();

        // Owns batch building, Raiko proof fetching and L1 submission.
        let proposal_manager = BatchManager::new(
            config.l1_height_lag,
            batch_builder_config,
            ethereum_l1.clone(),
            taiko.clone(),
            metrics.clone(),
            cancel_token.clone(),
            parent_proposal_hash,
            raiko_client,
            basefee_sharing_pctg,
        )
        .await
        .map_err(|e| anyhow::anyhow!("Failed to create BatchManager: {}", e))?;

        // KZG setup is expensive; pay the cost once up front and log it.
        let start = std::time::Instant::now();
        common::blob::build_default_kzg_settings();
        info!(
            "Setup build_default_kzg_settings in {} milliseconds",
            start.elapsed().as_millis()
        );

        Ok(Self {
            config,
            cancel_token,
            ethereum_l1,
            taiko,
            watchdog,
            operator,
            metrics,
            proposal_manager,
            head_verifier,
            transaction_error_channel,
        })
    }
Error> { + info!("Starting RealTime node"); + + if let Err(err) = self.warmup().await { + error!("Failed to warm up node: {}. Shutting down.", err); + self.cancel_token.cancel_on_critical_error(); + return Err(anyhow::anyhow!(err)); + } + + info!("Node warmup successful"); + + tokio::spawn(async move { + self.preconfirmation_loop().await; + }); + + Ok(()) + } + + async fn preconfirmation_loop(&mut self) { + debug!("Main preconfirmation loop started"); + common_utils::synchronization::synchronize_with_l1_slot_start(&self.ethereum_l1).await; + + let mut interval = + tokio::time::interval(Duration::from_millis(self.config.preconf_heartbeat_ms)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + interval.tick().await; + + if self.cancel_token.is_cancelled() { + info!("Shutdown signal received, exiting main loop..."); + return; + } + + if let Err(err) = self.main_block_preconfirmation_step().await { + error!("Failed to execute main block preconfirmation step: {}", err); + self.watchdog.increment(); + } else { + self.watchdog.reset(); + } + } + } + + async fn main_block_preconfirmation_step(&mut self) -> Result<(), Error> { + let (l2_slot_info, current_status, pending_tx_list) = + self.get_slot_info_and_status().await?; + + // Always poll for completed async submissions (non-blocking) + if let Some(result) = self.proposal_manager.poll_submission_result() { + match result { + Ok(()) => info!("Async submission completed successfully"), + Err(e) => { + if let Some(transaction_error) = e.downcast_ref::() { + self.handle_transaction_error( + transaction_error, + ¤t_status, + &l2_slot_info, + ) + .await?; + } else { + error!("Async submission failed: {}", e); + } + } + } + } + + self.check_transaction_error_channel(¤t_status, &l2_slot_info) + .await?; + + if current_status.is_preconfirmation_start_slot() { + self.head_verifier + .set(l2_slot_info.parent_id(), *l2_slot_info.parent_hash()) + .await; + } + + // Preconfirmation phase — runs 
    /// One heartbeat of the node: poll async submissions, drain transaction
    /// errors, preconfirm a block when acting as preconfer, and kick off a
    /// non-blocking batch submission when acting as submitter.
    async fn main_block_preconfirmation_step(&mut self) -> Result<(), Error> {
        let (l2_slot_info, current_status, pending_tx_list) =
            self.get_slot_info_and_status().await?;

        // Always poll for completed async submissions (non-blocking)
        if let Some(result) = self.proposal_manager.poll_submission_result() {
            match result {
                Ok(()) => info!("Async submission completed successfully"),
                Err(e) => {
                    // Known transaction errors get the dedicated recovery
                    // policy; anything else is only logged.
                    if let Some(transaction_error) = e.downcast_ref::<TransactionError>() {
                        self.handle_transaction_error(
                            transaction_error,
                            &current_status,
                            &l2_slot_info,
                        )
                        .await?;
                    } else {
                        error!("Async submission failed: {}", e);
                    }
                }
            }
        }

        self.check_transaction_error_channel(&current_status, &l2_slot_info)
            .await?;

        // At the start of our preconfirmation window, anchor the head
        // verifier on the currently observed L2 head.
        if current_status.is_preconfirmation_start_slot() {
            self.head_verifier
                .set(l2_slot_info.parent_id(), *l2_slot_info.parent_hash())
                .await;
        }

        // Preconfirmation phase — runs even while proof is being fetched async
        if current_status.is_preconfer() && current_status.is_driver_synced() {
            // An L2 head we did not produce ourselves is fatal: restart.
            if !self
                .head_verifier
                .verify(l2_slot_info.parent_id(), l2_slot_info.parent_hash())
                .await
            {
                self.head_verifier.log_error().await;
                self.cancel_token.cancel_on_critical_error();
                return Err(anyhow::anyhow!(
                    "Unexpected L2 head detected. Restarting node..."
                ));
            }

            let l2_slot_context = L2SlotContext {
                info: l2_slot_info.clone(),
                end_of_sequencing: current_status.is_end_of_sequencing(),
                // No forced inclusions in RealTime mode.
                allow_forced_inclusion: false,
            };

            if self
                .proposal_manager
                .should_new_block_be_created(&pending_tx_list, &l2_slot_context)
            {
                // Only build when there is something to include: pending
                // txs or queued bridge user ops.
                if pending_tx_list
                    .as_ref()
                    .is_some_and(|pre_built_list| pre_built_list.tx_list.len() != 0)
                    || self.proposal_manager.has_pending_user_ops().await
                {
                    let preconfed_block = self
                        .proposal_manager
                        .preconfirm_block(pending_tx_list, &l2_slot_context)
                        .await?;

                    self.verify_preconfed_block(preconfed_block).await?;
                }
            }
        }

        // Submission phase — non-blocking: starts async proof fetch + L1 tx
        if current_status.is_submitter()
            && !self.proposal_manager.is_submission_in_progress()
        {
            if let Err(err) = self
                .proposal_manager
                .try_start_submission(current_status.is_preconfer())
                .await
            {
                if let Some(transaction_error) = err.downcast_ref::<TransactionError>() {
                    self.handle_transaction_error(
                        transaction_error,
                        &current_status,
                        &l2_slot_info,
                    )
                    .await?;
                } else {
                    return Err(err);
                }
            }
        }

        // Cleanup: outside both roles we must not keep unsent batches around.
        if !current_status.is_submitter() && !current_status.is_preconfer() {
            if self.proposal_manager.has_batches() {
                error!(
                    "Resetting batch builder. has batches: {}",
                    self.proposal_manager.has_batches(),
                );
                self.proposal_manager.reset_builder().await?;
            }
        }

        Ok(())
    }
has batches: {}", + self.proposal_manager.has_batches(), + ); + self.proposal_manager.reset_builder().await?; + } + } + + Ok(()) + } + + async fn handle_transaction_error( + &mut self, + error: &TransactionError, + _current_status: &OperatorStatus, + _l2_slot_info: &L2SlotInfoV2, + ) -> Result<(), Error> { + match error { + TransactionError::ReanchorRequired => { + warn!("Unexpected ReanchorRequired error received"); + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "ReanchorRequired error received unexpectedly, exiting" + )) + } + TransactionError::NotConfirmed => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "Transaction not confirmed for a long time, exiting" + )) + } + TransactionError::UnsupportedTransactionType => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "Unsupported transaction type" + )) + } + TransactionError::GetBlockNumberFailed => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!("Failed to get block number from L1")) + } + TransactionError::EstimationTooEarly => { + warn!("Transaction estimation too early"); + Ok(()) + } + TransactionError::InsufficientFunds => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!( + "Transaction reverted with InsufficientFunds error" + )) + } + TransactionError::EstimationFailed => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!("Transaction estimation failed, exiting")) + } + TransactionError::TransactionReverted => { + self.cancel_token.cancel_on_critical_error(); + Err(anyhow::anyhow!("Transaction reverted, exiting")) + } + TransactionError::OldestForcedInclusionDue => { + // No forced inclusions in RealTime, but handle gracefully + warn!("OldestForcedInclusionDue received in RealTime mode, ignoring"); + Ok(()) + } + TransactionError::NotTheOperatorInCurrentEpoch => { + warn!("Propose batch transaction executed too late."); + Ok(()) + } + } + } + + async fn 
    /// Gathers the per-heartbeat inputs: L2 slot info, operator status, and
    /// the pending tx list from the L2 engine.
    ///
    /// Each lookup is kept as a `Result` until after the heartbeat line is
    /// printed, so the log still shows whatever could be fetched before the
    /// first error is propagated via `?` at the end.
    async fn get_slot_info_and_status(
        &mut self,
    ) -> Result<(L2SlotInfoV2, OperatorStatus, Option<PreBuiltTxList>), Error> {
        let l2_slot_info = self.taiko.get_l2_slot_info().await;
        let current_status = match &l2_slot_info {
            Ok(info) => self.operator.get_status(info).await,
            Err(_) => Err(anyhow::anyhow!("Failed to get L2 slot info")),
        };

        let gas_limit_without_anchor = match &l2_slot_info {
            Ok(info) => info.parent_gas_limit_without_anchor(),
            Err(_) => {
                error!("Failed to get L2 slot info set gas_limit_without_anchor to 0");
                // Sentinel: 0 skips the tx-list fetch below.
                0u64
            }
        };

        // Only ask the engine for transactions when we have a usable gas
        // limit for the slot.
        let pending_tx_list = if gas_limit_without_anchor != 0 {
            let batches_ready_to_send = 0;
            match &l2_slot_info {
                Ok(info) => {
                    self.taiko
                        .get_pending_l2_tx_list_from_l2_engine(
                            info.base_fee(),
                            batches_ready_to_send,
                            gas_limit_without_anchor,
                        )
                        .await
                }
                Err(_) => Err(anyhow::anyhow!("Failed to get L2 slot info")),
            }
        } else {
            Ok(None)
        };

        self.print_current_slots_info(
            &current_status,
            &pending_tx_list,
            &l2_slot_info,
            self.proposal_manager.get_number_of_batches(),
        )?;

        // Propagate the first error only after logging the heartbeat line.
        Ok((l2_slot_info?, current_status?, pending_tx_list?))
    }
    /// Emits the single-line heartbeat log: epoch/slot position, pending tx
    /// count, L2 head details, queued batch count and operator status.
    /// Fields that failed to fetch are rendered as "unknown" instead of
    /// aborting the log line.
    fn print_current_slots_info(
        &self,
        current_status: &Result<OperatorStatus, Error>,
        pending_tx_list: &Result<Option<PreBuiltTxList>, Error>,
        l2_slot_info: &Result<L2SlotInfoV2, Error>,
        batches_number: u64,
    ) -> Result<(), Error> {
        let l1_slot = self.ethereum_l1.slot_clock.get_current_slot()?;
        info!(target: "heartbeat",
            "| Epoch: {:<6} | Slot: {:<2} | L2 Slot: {:<2} | {}{} Batches: {batches_number} | {} |",
            self.ethereum_l1.slot_clock.get_epoch_from_slot(l1_slot),
            self.ethereum_l1.slot_clock.slot_of_epoch(l1_slot),
            self.ethereum_l1
                .slot_clock
                .get_current_l2_slot_within_l1_slot()?,
            if let Ok(pending_tx_list) = pending_tx_list {
                format!(
                    "Txs: {:<4} |",
                    pending_tx_list
                        .as_ref()
                        .map_or(0, |tx_list| tx_list.tx_list.len())
                )
            } else {
                "Txs: unknown |".to_string()
            },
            if let Ok(l2_slot_info) = l2_slot_info {
                format!(
                    " Fee: {:<7} | L2: {:<6} | Time: {:<10} | Hash: {} |",
                    l2_slot_info.base_fee(),
                    l2_slot_info.parent_id(),
                    l2_slot_info.slot_timestamp(),
                    // First 8 chars of the 0x-prefixed hash as a short id.
                    &l2_slot_info.parent_hash().to_string()[..8]
                )
            } else {
                " L2 slot info unknown |".to_string()
            },
            if let Ok(status) = current_status {
                status.to_string()
            } else {
                "Unknown".to_string()
            },
        );
        Ok(())
    }
alloy::primitives::B256::ZERO { + info!("RealTimeInbox is active, lastProposalHash: {}", hash); + break; + } + warn!("RealTimeInbox not yet activated. Waiting..."); + sleep(Duration::from_secs(12)).await; + } + + // Wait for the last sent transaction to be executed + self.wait_for_sent_transactions().await?; + + Ok(()) + } + + async fn wait_for_sent_transactions(&self) -> Result<(), Error> { + loop { + let nonce_latest: u64 = self + .ethereum_l1 + .execution_layer + .get_preconfer_nonce_latest() + .await?; + let nonce_pending: u64 = self + .ethereum_l1 + .execution_layer + .get_preconfer_nonce_pending() + .await?; + if nonce_pending == nonce_latest { + break; + } + debug!( + "Waiting for sent transactions to be executed. Nonce Latest: {nonce_latest}, Nonce Pending: {nonce_pending}" + ); + sleep(Duration::from_secs(6)).await; + } + + Ok(()) + } +} diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs new file mode 100644 index 00000000..70b0c65d --- /dev/null +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -0,0 +1,252 @@ +use crate::l1::execution_layer::ExecutionLayer; +use crate::node::proposal_manager::bridge_handler::{UserOpStatus, UserOpStatusStore}; +use crate::node::proposal_manager::proposal::Proposal; +use crate::raiko::{RaikoCheckpoint, RaikoClient, RaikoProofRequest}; +use alloy::primitives::B256; +use anyhow::Error; +use common::l1::ethereum_l1::EthereumL1; +use std::sync::Arc; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use tracing::info; + +pub struct SubmissionResult { + pub new_parent_proposal_hash: B256, +} + +struct InFlightSubmission { + result_rx: oneshot::Receiver>, + handle: JoinHandle<()>, +} + +pub struct AsyncSubmitter { + in_flight: Option, + raiko_client: RaikoClient, + basefee_sharing_pctg: u8, + ethereum_l1: Arc>, +} + +impl AsyncSubmitter { + pub fn new( + raiko_client: RaikoClient, + basefee_sharing_pctg: u8, + ethereum_l1: Arc>, + ) -> 
Self { + Self { + in_flight: None, + raiko_client, + basefee_sharing_pctg, + ethereum_l1, + } + } + + pub fn is_busy(&self) -> bool { + self.in_flight.is_some() + } + + /// Non-blocking check for completed submission. Returns None if idle or still in progress. + pub fn try_recv_result(&mut self) -> Option> { + let in_flight = self.in_flight.as_mut()?; + match in_flight.result_rx.try_recv() { + Ok(result) => { + self.in_flight = None; + Some(result) + } + Err(oneshot::error::TryRecvError::Empty) => None, + Err(oneshot::error::TryRecvError::Closed) => { + self.in_flight = None; + Some(Err(anyhow::anyhow!( + "Submission task panicked or was dropped" + ))) + } + } + } + + /// Submit a proposal asynchronously. Spawns a background task that fetches the ZK proof + /// from Raiko and then sends the L1 transaction. Results are retrieved via `try_recv_result`. + pub fn submit(&mut self, proposal: Proposal, status_store: Option) { + assert!( + !self.is_busy(), + "Cannot submit while another submission is in flight" + ); + + let (result_tx, result_rx) = oneshot::channel(); + let raiko_client = self.raiko_client.clone(); + let basefee_sharing_pctg = self.basefee_sharing_pctg; + let ethereum_l1 = self.ethereum_l1.clone(); + + let handle = tokio::spawn(async move { + let result = submission_task( + proposal, + &raiko_client, + basefee_sharing_pctg, + ethereum_l1, + status_store, + ) + .await; + let _ = result_tx.send(result); + }); + + self.in_flight = Some(InFlightSubmission { result_rx, handle }); + } + + pub fn abort(&mut self) { + if let Some(in_flight) = self.in_flight.take() { + in_flight.handle.abort(); + } + } +} + +async fn submission_task( + mut proposal: Proposal, + raiko_client: &RaikoClient, + basefee_sharing_pctg: u8, + ethereum_l1: Arc>, + status_store: Option, +) -> Result { + // Step 1: Fetch ZK proof from Raiko + if proposal.zk_proof.is_none() { + info!( + "Fetching ZK proof from Raiko for batch with {} blocks", + proposal.l2_blocks.len() + ); + + let 
/// Runs one full submission in the background: fetch the ZK proof from Raiko
/// (unless the proposal already carries one), send the batch to L1, derive
/// the next parent proposal hash, and track bridge user-op statuses through
/// the transaction lifecycle.
async fn submission_task(
    mut proposal: Proposal,
    raiko_client: &RaikoClient,
    basefee_sharing_pctg: u8,
    ethereum_l1: Arc<EthereumL1<ExecutionLayer>>,
    status_store: Option<UserOpStatusStore>,
) -> Result<SubmissionResult, Error> {
    // Step 1: Fetch ZK proof from Raiko
    if proposal.zk_proof.is_none() {
        info!(
            "Fetching ZK proof from Raiko for batch with {} blocks",
            proposal.l2_blocks.len()
        );

        // Block range covered by this proposal; assumes checkpoint.blockNumber
        // is the number of the batch's last L2 block — TODO confirm.
        // NOTE(review): the subtraction underflows if l2_blocks.len() exceeds
        // the checkpoint height; presumably impossible — verify upstream.
        let l2_block_numbers: Vec<u64> = (proposal.checkpoint.blockNumber.to::<u64>()
            - u64::try_from(proposal.l2_blocks.len())?
            + 1
            ..=proposal.checkpoint.blockNumber.to::<u64>())
            .collect();

        let request = RaikoProofRequest {
            l2_block_numbers,
            proof_type: raiko_client.proof_type.clone(),
            max_anchor_block_number: proposal.max_anchor_block_number,
            parent_proposal_hash: format!("0x{}", hex::encode(proposal.parent_proposal_hash)),
            basefee_sharing_pctg,
            network: None,
            l1_network: None,
            prover: None,
            signal_slots: proposal
                .signal_slots
                .iter()
                .map(|s| format!("0x{}", hex::encode(s)))
                .collect(),
            sources: vec![],
            checkpoint: Some(RaikoCheckpoint {
                block_number: proposal.checkpoint.blockNumber.to::<u64>(),
                block_hash: format!("0x{}", hex::encode(proposal.checkpoint.blockHash)),
                state_root: format!("0x{}", hex::encode(proposal.checkpoint.stateRoot)),
            }),
            blob_proof_type: "ProofOfEquivalence".to_string(),
        };

        let proof = raiko_client.get_proof(&request).await?;
        proposal.zk_proof = Some(proof);
    }

    // Step 2: Send L1 transaction
    let user_op_ids: Vec<u64> = proposal.user_ops.iter().map(|op| op.id).collect();
    // Status tracking only applies when there are ops AND a store to write to.
    let has_user_ops = !user_op_ids.is_empty() && status_store.is_some();

    // Optional oneshot channels: first the broadcast tx hash, then the
    // mined success/failure result.
    let (tx_hash_sender, tx_hash_receiver) = if has_user_ops {
        let (s, r) = tokio::sync::oneshot::channel();
        (Some(s), Some(r))
    } else {
        (None, None)
    };
    let (tx_result_sender, tx_result_receiver) = if has_user_ops {
        let (s, r) = tokio::sync::oneshot::channel();
        (Some(s), Some(r))
    } else {
        (None, None)
    };

    if let Err(err) = ethereum_l1
        .execution_layer
        .send_batch_to_l1(proposal.clone(), tx_hash_sender, tx_result_sender)
        .await
    {
        // Mark user ops as rejected on failure
        if let Some(ref store) = status_store {
            let reason = format!("L1 multicall failed: {}", err);
            for op in &proposal.user_ops {
                store.set(
                    op.id,
                    &UserOpStatus::Rejected {
                        reason: reason.clone(),
                    },
                );
            }
        }
        return Err(err);
    }

    // Step 3: Compute new parent proposal hash
    // NOTE(review): this derivation must stay byte-compatible with the
    // on-chain proposal-hash computation in RealTimeInbox — confirm.
    let new_parent_proposal_hash = alloy::primitives::keccak256(
        &alloy::sol_types::SolValue::abi_encode(&(
            proposal.parent_proposal_hash,
            proposal.max_anchor_block_number,
            proposal.max_anchor_block_hash,
        )),
    );

    // Step 4: Spawn user-op status tracker
    if let (Some(hash_rx), Some(result_rx), Some(store)) =
        (tx_hash_receiver, tx_result_receiver, status_store)
    {
        tokio::spawn(async move {
            // Phase 1: broadcast tx hash arrives → Processing.
            let tx_hash = match hash_rx.await {
                Ok(tx_hash) => {
                    for id in &user_op_ids {
                        store.set(*id, &UserOpStatus::Processing { tx_hash });
                    }
                    Some(tx_hash)
                }
                Err(_) => {
                    for id in &user_op_ids {
                        store.set(
                            *id,
                            &UserOpStatus::Rejected {
                                reason: "Transaction failed to send".to_string(),
                            },
                        );
                    }
                    None
                }
            };

            // Phase 2: mined result → Executed / Rejected.
            if tx_hash.is_some() {
                match result_rx.await {
                    Ok(true) => {
                        for id in &user_op_ids {
                            store.set(*id, &UserOpStatus::Executed);
                        }
                    }
                    Ok(false) => {
                        for id in &user_op_ids {
                            store.set(
                                *id,
                                &UserOpStatus::Rejected {
                                    reason: "L1 multicall reverted".to_string(),
                                },
                            );
                        }
                    }
                    Err(_) => {
                        for id in &user_op_ids {
                            store.set(
                                *id,
                                &UserOpStatus::Rejected {
                                    reason: "Transaction monitor dropped".to_string(),
                                },
                            );
                        }
                    }
                }
            }
        });
    }

    Ok(SubmissionResult {
        new_parent_proposal_hash,
    })
}
    /// Returns whether `l2_draft_block` still fits into the batch under
    /// construction (byte budget, block-count budget, time-shift window).
    ///
    /// NOTE(review): despite the `can_` name this takes `&mut self` and may
    /// compress the current batch as a side effect while probing the byte
    /// budget — callers must not assume it is a pure predicate.
    pub fn can_consume_l2_block(&mut self, l2_draft_block: &L2BlockV2Draft) -> bool {
        let is_time_shift_expired = self.is_time_shift_expired(l2_draft_block.timestamp_sec);
        self.current_proposal.as_mut().is_some_and(|batch| {
            // More than u16::MAX blocks can never fit in a proposal.
            let new_block_count = match u16::try_from(batch.l2_blocks.len() + 1) {
                Ok(n) => n,
                Err(_) => return false,
            };

            let mut new_total_bytes =
                batch.total_bytes + l2_draft_block.prebuilt_tx_list.bytes_length;

            if !self.config.is_within_bytes_limit(new_total_bytes) {
                // First attempt: compress what we already have and re-check.
                batch.compress();
                new_total_bytes = batch.total_bytes + l2_draft_block.prebuilt_tx_list.bytes_length;
                if !self.config.is_within_bytes_limit(new_total_bytes) {
                    // Second attempt: compress a clone that includes the new
                    // block — compressing the combined payload may fit where
                    // the sum of separately compressed parts does not.
                    let start = std::time::Instant::now();
                    let mut batch_clone = batch.clone();
                    batch_clone.add_l2_draft_block(l2_draft_block.clone());
                    batch_clone.compress();
                    new_total_bytes = batch_clone.total_bytes;
                    debug!(
                        "can_consume_l2_block: Second compression took {} ms, new total bytes: {}",
                        start.elapsed().as_millis(),
                        new_total_bytes
                    );
                }
            }

            self.config.is_within_bytes_limit(new_total_bytes)
                && self.config.is_within_block_limit(new_block_count)
                && !is_time_shift_expired
        })
    }
0, + coinbase: self.config.default_coinbase, + max_anchor_block_number: anchor_block.id(), + max_anchor_block_hash: anchor_block.hash(), + checkpoint: Checkpoint::default(), + parent_proposal_hash, + user_ops: vec![], + signal_slots: vec![], + l1_calls: vec![], + zk_proof: None, + }); + } + + pub fn add_l2_draft_block( + &mut self, + l2_draft_block: L2BlockV2Draft, + ) -> Result { + if let Some(current_proposal) = self.current_proposal.as_mut() { + let payload = current_proposal.add_l2_draft_block(l2_draft_block); + + debug!( + "Added L2 draft block to batch: l2 blocks: {}, total bytes: {}", + current_proposal.l2_blocks.len(), + current_proposal.total_bytes + ); + Ok(payload) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn add_user_op(&mut self, user_op_data: UserOp) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.user_ops.push(user_op_data.clone()); + info!("Added user op: {:?}", user_op_data); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn add_signal_slot(&mut self, signal_slot: FixedBytes<32>) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.signal_slots.push(signal_slot); + info!("Added signal slot: {:?}", signal_slot); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn add_l1_call(&mut self, l1_call: L1Call) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.l1_calls.push(l1_call.clone()); + info!("Added L1 call: {:?}", l1_call); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn set_proposal_checkpoint(&mut self, checkpoint: Checkpoint) -> Result<&Proposal, Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.checkpoint = checkpoint.clone(); + debug!("Update 
proposal checkpoint: {:?}", checkpoint); + Ok(current_proposal) + } else { + Err(anyhow::anyhow!("No current batch")) + } + } + + pub fn get_current_proposal_last_block_timestamp(&self) -> Option { + self.current_proposal + .as_ref() + .and_then(|p| p.l2_blocks.last().map(|b| b.timestamp_sec)) + } + + pub fn remove_last_l2_block(&mut self) { + if let Some(current_proposal) = self.current_proposal.as_mut() { + let removed_block = current_proposal.l2_blocks.pop(); + if let Some(removed_block) = removed_block { + current_proposal.total_bytes -= removed_block.prebuilt_tx_list.bytes_length; + if current_proposal.l2_blocks.is_empty() { + self.current_proposal = None; + } + debug!( + "Removed L2 block from batch: {} txs, {} bytes", + removed_block.prebuilt_tx_list.tx_list.len(), + removed_block.prebuilt_tx_list.bytes_length + ); + } + } + } + + pub fn is_empty(&self) -> bool { + trace!( + "batch_builder::is_empty: current_proposal is none: {}, proposals_to_send len: {}", + self.current_proposal.is_none(), + self.proposals_to_send.len() + ); + self.current_proposal.is_none() && self.proposals_to_send.is_empty() + } + + /// Finalize the current batch if appropriate for submission. + pub fn finalize_if_needed(&mut self, submit_only_full_batches: bool) { + if self.current_proposal.is_some() + && (!submit_only_full_batches + || !self.config.is_within_block_limit( + u16::try_from( + self.current_proposal + .as_ref() + .map(|b| b.l2_blocks.len()) + .unwrap_or(0), + ) + .unwrap_or(u16::MAX) + + 1, + )) + { + self.finalize_current_batch(); + } + } + + /// Pop the oldest finalized batch, stamping it with the current parent_proposal_hash. + pub fn pop_oldest_batch(&mut self, parent_proposal_hash: B256) -> Option { + if let Some(mut batch) = self.proposals_to_send.pop_front() { + batch.parent_proposal_hash = parent_proposal_hash; + Some(batch) + } else { + None + } + } + + /// Re-queue a batch at the front (e.g., when submission couldn't start). 
+ pub fn push_front_batch(&mut self, batch: Proposal) { + self.proposals_to_send.push_front(batch); + } + + pub fn is_time_shift_expired(&self, current_l2_slot_timestamp: u64) -> bool { + if let Some(current_proposal) = self.current_proposal.as_ref() + && let Some(last_block) = current_proposal.l2_blocks.last() + { + return current_l2_slot_timestamp - last_block.timestamp_sec + > self.config.max_time_shift_between_blocks_sec; + } + false + } + + pub fn is_time_shift_between_blocks_expiring(&self, current_l2_slot_timestamp: u64) -> bool { + if let Some(current_proposal) = self.current_proposal.as_ref() { + if let Some(last_block) = current_proposal.l2_blocks.last() { + if current_l2_slot_timestamp < last_block.timestamp_sec { + warn!("Preconfirmation timestamp is before the last block timestamp"); + return false; + } + return self.is_the_last_l1_slot_to_add_an_empty_l2_block( + current_l2_slot_timestamp, + last_block.timestamp_sec, + ); + } + } + false + } + + fn is_the_last_l1_slot_to_add_an_empty_l2_block( + &self, + current_l2_slot_timestamp: u64, + last_block_timestamp: u64, + ) -> bool { + current_l2_slot_timestamp - last_block_timestamp + >= self.config.max_time_shift_between_blocks_sec - self.config.l1_slot_duration_sec + } + + pub fn is_greater_than_max_anchor_height_offset(&self) -> Result { + if let Some(current_proposal) = self.current_proposal.as_ref() { + let current_l1_block = self.slot_clock.get_current_slot()?; + if current_l1_block > current_proposal.max_anchor_block_number { + let offset = current_l1_block - current_proposal.max_anchor_block_number; + return Ok(offset > self.config.max_anchor_height_offset); + } + } + Ok(false) + } + + fn is_empty_block_required(&self, preconfirmation_timestamp: u64) -> bool { + self.is_time_shift_between_blocks_expiring(preconfirmation_timestamp) + } + + pub fn get_number_of_batches(&self) -> u64 { + self.proposals_to_send.len() as u64 + + if self.current_proposal.is_some() { + 1 + } else { + 0 + } + } + + pub fn 
finalize_current_batch(&mut self) { + if let Some(batch) = self.current_proposal.take() + && !batch.l2_blocks.is_empty() + { + self.proposals_to_send.push_back(batch); + } + } + + pub fn should_new_block_be_created( + &self, + pending_tx_list: &Option, + current_l2_slot_timestamp: u64, + end_of_sequencing: bool, + ) -> bool { + let number_of_pending_txs = pending_tx_list + .as_ref() + .map(|tx_list| tx_list.tx_list.len()) + .unwrap_or(0) as u64; + + if self.is_empty_block_required(current_l2_slot_timestamp) || end_of_sequencing { + return true; + } + + if number_of_pending_txs >= self.config.preconf_min_txs { + return true; + } + + if let Some(current_proposal) = self.current_proposal.as_ref() + && let Some(last_block) = current_proposal.l2_blocks.last() + { + let number_of_l2_slots = + (current_l2_slot_timestamp.saturating_sub(last_block.timestamp_sec)) * 1000 + / self.slot_clock.get_preconf_heartbeat_ms(); + return number_of_l2_slots > self.config.preconf_max_skipped_l2_slots; + } + + true + } +} + +use common::shared::l2_tx_lists::PreBuiltTxList; diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs new file mode 100644 index 00000000..e544cbfc --- /dev/null +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -0,0 +1,255 @@ +use crate::l2::taiko::Taiko; +use crate::shared_abi::bindings::IBridge::Message; +use crate::{ + l1::execution_layer::{ExecutionLayer, L1BridgeHandlerOps}, + l2::execution_layer::L2BridgeHandlerOps, +}; +use alloy::primitives::{Address, Bytes, FixedBytes}; +use alloy::signers::Signer; +use anyhow::Result; +use common::{l1::ethereum_l1::EthereumL1, utils::cancellation_token::CancellationToken}; +use jsonrpsee::server::{RpcModule, ServerBuilder}; +use serde::{Deserialize, Serialize}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::mpsc::{self, Receiver}; +use tracing::{error, info, warn}; + +#[derive(Debug, 
Clone, Serialize, Deserialize)] +#[serde(tag = "status")] +pub enum UserOpStatus { + Pending, + Processing { tx_hash: FixedBytes<32> }, + Rejected { reason: String }, + Executed, +} + +/// Disk-backed user op status store using sled. +#[derive(Clone)] +pub struct UserOpStatusStore { + db: sled::Db, +} + +impl UserOpStatusStore { + pub fn open(path: &str) -> Result { + let db = sled::open(path) + .map_err(|e| anyhow::anyhow!("Failed to open user op status store: {}", e))?; + Ok(Self { db }) + } + + pub fn set(&self, id: u64, status: &UserOpStatus) { + if let Ok(value) = serde_json::to_vec(status) { + if let Err(e) = self.db.insert(id.to_be_bytes(), value) { + error!("Failed to write user op status: {}", e); + } + } + } + + pub fn get(&self, id: u64) -> Option { + self.db + .get(id.to_be_bytes()) + .ok() + .flatten() + .and_then(|v| serde_json::from_slice(&v).ok()) + } + + pub fn remove(&self, id: u64) { + let _ = self.db.remove(id.to_be_bytes()); + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct UserOp { + #[serde(default)] + pub id: u64, + pub submitter: Address, + pub calldata: Bytes, +} + +// Data required to build the L1 call transaction initiated by an L2 contract via the bridge +#[derive(Clone, Debug)] +pub struct L1Call { + pub message_from_l2: Message, + pub signal_slot_proof: Bytes, +} + +// Data required to build the L2 call transaction initiated by an L1 contract via the bridge +#[derive(Clone, Debug)] +pub struct L2Call { + pub message_from_l1: Message, + pub signal_slot_on_l2: FixedBytes<32>, +} + +#[derive(Clone)] +struct BridgeRpcContext { + tx: mpsc::Sender, + status_store: UserOpStatusStore, + next_id: Arc, +} + +pub struct BridgeHandler { + ethereum_l1: Arc>, + taiko: Arc, + rx: Receiver, + l1_call_proof_signer: alloy::signers::local::PrivateKeySigner, + status_store: UserOpStatusStore, +} + +impl BridgeHandler { + pub async fn new( + addr: SocketAddr, + ethereum_l1: Arc>, + taiko: Arc, + cancellation_token: CancellationToken, + ) -> 
Result { + let (tx, rx) = mpsc::channel::(1024); + let status_store = UserOpStatusStore::open("data/user_op_status")?; + + let rpc_context = BridgeRpcContext { + tx, + status_store: status_store.clone(), + next_id: Arc::new(AtomicU64::new(1)), + }; + + let server = ServerBuilder::default() + .build(addr) + .await + .map_err(|e| anyhow::anyhow!("Failed to build RPC server: {}", e))?; + + let mut module = RpcModule::new(rpc_context); + + module.register_async_method("surge_sendUserOp", |params, ctx, _| async move { + let mut user_op: UserOp = params.parse()?; + let id = ctx.next_id.fetch_add(1, Ordering::Relaxed); + user_op.id = id; + + info!( + "Received UserOp: id={}, submitter={:?}, calldata_len={}", + id, + user_op.submitter, + user_op.calldata.len() + ); + + ctx.status_store.set(id, &UserOpStatus::Pending); + + ctx.tx.send(user_op).await.map_err(|e| { + error!("Failed to send UserOp to queue: {}", e); + ctx.status_store.remove(id); + jsonrpsee::types::ErrorObjectOwned::owned( + -32000, + "Failed to queue user operation", + Some(format!("{}", e)), + ) + })?; + + Ok::(id) + })?; + + module.register_async_method("surge_userOpStatus", |params, ctx, _| async move { + let id: u64 = params.one()?; + + match ctx.status_store.get(id) { + Some(status) => Ok::( + serde_json::to_value(status).map_err(|e| { + jsonrpsee::types::ErrorObjectOwned::owned( + -32603, + "Serialization error", + Some(format!("{}", e)), + ) + })?, + ), + None => Err(jsonrpsee::types::ErrorObjectOwned::owned( + -32001, + "UserOp not found", + Some(format!("No user operation with id {}", id)), + )), + } + })?; + + info!("Bridge handler RPC server starting on {}", addr); + let handle = server.start(module); + + tokio::spawn(async move { + cancellation_token.cancelled().await; + info!("Cancellation token triggered, stopping bridge handler RPC server"); + handle.stop().ok(); + }); + + Ok(Self { + ethereum_l1, + taiko, + rx, + // Surge: Hard coding the private key for the POC + l1_call_proof_signer: 
alloy::signers::local::PrivateKeySigner::from_bytes( + &"0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + .parse::>()?, + )?, + status_store, + }) + } + + pub fn status_store(&self) -> UserOpStatusStore { + self.status_store.clone() + } + + pub async fn next_user_op_and_l2_call( + &mut self, + ) -> Result, anyhow::Error> { + if let Ok(user_op) = self.rx.try_recv() { + if let Some((message_from_l1, signal_slot_on_l2)) = self + .ethereum_l1 + .execution_layer + .find_message_and_signal_slot(user_op.clone()) + .await? + { + return Ok(Some(( + user_op, + L2Call { + message_from_l1, + signal_slot_on_l2, + }, + ))); + } + + warn!( + "UserOp id={} rejected: no L2 call found in user op", + user_op.id + ); + self.status_store.set( + user_op.id, + &UserOpStatus::Rejected { + reason: "No L2 call found in user op".to_string(), + }, + ); + } + + Ok(None) + } + + pub async fn find_l1_call(&mut self, block_id: u64) -> Result, anyhow::Error> { + if let Some((message_from_l2, signal_slot)) = self + .taiko + .l2_execution_layer() + .find_message_and_signal_slot(block_id) + .await? 
+ { + let signature = self.l1_call_proof_signer.sign_hash(&signal_slot).await?; + + let mut signal_slot_proof = [0_u8; 65]; + signal_slot_proof[..32].copy_from_slice(signature.r().to_be_bytes::<32>().as_slice()); + signal_slot_proof[32..64].copy_from_slice(signature.s().to_be_bytes::<32>().as_slice()); + signal_slot_proof[64] = (signature.v() as u8) + 27; + + return Ok(Some(L1Call { + message_from_l2, + signal_slot_proof: Bytes::from(signal_slot_proof), + })); + } + + Ok(None) + } + + pub fn has_pending_user_ops(&self) -> bool { + !self.rx.is_empty() + } +} diff --git a/realtime/src/node/proposal_manager/l2_block_payload.rs b/realtime/src/node/proposal_manager/l2_block_payload.rs new file mode 100644 index 00000000..6cb8e07a --- /dev/null +++ b/realtime/src/node/proposal_manager/l2_block_payload.rs @@ -0,0 +1,12 @@ +use alloy::primitives::B256; +use alloy::rpc::types::Transaction; + +pub struct L2BlockV2Payload { + pub coinbase: alloy::primitives::Address, + pub tx_list: Vec, + pub timestamp_sec: u64, + pub gas_limit_without_anchor: u64, + pub anchor_block_id: u64, + pub anchor_block_hash: B256, + pub anchor_state_root: B256, +} diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs new file mode 100644 index 00000000..dc24628b --- /dev/null +++ b/realtime/src/node/proposal_manager/mod.rs @@ -0,0 +1,421 @@ +mod async_submitter; +mod batch_builder; +pub mod bridge_handler; +pub mod l2_block_payload; +pub mod proposal; + +use crate::l1::bindings::ICheckpointStore::Checkpoint; +use crate::l2::execution_layer::L2BridgeHandlerOps; +use crate::node::proposal_manager::bridge_handler::UserOp; +use crate::raiko::RaikoClient; +use crate::{ + l1::execution_layer::ExecutionLayer, + l2::taiko::Taiko, +}; +use alloy::primitives::{B256, FixedBytes}; +use alloy::primitives::aliases::U48; +use anyhow::Error; +use async_submitter::AsyncSubmitter; +use batch_builder::BatchBuilder; +use bridge_handler::BridgeHandler; +use 
common::metrics::Metrics; +use common::{batch_builder::BatchBuilderConfig, shared::l2_slot_info_v2::L2SlotContext}; +use common::{ + l1::{ethereum_l1::EthereumL1, traits::ELTrait}, + l2::taiko_driver::{OperationType, models::BuildPreconfBlockResponse}, + shared::{ + anchor_block_info::AnchorBlockInfo, + l2_block_v2::L2BlockV2Draft, + l2_tx_lists::PreBuiltTxList, + }, + utils::cancellation_token::CancellationToken, +}; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::Mutex; +use tracing::{debug, error, info, warn}; + +use crate::node::L2SlotInfoV2; + +const MIN_ANCHOR_OFFSET: u64 = 2; + +pub struct BatchManager { + batch_builder: BatchBuilder, + async_submitter: AsyncSubmitter, + bridge_handler: Arc>, + ethereum_l1: Arc>, + pub taiko: Arc, + l1_height_lag: u64, + metrics: Arc, + cancel_token: CancellationToken, + parent_proposal_hash: B256, +} + +impl BatchManager { + #[allow(clippy::too_many_arguments)] + pub async fn new( + l1_height_lag: u64, + config: BatchBuilderConfig, + ethereum_l1: Arc>, + taiko: Arc, + metrics: Arc, + cancel_token: CancellationToken, + parent_proposal_hash: B256, + raiko_client: RaikoClient, + basefee_sharing_pctg: u8, + ) -> Result { + info!( + "Batch builder config:\n\ + max_bytes_size_of_batch: {}\n\ + max_blocks_per_batch: {}\n\ + l1_slot_duration_sec: {}\n\ + max_time_shift_between_blocks_sec: {}\n\ + max_anchor_height_offset: {}", + config.max_bytes_size_of_batch, + config.max_blocks_per_batch, + config.l1_slot_duration_sec, + config.max_time_shift_between_blocks_sec, + config.max_anchor_height_offset, + ); + + let bridge_addr: SocketAddr = "127.0.0.1:4545".parse()?; + let bridge_handler = Arc::new(Mutex::new( + BridgeHandler::new( + bridge_addr, + ethereum_l1.clone(), + taiko.clone(), + cancel_token.clone(), + ) + .await?, + )); + + let async_submitter = AsyncSubmitter::new( + raiko_client, + basefee_sharing_pctg, + ethereum_l1.clone(), + ); + + Ok(Self { + batch_builder: BatchBuilder::new( + config, + 
ethereum_l1.slot_clock.clone(), + metrics.clone(), + ), + async_submitter, + bridge_handler, + ethereum_l1, + taiko, + l1_height_lag, + metrics, + cancel_token, + parent_proposal_hash, + }) + } + + /// Non-blocking poll: check if the in-flight submission has completed. + /// On success, updates `parent_proposal_hash`. Returns None if idle or still in progress. + pub fn poll_submission_result(&mut self) -> Option> { + match self.async_submitter.try_recv_result() { + Some(Ok(result)) => { + info!( + "Submission completed. New parent proposal hash: {}", + result.new_parent_proposal_hash + ); + self.parent_proposal_hash = result.new_parent_proposal_hash; + Some(Ok(())) + } + Some(Err(e)) => Some(Err(e)), + None => None, + } + } + + /// Kick off an async submission if there's a finalized batch ready and the submitter is idle. + pub async fn try_start_submission( + &mut self, + submit_only_full_batches: bool, + ) -> Result<(), Error> { + if self.async_submitter.is_busy() { + return Ok(()); + } + + self.batch_builder.finalize_if_needed(submit_only_full_batches); + + let Some(batch) = self.batch_builder.pop_oldest_batch(self.parent_proposal_hash) else { + return Ok(()); + }; + + // Check no L1 tx already in progress + if self + .ethereum_l1 + .execution_layer + .is_transaction_in_progress() + .await? + { + debug!("Cannot submit batch, L1 transaction already in progress. 
Re-queuing."); + self.batch_builder.push_front_batch(batch); + return Ok(()); + } + + let status_store = self.bridge_handler.lock().await.status_store(); + + info!( + "Starting async submission: {} blocks, parent_hash: {}", + batch.l2_blocks.len(), + batch.parent_proposal_hash, + ); + + self.async_submitter.submit(batch, Some(status_store)); + Ok(()) + } + + pub fn is_submission_in_progress(&self) -> bool { + self.async_submitter.is_busy() + } + + pub fn should_new_block_be_created( + &self, + pending_tx_list: &Option, + l2_slot_context: &L2SlotContext, + ) -> bool { + self.batch_builder.should_new_block_be_created( + pending_tx_list, + l2_slot_context.info.slot_timestamp(), + l2_slot_context.end_of_sequencing, + ) + } + + pub async fn preconfirm_block( + &mut self, + pending_tx_list: Option, + l2_slot_context: &L2SlotContext, + ) -> Result { + let result = self + .add_new_l2_block( + pending_tx_list.unwrap_or_else(PreBuiltTxList::empty), + l2_slot_context, + OperationType::Preconfirm, + ) + .await?; + if self + .batch_builder + .is_greater_than_max_anchor_height_offset()? 
+ { + info!("Maximum allowed anchor height offset exceeded, finalizing current batch."); + self.batch_builder.finalize_current_batch(); + } + + Ok(result) + } + + async fn add_new_l2_block( + &mut self, + prebuilt_tx_list: PreBuiltTxList, + l2_slot_context: &L2SlotContext, + operation_type: OperationType, + ) -> Result { + let timestamp = l2_slot_context.info.slot_timestamp(); + if let Some(last_block_timestamp) = self + .batch_builder + .get_current_proposal_last_block_timestamp() + && timestamp == last_block_timestamp + { + return Err(anyhow::anyhow!( + "Cannot add another block with the same timestamp as the last block, timestamp: {timestamp}, last block timestamp: {last_block_timestamp}" + )); + } + + info!( + "Adding new L2 block id: {}, timestamp: {}", + l2_slot_context.info.parent_id() + 1, + timestamp, + ); + + let l2_draft_block = L2BlockV2Draft { + prebuilt_tx_list: prebuilt_tx_list.clone(), + timestamp_sec: timestamp, + gas_limit_without_anchor: l2_slot_context.info.parent_gas_limit_without_anchor(), + }; + + if !self.batch_builder.can_consume_l2_block(&l2_draft_block) { + let _ = self.create_new_batch().await?; + } + + let preconfed_block = self + .add_draft_block_to_proposal(l2_draft_block, l2_slot_context, operation_type) + .await?; + + Ok(preconfed_block) + } + + pub async fn has_pending_user_ops(&self) -> bool { + self.bridge_handler.lock().await.has_pending_user_ops() + } + + async fn add_pending_l2_call_to_draft_block( + &mut self, + l2_draft_block: &mut L2BlockV2Draft, + ) -> Result)>, anyhow::Error> { + if let Some((user_op_data, l2_call)) = self + .bridge_handler + .lock() + .await + .next_user_op_and_l2_call() + .await? 
+ { + info!("Processing pending L2 call: {:?}", l2_call); + + let l2_call_bridge_tx = self + .taiko + .l2_execution_layer() + .construct_l2_call_tx(l2_call.message_from_l1) + .await?; + + info!( + "Inserting L2 call bridge transaction into tx list: {:?}", + l2_call_bridge_tx + ); + + l2_draft_block + .prebuilt_tx_list + .tx_list + .push(l2_call_bridge_tx); + + return Ok(Some((user_op_data, l2_call.signal_slot_on_l2))); + } + + Ok(None) + } + + async fn add_draft_block_to_proposal( + &mut self, + mut l2_draft_block: L2BlockV2Draft, + l2_slot_context: &L2SlotContext, + operation_type: OperationType, + ) -> Result { + let mut anchor_signal_slots: Vec> = vec![]; + + debug!("Checking for pending L2 calls"); + if let Some((user_op_data, signal_slot)) = self + .add_pending_l2_call_to_draft_block(&mut l2_draft_block) + .await? + { + self.batch_builder.add_user_op(user_op_data)?; + self.batch_builder.add_signal_slot(signal_slot)?; + anchor_signal_slots.push(signal_slot); + } else { + debug!("No pending L2 calls"); + } + let payload = self.batch_builder.add_l2_draft_block(l2_draft_block)?; + + match self + .taiko + .advance_head_to_new_l2_block( + payload, + l2_slot_context, + anchor_signal_slots, + operation_type, + ) + .await + { + Ok(preconfed_block) => { + self.batch_builder.set_proposal_checkpoint(Checkpoint { + blockNumber: U48::from(preconfed_block.number), + stateRoot: preconfed_block.state_root, + blockHash: preconfed_block.hash, + })?; + + debug!("Checking for initiated L1 calls"); + if let Some(l1_call) = self + .bridge_handler + .lock() + .await + .find_l1_call(preconfed_block.number) + .await? 
+ { + self.batch_builder.add_l1_call(l1_call)?; + } else { + debug!("No L1 calls initiated"); + } + + Ok(preconfed_block) + } + Err(err) => { + error!("Failed to advance head to new L2 block: {}", err); + self.remove_last_l2_block(); + Err(anyhow::anyhow!( + "Failed to advance head to new L2 block: {}", + err + )) + } + } + } + + async fn create_new_batch(&mut self) -> Result { + let last_anchor_id = self + .taiko + .l2_execution_layer() + .get_last_synced_anchor_block_id_from_geth() + .await + .unwrap_or_else(|e| { + warn!("Failed to get last synced anchor block ID from Taiko Geth: {e}"); + 0 + }); + let anchor_block_info = AnchorBlockInfo::from_chain_state( + self.ethereum_l1.execution_layer.common(), + self.l1_height_lag, + last_anchor_id, + MIN_ANCHOR_OFFSET, + ) + .await?; + + let anchor_block_id = anchor_block_info.id(); + // Use B256::ZERO as placeholder -- real parent hash is stamped at submission time + self.batch_builder + .create_new_batch(anchor_block_info, B256::ZERO); + + Ok(anchor_block_id) + } + + fn remove_last_l2_block(&mut self) { + self.batch_builder.remove_last_l2_block(); + } + + pub async fn reset_builder(&mut self) -> Result<(), Error> { + warn!("Resetting batch builder"); + + self.async_submitter.abort(); + + self.batch_builder = batch_builder::BatchBuilder::new( + self.batch_builder.get_config().clone(), + self.ethereum_l1.slot_clock.clone(), + self.metrics.clone(), + ); + + Ok(()) + } + + pub fn has_batches(&self) -> bool { + !self.batch_builder.is_empty() + } + + pub fn get_number_of_batches(&self) -> u64 { + self.batch_builder.get_number_of_batches() + } + + pub async fn reanchor_block( + &mut self, + pending_tx_list: PreBuiltTxList, + l2_slot_info: L2SlotInfoV2, + ) -> Result { + let l2_slot_context = L2SlotContext { + info: l2_slot_info, + end_of_sequencing: false, + allow_forced_inclusion: false, + }; + + let block = self + .add_new_l2_block(pending_tx_list, &l2_slot_context, OperationType::Reanchor) + .await?; + + Ok(block) + } +} 
diff --git a/realtime/src/node/proposal_manager/proposal.rs b/realtime/src/node/proposal_manager/proposal.rs new file mode 100644 index 00000000..b659d1da --- /dev/null +++ b/realtime/src/node/proposal_manager/proposal.rs @@ -0,0 +1,109 @@ +use crate::l1::bindings::ICheckpointStore::Checkpoint; +use crate::node::proposal_manager::{ + bridge_handler::{L1Call, UserOp}, + l2_block_payload::L2BlockV2Payload, +}; +use alloy::primitives::{Address, B256, FixedBytes}; +use common::shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}; +use std::collections::VecDeque; +use std::time::Instant; +use taiko_protocol::shasta::manifest::{BlockManifest, DerivationSourceManifest}; +use tracing::{debug, warn}; + +pub type Proposals = VecDeque; + +#[derive(Default, Clone)] +pub struct Proposal { + pub l2_blocks: Vec, + pub total_bytes: u64, + pub coinbase: Address, + + // RealTime: maxAnchor instead of anchor + pub max_anchor_block_number: u64, + pub max_anchor_block_hash: B256, + + // Proof fields + pub checkpoint: Checkpoint, + pub parent_proposal_hash: B256, + + // Surge POC fields (carried over) + pub user_ops: Vec, + pub signal_slots: Vec>, + pub l1_calls: Vec, + + // ZK proof (populated after Raiko call) + pub zk_proof: Option>, +} + +impl Proposal { + pub fn compress(&mut self) { + let start = Instant::now(); + + let mut block_manifests = >::with_capacity(self.l2_blocks.len()); + for l2_block in &self.l2_blocks { + block_manifests.push(BlockManifest { + timestamp: l2_block.timestamp_sec, + coinbase: l2_block.coinbase, + anchor_block_number: l2_block.anchor_block_number, + gas_limit: l2_block.gas_limit_without_anchor, + transactions: l2_block + .prebuilt_tx_list + .tx_list + .iter() + .map(|tx| tx.clone().into()) + .collect(), + }); + } + + let manifest = DerivationSourceManifest { + blocks: block_manifests, + }; + + let manifest_data = match manifest.encode_and_compress() { + Ok(data) => data, + Err(err) => { + warn!("Failed to compress proposal manifest: {err}"); + return; + } + 
}; + + debug!( + "Proposal compression completed in {} ms. Total bytes before: {}. Total bytes after: {}.", + start.elapsed().as_millis(), + self.total_bytes, + manifest_data.len() + ); + + self.total_bytes = manifest_data.len() as u64; + } + + fn create_block_from_draft(&mut self, l2_draft_block: L2BlockV2Draft) -> L2BlockV2 { + L2BlockV2::new_from( + l2_draft_block.prebuilt_tx_list, + l2_draft_block.timestamp_sec, + self.coinbase, + self.max_anchor_block_number, + l2_draft_block.gas_limit_without_anchor, + ) + } + + pub fn add_l2_block(&mut self, l2_block: L2BlockV2) -> L2BlockV2Payload { + let l2_payload = L2BlockV2Payload { + coinbase: self.coinbase, + tx_list: l2_block.prebuilt_tx_list.tx_list.clone(), + timestamp_sec: l2_block.timestamp_sec, + gas_limit_without_anchor: l2_block.gas_limit_without_anchor, + anchor_block_id: self.max_anchor_block_number, + anchor_block_hash: self.max_anchor_block_hash, + anchor_state_root: B256::ZERO, // Not used in RealTime anchor + }; + self.total_bytes += l2_block.prebuilt_tx_list.bytes_length; + self.l2_blocks.push(l2_block); + l2_payload + } + + pub fn add_l2_draft_block(&mut self, l2_draft_block: L2BlockV2Draft) -> L2BlockV2Payload { + let l2_block = self.create_block_from_draft(l2_draft_block); + self.add_l2_block(l2_block) + } +} diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs new file mode 100644 index 00000000..3d36d8a5 --- /dev/null +++ b/realtime/src/raiko/mod.rs @@ -0,0 +1,130 @@ +use crate::utils::config::RealtimeConfig; +use anyhow::Error; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use std::time::Duration; +use tracing::{debug, info, warn}; + +#[derive(Clone)] +pub struct RaikoClient { + client: Client, + base_url: String, + api_key: Option, + pub proof_type: String, + l2_network: String, + l1_network: String, + poll_interval: Duration, + max_retries: u32, +} + +#[derive(Serialize)] +pub struct RaikoProofRequest { + pub l2_block_numbers: Vec, + pub proof_type: String, + pub 
max_anchor_block_number: u64, + pub parent_proposal_hash: String, + pub basefee_sharing_pctg: u8, + #[serde(skip_serializing_if = "Option::is_none")] + pub network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub l1_network: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub prover: Option, + pub signal_slots: Vec, + pub sources: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub checkpoint: Option, + pub blob_proof_type: String, +} + +#[derive(Serialize, Deserialize)] +pub struct RaikoCheckpoint { + pub block_number: u64, + pub block_hash: String, + pub state_root: String, +} + +#[derive(Deserialize)] +pub struct RaikoResponse { + pub status: String, + #[serde(default)] + pub data: Option, + #[serde(default)] + pub error: Option, + #[serde(default)] + pub message: Option, +} + +#[derive(Deserialize)] +#[serde(untagged)] +pub enum RaikoData { + Proof { proof: String }, + Status { status: String }, +} + +impl RaikoClient { + pub fn new(config: &RealtimeConfig) -> Self { + Self { + client: Client::new(), + base_url: config.raiko_url.clone(), + api_key: config.raiko_api_key.clone(), + proof_type: config.proof_type.clone(), + l2_network: config.raiko_network.clone(), + l1_network: config.raiko_l1_network.clone(), + poll_interval: Duration::from_secs(10), + max_retries: 60, + } + } + + /// Request a proof and poll until ready. + /// Returns the raw proof bytes. 
+ pub async fn get_proof(&self, request: &RaikoProofRequest) -> Result, Error> { + let url = format!("{}/v3/proof/batch/realtime", self.base_url); + + for attempt in 0..self.max_retries { + let mut req = self.client.post(&url).json(request); + + if let Some(ref key) = self.api_key { + req = req.header("X-API-KEY", key); + } + + let resp = req.send().await?; + let body: RaikoResponse = resp.json().await?; + + if body.status == "error" { + return Err(anyhow::anyhow!( + "Raiko proof failed: {}", + body.message.unwrap_or_default() + )); + } + + match body.data { + Some(RaikoData::Proof { proof }) => { + info!("ZK proof received (attempt {})", attempt + 1); + let proof_bytes = hex::decode(proof.trim_start_matches("0x"))?; + return Ok(proof_bytes); + } + Some(RaikoData::Status { ref status }) if status == "ZKAnyNotDrawn" => { + warn!("Raiko: ZK prover not drawn for this request"); + return Err(anyhow::anyhow!("ZK prover not drawn")); + } + Some(RaikoData::Status { ref status }) => { + debug!( + "Raiko status: {}, polling... 
(attempt {})", + status, + attempt + 1 + ); + tokio::time::sleep(self.poll_interval).await; + } + None => { + return Err(anyhow::anyhow!("Raiko: unexpected empty response")); + } + } + } + + Err(anyhow::anyhow!( + "Raiko: proof not ready after {} attempts", + self.max_retries + )) + } +} diff --git a/realtime/src/shared_abi/Bridge.json b/realtime/src/shared_abi/Bridge.json new file mode 100644 index 00000000..8f768573 --- /dev/null +++ b/realtime/src/shared_abi/Bridge.json @@ -0,0 +1,738 @@ +{ + "abi": [ + { + "type": "function", + "name": "context", + "inputs": [], + "outputs": [ + { + "name": "ctx_", + "type": "tuple", + "internalType": "struct IBridge.Context", + "components": [ + { + "name": "msgHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "failMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], 
+ "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "hashMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "isMessageSent", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + 
}, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "nextMessageId", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint64", + "internalType": "uint64" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "processMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "_proof", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "", + "type": "uint8", + "internalType": "enum IBridge.Status" + }, + { + "name": "", + "type": "uint8", + "internalType": "enum IBridge.StatusReason" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "recallMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + 
"internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "_proof", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "retryMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + 
"name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + }, + { + "name": "_isLastAttempt", + "type": "bool", + "internalType": "bool" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "sendMessage", + "inputs": [ + { + "name": "_message", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "outputs": [ + { + "name": "msgHash_", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "message_", + "type": "tuple", + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": 
"destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "stateMutability": "payable" + }, + { + "type": "event", + "name": "MessageSent", + "inputs": [ + { + "name": "msgHash", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "message", + "type": "tuple", + "indexed": false, + "internalType": "struct IBridge.Message", + "components": [ + { + "name": "id", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "fee", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasLimit", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "srcChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "srcOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "destChainId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "destOwner", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "value", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } + ] + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "MessageStatusChanged", + "inputs": [ + { + "name": "msgHash", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "status", + "type": "uint8", + "indexed": false, + "internalType": "enum IBridge.Status" + } + ], + "anonymous": false + } + ] +} \ No newline at end of file diff --git a/realtime/src/shared_abi/bindings.rs b/realtime/src/shared_abi/bindings.rs new file mode 100644 index 00000000..2c6a471e --- /dev/null +++ 
b/realtime/src/shared_abi/bindings.rs @@ -0,0 +1,17 @@ +#![allow(clippy::too_many_arguments)] + +use alloy::sol; + +sol!( + #[allow(missing_docs)] + #[sol(rpc)] + #[derive(Debug)] + Bridge, + "src/shared_abi/Bridge.json" +); + +// SignalSent event emitted by the SignalService contract +sol! { + #[allow(missing_docs)] + event SignalSent(address app, bytes32 signal, bytes32 slot, bytes32 value); +} diff --git a/realtime/src/shared_abi/mod.rs b/realtime/src/shared_abi/mod.rs new file mode 100644 index 00000000..90c70dcc --- /dev/null +++ b/realtime/src/shared_abi/mod.rs @@ -0,0 +1 @@ +pub mod bindings; diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs new file mode 100644 index 00000000..49c1e6db --- /dev/null +++ b/realtime/src/utils/config.rs @@ -0,0 +1,63 @@ +use alloy::primitives::Address; +use anyhow::Error; +use common::config::{ConfigTrait, address_parse_error}; +use std::str::FromStr; + +#[derive(Debug, Clone)] +pub struct RealtimeConfig { + pub realtime_inbox: Address, + pub proposer_multicall: Address, + pub bridge: Address, + pub raiko_url: String, + pub raiko_api_key: Option, + pub proof_type: String, + pub raiko_network: String, + pub raiko_l1_network: String, +} + +impl ConfigTrait for RealtimeConfig { + fn read_env_variables() -> Result { + let read_contract_address = |env_var: &str| -> Result { + let address_str = std::env::var(env_var) + .map_err(|e| anyhow::anyhow!("Failed to read {}: {}", env_var, e))?; + Address::from_str(&address_str) + .map_err(|e| address_parse_error(env_var, e, &address_str)) + }; + + let realtime_inbox = read_contract_address("REALTIME_INBOX_ADDRESS")?; + let proposer_multicall = read_contract_address("PROPOSER_MULTICALL_ADDRESS")?; + let bridge = read_contract_address("L1_BRIDGE_ADDRESS")?; + + let raiko_url = std::env::var("RAIKO_URL") + .unwrap_or_else(|_| "http://localhost:8080".to_string()); + let raiko_api_key = std::env::var("RAIKO_API_KEY").ok(); + let proof_type = 
std::env::var("RAIKO_PROOF_TYPE") + .unwrap_or_else(|_| "sgx".to_string()); + let raiko_network = std::env::var("RAIKO_L2_NETWORK") + .unwrap_or_else(|_| "taiko_mainnet".to_string()); + let raiko_l1_network = std::env::var("RAIKO_L1_NETWORK") + .unwrap_or_else(|_| "ethereum".to_string()); + + Ok(RealtimeConfig { + realtime_inbox, + proposer_multicall, + bridge, + raiko_url, + raiko_api_key, + proof_type, + raiko_network, + raiko_l1_network, + }) + } +} + +use std::fmt; +impl fmt::Display for RealtimeConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "RealTime inbox: {:#?}", self.realtime_inbox)?; + writeln!(f, "Proposer multicall: {:#?}", self.proposer_multicall)?; + writeln!(f, "Raiko URL: {}", self.raiko_url)?; + writeln!(f, "Proof type: {}", self.proof_type)?; + Ok(()) + } +} diff --git a/realtime/src/utils/mod.rs b/realtime/src/utils/mod.rs new file mode 100644 index 00000000..ef68c369 --- /dev/null +++ b/realtime/src/utils/mod.rs @@ -0,0 +1 @@ +pub mod config; From ea607a64a2c86f3c0caf300fd72591f0a18f0f6d Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Fri, 6 Mar 2026 13:42:08 +0530 Subject: [PATCH 02/14] feat: nits --- realtime/src/l1/execution_layer.rs | 46 +++++++------- realtime/src/l1/proposal_tx_builder.rs | 30 ++++----- realtime/src/node/mod.rs | 61 +++++++++---------- .../node/proposal_manager/async_submitter.rs | 2 +- .../node/proposal_manager/batch_builder.rs | 20 +++--- .../node/proposal_manager/bridge_handler.rs | 10 +-- 6 files changed, 83 insertions(+), 86 deletions(-) diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 514ff289..bbb6079d 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -311,29 +311,29 @@ impl L1BridgeHandlerOps for ExecutionLayer { tracing::debug!("Collected {} logs from call trace", all_logs.len()); for log in all_logs { - if let Some(topics) = &log.topics { - if !topics.is_empty() { - if topics[0] == 
MessageSent::SIGNATURE_HASH { - let log_data = alloy::primitives::LogData::new_unchecked( - topics.clone(), - log.data.clone().unwrap_or_default(), - ); - let decoded = MessageSent::decode_log_data(&log_data).map_err(|e| { - anyhow!("Failed to decode MessageSent event L1: {e}") - })?; - - message = Some(decoded.message); - } else if topics[0] == SignalSent::SIGNATURE_HASH { - let log_data = alloy::primitives::LogData::new_unchecked( - topics.clone(), - log.data.clone().unwrap_or_default(), - ); - let decoded = SignalSent::decode_log_data(&log_data).map_err(|e| { - anyhow!("Failed to decode SignalSent event L1: {e}") - })?; - - slot = Some(decoded.slot); - } + if let Some(topics) = &log.topics + && !topics.is_empty() + { + if topics[0] == MessageSent::SIGNATURE_HASH { + let log_data = alloy::primitives::LogData::new_unchecked( + topics.clone(), + log.data.clone().unwrap_or_default(), + ); + let decoded = MessageSent::decode_log_data(&log_data).map_err(|e| { + anyhow!("Failed to decode MessageSent event L1: {e}") + })?; + + message = Some(decoded.message); + } else if topics[0] == SignalSent::SIGNATURE_HASH { + let log_data = alloy::primitives::LogData::new_unchecked( + topics.clone(), + log.data.clone().unwrap_or_default(), + ); + let decoded = SignalSent::decode_log_data(&log_data).map_err(|e| { + anyhow!("Failed to decode SignalSent event L1: {e}") + })?; + + slot = Some(decoded.slot); } } } diff --git a/realtime/src/l1/proposal_tx_builder.rs b/realtime/src/l1/proposal_tx_builder.rs index 740fb708..991f9e27 100644 --- a/realtime/src/l1/proposal_tx_builder.rs +++ b/realtime/src/l1/proposal_tx_builder.rs @@ -94,12 +94,12 @@ impl ProposalTxBuilder { let mut multicalls: Vec = vec![]; // Add user op to multicall - if !batch.user_ops.is_empty() { - if let Some(user_op) = batch.user_ops.first() { - let user_op_call = self.build_user_op_call(user_op.clone()); - info!("Added user op to Multicall: {:?}", &user_op_call); - multicalls.push(user_op_call); - } + if 
!batch.user_ops.is_empty() + && let Some(user_op) = batch.user_ops.first() + { + let user_op_call = self.build_user_op_call(user_op.clone()); + info!("Added user op to Multicall: {:?}", &user_op_call); + multicalls.push(user_op_call); } // Add the proposal to the multicall @@ -110,15 +110,15 @@ impl ProposalTxBuilder { multicalls.push(propose_call.clone()); // Add L1 calls - if !batch.l1_calls.is_empty() { - if let Some(l1_call) = batch.l1_calls.first() { - let l1_call_call = self.build_l1_call_call( - l1_call.clone(), - contract_addresses.bridge, - ); - info!("Added L1 call to Multicall: {:?}", &l1_call_call); - multicalls.push(l1_call_call.clone()); - } + if !batch.l1_calls.is_empty() + && let Some(l1_call) = batch.l1_calls.first() + { + let l1_call_call = self.build_l1_call_call( + l1_call.clone(), + contract_addresses.bridge, + ); + info!("Added L1 call to Multicall: {:?}", &l1_call_call); + multicalls.push(l1_call_call.clone()); } let multicall = Multicall::new(contract_addresses.proposer_multicall, &self.provider); diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index e8ad59ea..339c42ec 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -203,53 +203,50 @@ impl Node { if self .proposal_manager .should_new_block_be_created(&pending_tx_list, &l2_slot_context) - { - if pending_tx_list + && (pending_tx_list .as_ref() - .is_some_and(|pre_built_list| pre_built_list.tx_list.len() != 0) - || self.proposal_manager.has_pending_user_ops().await - { - let preconfed_block = self - .proposal_manager - .preconfirm_block(pending_tx_list, &l2_slot_context) - .await?; + .is_some_and(|pre_built_list| !pre_built_list.tx_list.is_empty()) + || self.proposal_manager.has_pending_user_ops().await) + { + let preconfed_block = self + .proposal_manager + .preconfirm_block(pending_tx_list, &l2_slot_context) + .await?; - self.verify_preconfed_block(preconfed_block).await?; - } + self.verify_preconfed_block(preconfed_block).await?; } } // Submission 
phase — non-blocking: starts async proof fetch + L1 tx if current_status.is_submitter() && !self.proposal_manager.is_submission_in_progress() - { - if let Err(err) = self + && let Err(err) = self .proposal_manager .try_start_submission(current_status.is_preconfer()) .await - { - if let Some(transaction_error) = err.downcast_ref::() { - self.handle_transaction_error( - transaction_error, - ¤t_status, - &l2_slot_info, - ) - .await?; - } else { - return Err(err); - } + { + if let Some(transaction_error) = err.downcast_ref::() { + self.handle_transaction_error( + transaction_error, + ¤t_status, + &l2_slot_info, + ) + .await?; + } else { + return Err(err); } } // Cleanup - if !current_status.is_submitter() && !current_status.is_preconfer() { - if self.proposal_manager.has_batches() { - error!( - "Resetting batch builder. has batches: {}", - self.proposal_manager.has_batches(), - ); - self.proposal_manager.reset_builder().await?; - } + if !current_status.is_submitter() + && !current_status.is_preconfer() + && self.proposal_manager.has_batches() + { + error!( + "Resetting batch builder. 
has batches: {}", + self.proposal_manager.has_batches(), + ); + self.proposal_manager.reset_builder().await?; } Ok(()) diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index 70b0c65d..73808a08 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -182,7 +182,7 @@ async fn submission_task( // Step 3: Compute new parent proposal hash let new_parent_proposal_hash = alloy::primitives::keccak256( - &alloy::sol_types::SolValue::abi_encode(&( + alloy::sol_types::SolValue::abi_encode(&( proposal.parent_proposal_hash, proposal.max_anchor_block_number, proposal.max_anchor_block_hash, diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs index 8e1aee6b..2d3a4001 100644 --- a/realtime/src/node/proposal_manager/batch_builder.rs +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -236,17 +236,17 @@ impl BatchBuilder { } pub fn is_time_shift_between_blocks_expiring(&self, current_l2_slot_timestamp: u64) -> bool { - if let Some(current_proposal) = self.current_proposal.as_ref() { - if let Some(last_block) = current_proposal.l2_blocks.last() { - if current_l2_slot_timestamp < last_block.timestamp_sec { - warn!("Preconfirmation timestamp is before the last block timestamp"); - return false; - } - return self.is_the_last_l1_slot_to_add_an_empty_l2_block( - current_l2_slot_timestamp, - last_block.timestamp_sec, - ); + if let Some(current_proposal) = self.current_proposal.as_ref() + && let Some(last_block) = current_proposal.l2_blocks.last() + { + if current_l2_slot_timestamp < last_block.timestamp_sec { + warn!("Preconfirmation timestamp is before the last block timestamp"); + return false; } + return self.is_the_last_l1_slot_to_add_an_empty_l2_block( + current_l2_slot_timestamp, + last_block.timestamp_sec, + ); } false } diff --git 
a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs index e544cbfc..016e3cb7 100644 --- a/realtime/src/node/proposal_manager/bridge_handler.rs +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -38,10 +38,10 @@ impl UserOpStatusStore { } pub fn set(&self, id: u64, status: &UserOpStatus) { - if let Ok(value) = serde_json::to_vec(status) { - if let Err(e) = self.db.insert(id.to_be_bytes(), value) { - error!("Failed to write user op status: {}", e); - } + if let Ok(value) = serde_json::to_vec(status) + && let Err(e) = self.db.insert(id.to_be_bytes(), value) + { + error!("Failed to write user op status: {}", e); } } @@ -238,7 +238,7 @@ impl BridgeHandler { let mut signal_slot_proof = [0_u8; 65]; signal_slot_proof[..32].copy_from_slice(signature.r().to_be_bytes::<32>().as_slice()); signal_slot_proof[32..64].copy_from_slice(signature.s().to_be_bytes::<32>().as_slice()); - signal_slot_proof[64] = (signature.v() as u8) + 27; + signal_slot_proof[64] = u8::from(signature.v()) + 27; return Ok(Some(L1Call { message_from_l2, From 68738c958b0e34fa39c5ff272922ed82b7743d7d Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Mon, 16 Mar 2026 11:32:48 +0530 Subject: [PATCH 03/14] fix: preconfing --- Cargo.lock | 1 + PROTOCOL_MIGRATION_REAL_TIME_FORK.md | 25 +++++---- node/Cargo.toml | 1 + node/src/main.rs | 39 ++++--------- realtime/src/chain_monitor/mod.rs | 4 +- realtime/src/l1/abi/RealTimeInbox.json | 2 +- realtime/src/l1/execution_layer.rs | 6 +- realtime/src/l2/taiko.rs | 4 +- realtime/src/lib.rs | 12 ++-- realtime/src/node/mod.rs | 56 +++++++++++-------- .../node/proposal_manager/async_submitter.rs | 16 ++---- .../node/proposal_manager/batch_builder.rs | 10 ++-- realtime/src/node/proposal_manager/mod.rs | 33 +++++++---- .../src/node/proposal_manager/proposal.rs | 2 +- realtime/src/raiko/mod.rs | 2 +- realtime/src/utils/config.rs | 7 +++ 16 files changed, 114 insertions(+), 106 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index c258e12c..2952eb38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5836,6 +5836,7 @@ dependencies = [ "clap", "common", "pacaya", + "realtime", "serde_json", "shasta", "tokio", diff --git a/PROTOCOL_MIGRATION_REAL_TIME_FORK.md b/PROTOCOL_MIGRATION_REAL_TIME_FORK.md index 18b852e4..45f8b95b 100644 --- a/PROTOCOL_MIGRATION_REAL_TIME_FORK.md +++ b/PROTOCOL_MIGRATION_REAL_TIME_FORK.md @@ -124,7 +124,6 @@ struct Proposal { ```solidity struct Proposal { - bytes32 parentProposalHash; // Hash of parent (from lastProposalHash) uint48 maxAnchorBlockNumber; // NEW — highest L1 anchor block number bytes32 maxAnchorBlockHash; // NEW — blockhash(maxAnchorBlockNumber) uint8 basefeeSharingPctg; @@ -133,6 +132,7 @@ struct Proposal { } ``` +- Standalone — no parent linkage. State continuity is enforced via `Commitment.lastBlockHash`. - No sequential `id` — proposals identified by hash only. - No `timestamp`, `proposer`, or `endOfSubmissionWindowTimestamp`. - `originBlockNumber`/`originBlockHash` replaced by `maxAnchorBlockNumber`/`maxAnchorBlockHash`. @@ -163,12 +163,14 @@ struct Commitment { ```solidity struct Commitment { bytes32 proposalHash; - ICheckpointStore.Checkpoint checkpoint; // { blockNumber, blockHash, stateRoot } + bytes32 lastFinalizedBlockHash; // Block hash of last finalized L2 block (proof starting state) + ICheckpointStore.Checkpoint checkpoint; // { blockNumber, blockHash, stateRoot } } ``` -No batch support. No `actualProver`, no `Transition[]`. The checkpoint contains the finalized L2 -state for the single proposal. +No batch support. No `actualProver`, no `Transition[]`. The `lastFinalizedBlockHash` binds the +proof to the correct starting state (must match `lastFinalizedBlockHash` on-chain). The checkpoint +contains the finalized L2 state for the single proposal. 
### 2.5 Removed Types @@ -208,8 +210,8 @@ function activate(bytes32 _lastPacayaBlockHash) external onlyOwner; // Sets up CoreState, stores genesis proposal hash in ring buffer slot 0 // RealTimeInbox -function activate(bytes32 _genesisProposalHash) external onlyOwner; -// Sets lastProposalHash = _genesisProposalHash. Can only be called once. +function activate(bytes32 _genesisBlockHash) external onlyOwner; +// Sets lastFinalizedBlockHash = _genesisBlockHash. Can only be called once. ``` ### Propose @@ -248,7 +250,7 @@ function getCoreState() external view returns (CoreState memory); function getProposalHash(uint256 _proposalId) external view returns (bytes32); // RealTimeInbox — replaces both with: -function getLastProposalHash() external view returns (bytes32); +function getLastFinalizedBlockHash() external view returns (bytes32); ``` ### Encoding Helpers @@ -280,7 +282,7 @@ LibBonds.Storage _bondStorage; **RealTimeInbox**: ```solidity -bytes32 public lastProposalHash; // 1 slot — the chain head +bytes32 public lastFinalizedBlockHash; // 1 slot — block hash of last finalized L2 block ``` --- @@ -307,16 +309,17 @@ event Proved( ```solidity event ProposedAndProved( bytes32 indexed proposalHash, - bytes32 parentProposalHash, + bytes32 lastFinalizedBlockHash, uint48 maxAnchorBlockNumber, uint8 basefeeSharingPctg, IInbox.DerivationSource[] sources, - bytes32 signalSlotsHash, + bytes32[] signalSlots, ICheckpointStore.Checkpoint checkpoint ); ``` - Indexed by `proposalHash` instead of sequential `id`. +- `lastFinalizedBlockHash` replaces `parentProposalHash` — the block hash of the last finalized L2 block. - Includes the finalized `Checkpoint` directly. - No `proposer` or `actualProver` field. 
@@ -349,7 +352,6 @@ For off-chain reconstruction of the commitment hash: ``` proposalHash = keccak256(abi.encode( - bytes32 parentProposalHash, uint48 maxAnchorBlockNumber, // padded to 32 bytes by abi.encode bytes32 maxAnchorBlockHash, uint8 basefeeSharingPctg, // padded to 32 bytes by abi.encode @@ -359,6 +361,7 @@ proposalHash = keccak256(abi.encode( commitmentHash = keccak256(abi.encode( bytes32 proposalHash, + bytes32 lastFinalizedBlockHash, // last finalized L2 block hash uint48 checkpoint.blockNumber, // padded to 32 bytes by abi.encode bytes32 checkpoint.blockHash, bytes32 checkpoint.stateRoot diff --git a/node/Cargo.toml b/node/Cargo.toml index e1682d58..9b351256 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -22,6 +22,7 @@ chrono = { workspace = true } clap = { workspace = true } common = { workspace = true } pacaya = { workspace = true } +realtime = { workspace = true } serde_json = { workspace = true } shasta = { workspace = true } tokio = { workspace = true } diff --git a/node/src/main.rs b/node/src/main.rs index a6a460bf..6ab0b6a0 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -1,10 +1,10 @@ use anyhow::Error; use common::{ - fork_info::{Fork, ForkInfo}, + fork_info::ForkInfo, metrics::{self, Metrics}, utils::cancellation_token::CancellationToken, }; -use pacaya::create_pacaya_node; +use realtime::create_realtime_node; use std::sync::Arc; use tokio::signal::unix::{SignalKind, signal}; use tracing::{error, info}; @@ -72,33 +72,14 @@ async fn run_node(iteration: u64, metrics: Arc) -> Result { - // TODO pacaya::utils::config::Config - let next_fork_timestamp = fork_info.config.fork_switch_timestamps.get(1); - info!( - "Current fork: PACAYA 🌋, next fork timestamp: {:?}", - next_fork_timestamp - ); - create_pacaya_node( - config.clone(), - metrics.clone(), - cancel_token.clone(), - fork_info, - ) - .await?; - } - Fork::Shasta => { - info!("Current fork: SHASTA 🌋"); - shasta::create_shasta_node( - config.clone(), - metrics.clone(), - 
cancel_token.clone(), - fork_info, - ) - .await?; - } - } + info!("Current fork: REALTIME ⚡"); + create_realtime_node( + config.clone(), + metrics.clone(), + cancel_token.clone(), + fork_info, + ) + .await?; metrics::server::serve_metrics(metrics.clone(), cancel_token.clone()); diff --git a/realtime/src/chain_monitor/mod.rs b/realtime/src/chain_monitor/mod.rs index 7fa2b710..2af8d789 100644 --- a/realtime/src/chain_monitor/mod.rs +++ b/realtime/src/chain_monitor/mod.rs @@ -6,7 +6,7 @@ pub type RealtimeChainMonitor = ChainMonitor; pub fn print_proposed_and_proved_info(event: &RealTimeInbox::ProposedAndProved) { info!( - "ProposedAndProved event → proposalHash = {}, parentProposalHash = {}, maxAnchorBlockNumber = {}", - event.proposalHash, event.parentProposalHash, event.maxAnchorBlockNumber + "ProposedAndProved event → proposalHash = {}, lastFinalizedBlockHash = {}, maxAnchorBlockNumber = {}", + event.proposalHash, event.lastFinalizedBlockHash, event.maxAnchorBlockNumber ); } diff --git a/realtime/src/l1/abi/RealTimeInbox.json b/realtime/src/l1/abi/RealTimeInbox.json index 0d444719..18dfc773 100644 --- a/realtime/src/l1/abi/RealTimeInbox.json +++ b/realtime/src/l1/abi/RealTimeInbox.json @@ -1 +1 @@ -{"abi":[{"type":"function","name":"activate","inputs":[{"name":"_genesisProposalHash","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"getConfig","inputs":[],"outputs":[{"name":"config_","type":"tuple","internalType":"struct 
IRealTimeInbox.Config","components":[{"name":"proofVerifier","type":"address","internalType":"address"},{"name":"signalService","type":"address","internalType":"address"},{"name":"basefeeSharingPctg","type":"uint8","internalType":"uint8"}]}],"stateMutability":"view"},{"type":"function","name":"getLastProposalHash","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"propose","inputs":[{"name":"_data","type":"bytes","internalType":"bytes"},{"name":"_checkpoint","type":"tuple","internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"Activated","inputs":[{"name":"genesisProposalHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"ProposedAndProved","inputs":[{"name":"proposalHash","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"parentProposalHash","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"maxAnchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"basefeeSharingPctg","type":"uint8","indexed":false,"internalType":"uint8"},{"name":"sources","type":"tuple[]","indexed":false,"internalType":"struct IInbox.DerivationSource[]","components":[{"name":"isForcedInclusion","type":"bool","internalType":"bool"},{"name":"blobSlice","type":"tuple","internalType":"struct 
LibBlobs.BlobSlice","components":[{"name":"blobHashes","type":"bytes32[]","internalType":"bytes32[]"},{"name":"offset","type":"uint24","internalType":"uint24"},{"name":"timestamp","type":"uint48","internalType":"uint48"}]}]},{"name":"signalSlots","type":"bytes32[]","indexed":false,"internalType":"bytes32[]"},{"name":"checkpoint","type":"tuple","indexed":false,"internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"anonymous":false}]} \ No newline at end of file +{"abi":[{"type":"function","name":"activate","inputs":[{"name":"_genesisBlockHash","type":"bytes32","internalType":"bytes32"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"getConfig","inputs":[],"outputs":[{"name":"config_","type":"tuple","internalType":"struct IRealTimeInbox.Config","components":[{"name":"proofVerifier","type":"address","internalType":"address"},{"name":"signalService","type":"address","internalType":"address"},{"name":"basefeeSharingPctg","type":"uint8","internalType":"uint8"}]}],"stateMutability":"view"},{"type":"function","name":"getLastFinalizedBlockHash","inputs":[],"outputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"stateMutability":"view"},{"type":"function","name":"propose","inputs":[{"name":"_data","type":"bytes","internalType":"bytes"},{"name":"_checkpoint","type":"tuple","internalType":"struct 
ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"Activated","inputs":[{"name":"genesisBlockHash","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"ProposedAndProved","inputs":[{"name":"proposalHash","type":"bytes32","indexed":true,"internalType":"bytes32"},{"name":"lastFinalizedBlockHash","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"maxAnchorBlockNumber","type":"uint48","indexed":false,"internalType":"uint48"},{"name":"basefeeSharingPctg","type":"uint8","indexed":false,"internalType":"uint8"},{"name":"sources","type":"tuple[]","indexed":false,"internalType":"struct IInbox.DerivationSource[]","components":[{"name":"isForcedInclusion","type":"bool","internalType":"bool"},{"name":"blobSlice","type":"tuple","internalType":"struct LibBlobs.BlobSlice","components":[{"name":"blobHashes","type":"bytes32[]","internalType":"bytes32[]"},{"name":"offset","type":"uint24","internalType":"uint24"},{"name":"timestamp","type":"uint48","internalType":"uint48"}]}]},{"name":"signalSlots","type":"bytes32[]","indexed":false,"internalType":"bytes32[]"},{"name":"checkpoint","type":"tuple","indexed":false,"internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"anonymous":false}]} \ No newline at end of file diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index bbb6079d..cd4eee0f 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ 
-231,13 +231,13 @@ impl ExecutionLayer { Ok(ProtocolConfig::from(&config)) } - pub async fn get_last_proposal_hash(&self) -> Result { + pub async fn get_last_finalized_block_hash(&self) -> Result { let result = self .realtime_inbox - .getLastProposalHash() + .getLastFinalizedBlockHash() .call() .await - .map_err(|e| anyhow::anyhow!("Failed to call getLastProposalHash: {e}"))?; + .map_err(|e| anyhow::anyhow!("Failed to call getLastFinalizedBlockHash: {e}"))?; Ok(result) } diff --git a/realtime/src/l2/taiko.rs b/realtime/src/l2/taiko.rs index 0d850ee7..835e2207 100644 --- a/realtime/src/l2/taiko.rs +++ b/realtime/src/l2/taiko.rs @@ -271,8 +271,8 @@ impl Taiko { let sharing_pctg = self.protocol_config.get_basefee_sharing_pctg(); - // RealTime: extra data only contains basefee_sharing_pctg (1 byte) - let extra_data = format!("0x{:02x}", sharing_pctg); + // RealTime: 7 bytes — basefee_sharing_pctg + 6 zero bytes (no proposal_id) + let extra_data = format!("0x{:02x}000000000000", sharing_pctg); let executable_data = ExecutableData { base_fee_per_gas: l2_slot_context.info.base_fee(), diff --git a/realtime/src/lib.rs b/realtime/src/lib.rs index 448bb299..5d793b34 100644 --- a/realtime/src/lib.rs +++ b/realtime/src/lib.rs @@ -126,13 +126,14 @@ pub async fn create_realtime_node( .await .map_err(|e| anyhow::anyhow!("Failed to start RealtimeChainMonitor: {}", e))?; - // Read the last proposal hash from L1 - let parent_proposal_hash = ethereum_l1 + // Read the last finalized block hash from L1 + let last_finalized_block_hash = ethereum_l1 .execution_layer - .get_last_proposal_hash() + .get_last_finalized_block_hash() .await?; - info!("Initial parentProposalHash: {}", parent_proposal_hash); + info!("Initial lastFinalizedBlockHash: {}", last_finalized_block_hash); + let preconf_only = realtime_config.preconf_only; let raiko_client = raiko::RaikoClient::new(&realtime_config); let node = Node::new( @@ -144,9 +145,10 @@ pub async fn create_realtime_node( batch_builder_config, 
transaction_error_receiver, fork_info, - parent_proposal_hash, + last_finalized_block_hash, raiko_client, protocol_config.basefee_sharing_pctg, + preconf_only, ) .await .map_err(|e| anyhow::anyhow!("Failed to create Node: {}", e))?; diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index 339c42ec..56e1015f 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -37,6 +37,7 @@ pub struct Node { proposal_manager: BatchManager, head_verifier: HeadVerifier, transaction_error_channel: Receiver, + preconf_only: bool, } impl Node { @@ -50,9 +51,10 @@ impl Node { batch_builder_config: BatchBuilderConfig, transaction_error_channel: Receiver, fork_info: ForkInfo, - parent_proposal_hash: alloy::primitives::B256, + last_finalized_block_hash: alloy::primitives::B256, raiko_client: crate::raiko::RaikoClient, basefee_sharing_pctg: u8, + preconf_only: bool, ) -> Result { let operator = Operator::new( ethereum_l1.execution_layer.clone(), @@ -78,7 +80,7 @@ impl Node { taiko.clone(), metrics.clone(), cancel_token.clone(), - parent_proposal_hash, + last_finalized_block_hash, raiko_client, basefee_sharing_pctg, ) @@ -103,6 +105,7 @@ impl Node { proposal_manager, head_verifier, transaction_error_channel, + preconf_only, }) } @@ -152,27 +155,29 @@ impl Node { let (l2_slot_info, current_status, pending_tx_list) = self.get_slot_info_and_status().await?; - // Always poll for completed async submissions (non-blocking) - if let Some(result) = self.proposal_manager.poll_submission_result() { - match result { - Ok(()) => info!("Async submission completed successfully"), - Err(e) => { - if let Some(transaction_error) = e.downcast_ref::() { - self.handle_transaction_error( - transaction_error, - ¤t_status, - &l2_slot_info, - ) - .await?; - } else { - error!("Async submission failed: {}", e); + if !self.preconf_only { + // Poll for completed async submissions (non-blocking) + if let Some(result) = self.proposal_manager.poll_submission_result() { + match result { + 
Ok(()) => info!("Async submission completed successfully"), + Err(e) => { + if let Some(transaction_error) = e.downcast_ref::() { + self.handle_transaction_error( + transaction_error, + ¤t_status, + &l2_slot_info, + ) + .await?; + } else { + error!("Async submission failed: {}", e); + } } } } - } - self.check_transaction_error_channel(¤t_status, &l2_slot_info) - .await?; + self.check_transaction_error_channel(¤t_status, &l2_slot_info) + .await?; + } if current_status.is_preconfirmation_start_slot() { self.head_verifier @@ -217,8 +222,11 @@ impl Node { } } - // Submission phase — non-blocking: starts async proof fetch + L1 tx - if current_status.is_submitter() + // Submission phase + if self.preconf_only { + // PRECONF_ONLY mode: drop finalized batches without proving/proposing + self.proposal_manager.drain_finalized_batches(); + } else if current_status.is_submitter() && !self.proposal_manager.is_submission_in_progress() && let Err(err) = self .proposal_manager @@ -443,15 +451,15 @@ impl Node { async fn warmup(&mut self) -> Result<(), Error> { info!("Warmup RealTime node"); - // Wait for RealTimeInbox activation (lastProposalHash != 0) + // Wait for RealTimeInbox activation (lastFinalizedBlockHash != 0) loop { let hash = self .ethereum_l1 .execution_layer - .get_last_proposal_hash() + .get_last_finalized_block_hash() .await?; if hash != alloy::primitives::B256::ZERO { - info!("RealTimeInbox is active, lastProposalHash: {}", hash); + info!("RealTimeInbox is active, lastFinalizedBlockHash: {}", hash); break; } warn!("RealTimeInbox not yet activated. 
Waiting..."); diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index 73808a08..12c68f9e 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -11,7 +11,7 @@ use tokio::task::JoinHandle; use tracing::info; pub struct SubmissionResult { - pub new_parent_proposal_hash: B256, + pub new_last_finalized_block_hash: B256, } struct InFlightSubmission { @@ -120,7 +120,7 @@ async fn submission_task( l2_block_numbers, proof_type: raiko_client.proof_type.clone(), max_anchor_block_number: proposal.max_anchor_block_number, - parent_proposal_hash: format!("0x{}", hex::encode(proposal.parent_proposal_hash)), + last_finalized_block_hash: format!("0x{}", hex::encode(proposal.last_finalized_block_hash)), basefee_sharing_pctg, network: None, l1_network: None, @@ -180,14 +180,8 @@ async fn submission_task( return Err(err); } - // Step 3: Compute new parent proposal hash - let new_parent_proposal_hash = alloy::primitives::keccak256( - alloy::sol_types::SolValue::abi_encode(&( - proposal.parent_proposal_hash, - proposal.max_anchor_block_number, - proposal.max_anchor_block_hash, - )), - ); + // Step 3: After successful submission, the new lastFinalizedBlockHash is the checkpoint's blockHash + let new_last_finalized_block_hash = proposal.checkpoint.blockHash; // Step 4: Spawn user-op status tracker if let (Some(hash_rx), Some(result_rx), Some(store)) = @@ -247,6 +241,6 @@ async fn submission_task( } Ok(SubmissionResult { - new_parent_proposal_hash, + new_last_finalized_block_hash, }) } diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs index 2d3a4001..bc310ca8 100644 --- a/realtime/src/node/proposal_manager/batch_builder.rs +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -82,7 +82,7 @@ impl BatchBuilder { pub fn create_new_batch( &mut self, anchor_block: 
AnchorBlockInfo, - parent_proposal_hash: B256, + last_finalized_block_hash: B256, ) { self.finalize_current_batch(); @@ -93,7 +93,7 @@ impl BatchBuilder { max_anchor_block_number: anchor_block.id(), max_anchor_block_hash: anchor_block.hash(), checkpoint: Checkpoint::default(), - parent_proposal_hash, + last_finalized_block_hash, user_ops: vec![], signal_slots: vec![], l1_calls: vec![], @@ -210,10 +210,10 @@ impl BatchBuilder { } } - /// Pop the oldest finalized batch, stamping it with the current parent_proposal_hash. - pub fn pop_oldest_batch(&mut self, parent_proposal_hash: B256) -> Option { + /// Pop the oldest finalized batch, stamping it with the current last_finalized_block_hash. + pub fn pop_oldest_batch(&mut self, last_finalized_block_hash: B256) -> Option { if let Some(mut batch) = self.proposals_to_send.pop_front() { - batch.parent_proposal_hash = parent_proposal_hash; + batch.last_finalized_block_hash = last_finalized_block_hash; Some(batch) } else { None diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index dc24628b..d2a281c7 100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -47,7 +47,7 @@ pub struct BatchManager { l1_height_lag: u64, metrics: Arc, cancel_token: CancellationToken, - parent_proposal_hash: B256, + last_finalized_block_hash: B256, } impl BatchManager { @@ -59,7 +59,7 @@ impl BatchManager { taiko: Arc, metrics: Arc, cancel_token: CancellationToken, - parent_proposal_hash: B256, + last_finalized_block_hash: B256, raiko_client: RaikoClient, basefee_sharing_pctg: u8, ) -> Result { @@ -107,20 +107,20 @@ impl BatchManager { l1_height_lag, metrics, cancel_token, - parent_proposal_hash, + last_finalized_block_hash, }) } /// Non-blocking poll: check if the in-flight submission has completed. - /// On success, updates `parent_proposal_hash`. Returns None if idle or still in progress. + /// On success, updates `last_finalized_block_hash`. 
Returns None if idle or still in progress. pub fn poll_submission_result(&mut self) -> Option> { match self.async_submitter.try_recv_result() { Some(Ok(result)) => { info!( - "Submission completed. New parent proposal hash: {}", - result.new_parent_proposal_hash + "Submission completed. New last finalized block hash: {}", + result.new_last_finalized_block_hash ); - self.parent_proposal_hash = result.new_parent_proposal_hash; + self.last_finalized_block_hash = result.new_last_finalized_block_hash; Some(Ok(())) } Some(Err(e)) => Some(Err(e)), @@ -139,7 +139,7 @@ impl BatchManager { self.batch_builder.finalize_if_needed(submit_only_full_batches); - let Some(batch) = self.batch_builder.pop_oldest_batch(self.parent_proposal_hash) else { + let Some(batch) = self.batch_builder.pop_oldest_batch(self.last_finalized_block_hash) else { return Ok(()); }; @@ -158,9 +158,9 @@ impl BatchManager { let status_store = self.bridge_handler.lock().await.status_store(); info!( - "Starting async submission: {} blocks, parent_hash: {}", + "Starting async submission: {} blocks, last_finalized_block_hash: {}", batch.l2_blocks.len(), - batch.parent_proposal_hash, + batch.last_finalized_block_hash, ); self.async_submitter.submit(batch, Some(status_store)); @@ -171,6 +171,17 @@ impl BatchManager { self.async_submitter.is_busy() } + /// Drop all finalized batches without submitting. Used in PRECONF_ONLY mode. 
+ pub fn drain_finalized_batches(&mut self) { + self.batch_builder.finalize_if_needed(false); + while let Some(batch) = self.batch_builder.pop_oldest_batch(self.last_finalized_block_hash) { + info!( + "PRECONF_ONLY: dropping batch with {} blocks", + batch.l2_blocks.len(), + ); + } + } + pub fn should_new_block_be_created( &self, pending_tx_list: &Option, @@ -368,7 +379,7 @@ impl BatchManager { .await?; let anchor_block_id = anchor_block_info.id(); - // Use B256::ZERO as placeholder -- real parent hash is stamped at submission time + // Use B256::ZERO as placeholder -- real last_finalized_block_hash is stamped at submission time self.batch_builder .create_new_batch(anchor_block_info, B256::ZERO); diff --git a/realtime/src/node/proposal_manager/proposal.rs b/realtime/src/node/proposal_manager/proposal.rs index b659d1da..16b1e145 100644 --- a/realtime/src/node/proposal_manager/proposal.rs +++ b/realtime/src/node/proposal_manager/proposal.rs @@ -24,7 +24,7 @@ pub struct Proposal { // Proof fields pub checkpoint: Checkpoint, - pub parent_proposal_hash: B256, + pub last_finalized_block_hash: B256, // Surge POC fields (carried over) pub user_ops: Vec, diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 3d36d8a5..13b5fbc1 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -22,7 +22,7 @@ pub struct RaikoProofRequest { pub l2_block_numbers: Vec, pub proof_type: String, pub max_anchor_block_number: u64, - pub parent_proposal_hash: String, + pub last_finalized_block_hash: String, pub basefee_sharing_pctg: u8, #[serde(skip_serializing_if = "Option::is_none")] pub network: Option, diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs index 49c1e6db..b3c4d73b 100644 --- a/realtime/src/utils/config.rs +++ b/realtime/src/utils/config.rs @@ -13,6 +13,7 @@ pub struct RealtimeConfig { pub proof_type: String, pub raiko_network: String, pub raiko_l1_network: String, + pub preconf_only: bool, } impl ConfigTrait for 
RealtimeConfig { @@ -38,6 +39,10 @@ impl ConfigTrait for RealtimeConfig { let raiko_l1_network = std::env::var("RAIKO_L1_NETWORK") .unwrap_or_else(|_| "ethereum".to_string()); + let preconf_only = std::env::var("PRECONF_ONLY") + .map(|v| v.to_lowercase() != "false" && v != "0") + .unwrap_or(true); + Ok(RealtimeConfig { realtime_inbox, proposer_multicall, @@ -47,6 +52,7 @@ impl ConfigTrait for RealtimeConfig { proof_type, raiko_network, raiko_l1_network, + preconf_only, }) } } @@ -58,6 +64,7 @@ impl fmt::Display for RealtimeConfig { writeln!(f, "Proposer multicall: {:#?}", self.proposer_multicall)?; writeln!(f, "Raiko URL: {}", self.raiko_url)?; writeln!(f, "Proof type: {}", self.proof_type)?; + writeln!(f, "Preconf only: {}", self.preconf_only)?; Ok(()) } } From d04e64163f17a748ba647b76715102719846f7cb Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 19 Mar 2026 11:55:28 +0530 Subject: [PATCH 04/14] raiko interaction --- realtime/src/lib.rs | 2 + realtime/src/node/mod.rs | 2 + .../node/proposal_manager/async_submitter.rs | 107 ++++++++++++++++-- .../node/proposal_manager/batch_builder.rs | 1 + realtime/src/node/proposal_manager/mod.rs | 2 + .../src/node/proposal_manager/proposal.rs | 3 +- realtime/src/raiko/mod.rs | 22 +++- realtime/src/utils/config.rs | 7 ++ 8 files changed, 133 insertions(+), 13 deletions(-) diff --git a/realtime/src/lib.rs b/realtime/src/lib.rs index 5d793b34..5d65fd90 100644 --- a/realtime/src/lib.rs +++ b/realtime/src/lib.rs @@ -134,6 +134,7 @@ pub async fn create_realtime_node( info!("Initial lastFinalizedBlockHash: {}", last_finalized_block_hash); let preconf_only = realtime_config.preconf_only; + let proof_request_bypass = realtime_config.proof_request_bypass; let raiko_client = raiko::RaikoClient::new(&realtime_config); let node = Node::new( @@ -149,6 +150,7 @@ pub async fn create_realtime_node( raiko_client, protocol_config.basefee_sharing_pctg, preconf_only, + proof_request_bypass, ) .await .map_err(|e| anyhow::anyhow!("Failed to 
create Node: {}", e))?; diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index 56e1015f..c0f7a377 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -55,6 +55,7 @@ impl Node { raiko_client: crate::raiko::RaikoClient, basefee_sharing_pctg: u8, preconf_only: bool, + proof_request_bypass: bool, ) -> Result { let operator = Operator::new( ethereum_l1.execution_layer.clone(), @@ -83,6 +84,7 @@ impl Node { last_finalized_block_hash, raiko_client, basefee_sharing_pctg, + proof_request_bypass, ) .await .map_err(|e| anyhow::anyhow!("Failed to create BatchManager: {}", e))?; diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index 12c68f9e..0a6e1669 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -1,11 +1,16 @@ use crate::l1::execution_layer::ExecutionLayer; use crate::node::proposal_manager::bridge_handler::{UserOpStatus, UserOpStatusStore}; use crate::node::proposal_manager::proposal::Proposal; -use crate::raiko::{RaikoCheckpoint, RaikoClient, RaikoProofRequest}; +use crate::raiko::{ + RaikoBlobSlice, RaikoCheckpoint, RaikoClient, RaikoDerivationSource, RaikoProofRequest, +}; +use alloy::consensus::SidecarBuilder; use alloy::primitives::B256; use anyhow::Error; use common::l1::ethereum_l1::EthereumL1; use std::sync::Arc; +use taiko_protocol::shasta::manifest::{BlockManifest, DerivationSourceManifest}; +use taiko_protocol::shasta::BlobCoder; use tokio::sync::oneshot; use tokio::task::JoinHandle; use tracing::info; @@ -24,6 +29,7 @@ pub struct AsyncSubmitter { raiko_client: RaikoClient, basefee_sharing_pctg: u8, ethereum_l1: Arc>, + proof_request_bypass: bool, } impl AsyncSubmitter { @@ -31,12 +37,14 @@ impl AsyncSubmitter { raiko_client: RaikoClient, basefee_sharing_pctg: u8, ethereum_l1: Arc>, + proof_request_bypass: bool, ) -> Self { Self { in_flight: None, raiko_client, 
basefee_sharing_pctg, ethereum_l1, + proof_request_bypass, } } @@ -74,6 +82,7 @@ impl AsyncSubmitter { let raiko_client = self.raiko_client.clone(); let basefee_sharing_pctg = self.basefee_sharing_pctg; let ethereum_l1 = self.ethereum_l1.clone(); + let proof_request_bypass = self.proof_request_bypass; let handle = tokio::spawn(async move { let result = submission_task( @@ -82,6 +91,7 @@ impl AsyncSubmitter { basefee_sharing_pctg, ethereum_l1, status_store, + proof_request_bypass, ) .await; let _ = result_tx.send(result); @@ -103,19 +113,62 @@ async fn submission_task( basefee_sharing_pctg: u8, ethereum_l1: Arc>, status_store: Option, + proof_request_bypass: bool, ) -> Result { - // Step 1: Fetch ZK proof from Raiko + // Step 1: Fetch ZK proof from Raiko (or bypass) if proposal.zk_proof.is_none() { - info!( - "Fetching ZK proof from Raiko for batch with {} blocks", - proposal.l2_blocks.len() - ); - let l2_block_numbers: Vec = (proposal.checkpoint.blockNumber.to::() - u64::try_from(proposal.l2_blocks.len())? 
+ 1 ..=proposal.checkpoint.blockNumber.to::()) .collect(); + // Build the blob sidecar (same as proposal_tx_builder) to get blob hashes and raw data + let mut block_manifests = Vec::with_capacity(proposal.l2_blocks.len()); + for l2_block in &proposal.l2_blocks { + block_manifests.push(BlockManifest { + timestamp: l2_block.timestamp_sec, + coinbase: l2_block.coinbase, + anchor_block_number: l2_block.anchor_block_number, + gas_limit: l2_block.gas_limit_without_anchor, + transactions: l2_block + .prebuilt_tx_list + .tx_list + .iter() + .map(|tx| tx.clone().into()) + .collect(), + }); + } + let manifest = DerivationSourceManifest { + blocks: block_manifests, + }; + let manifest_data = manifest.encode_and_compress()?; + let sidecar_builder: SidecarBuilder = + SidecarBuilder::from_slice(&manifest_data); + let sidecar: alloy::eips::eip4844::BlobTransactionSidecar = sidecar_builder.build()?; + + // Extract versioned blob hashes + let blob_hashes: Vec = sidecar + .versioned_hashes() + .map(|h| format!("0x{}", hex::encode(h))) + .collect(); + + // Extract raw blob data (each blob is 131072 bytes, hex-encoded with 0x prefix) + let blobs: Vec = sidecar + .blobs + .iter() + .map(|blob| format!("0x{}", hex::encode::<&[u8]>(blob.as_ref()))) + .collect(); + + // Build sources array with a single DerivationSource entry + let sources = vec![RaikoDerivationSource { + is_forced_inclusion: false, + blob_slice: RaikoBlobSlice { + blob_hashes, + offset: 0, + timestamp: 0, + }, + }]; + let request = RaikoProofRequest { l2_block_numbers, proof_type: raiko_client.proof_type.clone(), @@ -130,15 +183,51 @@ async fn submission_task( .iter() .map(|s| format!("0x{}", hex::encode(s))) .collect(), - sources: vec![], + sources, + blobs, checkpoint: Some(RaikoCheckpoint { block_number: proposal.checkpoint.blockNumber.to::(), block_hash: format!("0x{}", hex::encode(proposal.checkpoint.blockHash)), state_root: format!("0x{}", hex::encode(proposal.checkpoint.stateRoot)), }), - blob_proof_type: 
"ProofOfEquivalence".to_string(), + blob_proof_type: "proof_of_equivalence".to_string(), }; + if proof_request_bypass { + let json = serde_json::to_string_pretty(&request)?; + let raiko_url = format!("{}/v3/proof/batch/realtime", raiko_client.base_url); + + std::fs::write("/tmp/raiko_request.json", &json)?; + + let api_key_header = raiko_client.api_key.as_ref() + .map(|k| format!(" -H 'X-API-KEY: {}' \\\n", k)) + .unwrap_or_default(); + let curl_script = format!( + "#!/bin/bash\n\ + # Generated by Catalyst — send this to your Raiko instance\n\ + # Usage: RAIKO_URL=http://your-raiko:8080 bash /tmp/raiko_curl.sh\n\n\ + RAIKO_URL=\"${{RAIKO_URL:-{raiko_url}}}\"\n\n\ + curl -X POST \"$RAIKO_URL\" \\\n\ + {api_key_header}\ + \x20 -H 'Content-Type: application/json' \\\n\ + \x20 -d @/tmp/raiko_request.json\n" + ); + std::fs::write("/tmp/raiko_curl.sh", &curl_script)?; + + info!( + "PROOF_REQUEST_BYPASS: Raiko request dumped.\n\ + Request JSON: /tmp/raiko_request.json\n\ + Curl script: /tmp/raiko_curl.sh\n\ + Raiko URL: {}\n\ + Skipping Raiko call and L1 submission.", + raiko_url + ); + + return Ok(SubmissionResult { + new_last_finalized_block_hash: proposal.checkpoint.blockHash, + }); + } + let proof = raiko_client.get_proof(&request).await?; proposal.zk_proof = Some(proof); } diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs index bc310ca8..3d6de3cb 100644 --- a/realtime/src/node/proposal_manager/batch_builder.rs +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -92,6 +92,7 @@ impl BatchBuilder { coinbase: self.config.default_coinbase, max_anchor_block_number: anchor_block.id(), max_anchor_block_hash: anchor_block.hash(), + max_anchor_state_root: anchor_block.state_root(), checkpoint: Checkpoint::default(), last_finalized_block_hash, user_ops: vec![], diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index d2a281c7..77718f9b 100644 --- 
a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -62,6 +62,7 @@ impl BatchManager { last_finalized_block_hash: B256, raiko_client: RaikoClient, basefee_sharing_pctg: u8, + proof_request_bypass: bool, ) -> Result { info!( "Batch builder config:\n\ @@ -92,6 +93,7 @@ impl BatchManager { raiko_client, basefee_sharing_pctg, ethereum_l1.clone(), + proof_request_bypass, ); Ok(Self { diff --git a/realtime/src/node/proposal_manager/proposal.rs b/realtime/src/node/proposal_manager/proposal.rs index 16b1e145..b75ae8c2 100644 --- a/realtime/src/node/proposal_manager/proposal.rs +++ b/realtime/src/node/proposal_manager/proposal.rs @@ -21,6 +21,7 @@ pub struct Proposal { // RealTime: maxAnchor instead of anchor pub max_anchor_block_number: u64, pub max_anchor_block_hash: B256, + pub max_anchor_state_root: B256, // Proof fields pub checkpoint: Checkpoint, @@ -95,7 +96,7 @@ impl Proposal { gas_limit_without_anchor: l2_block.gas_limit_without_anchor, anchor_block_id: self.max_anchor_block_number, anchor_block_hash: self.max_anchor_block_hash, - anchor_state_root: B256::ZERO, // Not used in RealTime anchor + anchor_state_root: self.max_anchor_state_root, }; self.total_bytes += l2_block.prebuilt_tx_list.bytes_length; self.l2_blocks.push(l2_block); diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 13b5fbc1..7963617e 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -8,8 +8,8 @@ use tracing::{debug, info, warn}; #[derive(Clone)] pub struct RaikoClient { client: Client, - base_url: String, - api_key: Option, + pub base_url: String, + pub api_key: Option, pub proof_type: String, l2_network: String, l1_network: String, @@ -31,12 +31,28 @@ pub struct RaikoProofRequest { #[serde(skip_serializing_if = "Option::is_none")] pub prover: Option, pub signal_slots: Vec, - pub sources: Vec, + pub sources: Vec, + pub blobs: Vec, #[serde(skip_serializing_if = "Option::is_none")] pub checkpoint: Option, pub 
blob_proof_type: String, } +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RaikoDerivationSource { + pub is_forced_inclusion: bool, + pub blob_slice: RaikoBlobSlice, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RaikoBlobSlice { + pub blob_hashes: Vec, + pub offset: u32, + pub timestamp: u64, +} + #[derive(Serialize, Deserialize)] pub struct RaikoCheckpoint { pub block_number: u64, diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs index b3c4d73b..4f361e06 100644 --- a/realtime/src/utils/config.rs +++ b/realtime/src/utils/config.rs @@ -14,6 +14,7 @@ pub struct RealtimeConfig { pub raiko_network: String, pub raiko_l1_network: String, pub preconf_only: bool, + pub proof_request_bypass: bool, } impl ConfigTrait for RealtimeConfig { @@ -43,6 +44,10 @@ impl ConfigTrait for RealtimeConfig { .map(|v| v.to_lowercase() != "false" && v != "0") .unwrap_or(true); + let proof_request_bypass = std::env::var("PROOF_REQUEST_BYPASS") + .map(|v| v.to_lowercase() != "false" && v != "0") + .unwrap_or(false); + Ok(RealtimeConfig { realtime_inbox, proposer_multicall, @@ -53,6 +58,7 @@ impl ConfigTrait for RealtimeConfig { raiko_network, raiko_l1_network, preconf_only, + proof_request_bypass, }) } } @@ -65,6 +71,7 @@ impl fmt::Display for RealtimeConfig { writeln!(f, "Raiko URL: {}", self.raiko_url)?; writeln!(f, "Proof type: {}", self.proof_type)?; writeln!(f, "Preconf only: {}", self.preconf_only)?; + writeln!(f, "Proof request bypass: {}", self.proof_request_bypass)?; Ok(()) } } From f9252ba80fa5c3112aa34b3402c67b2387dcb0ad Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 19 Mar 2026 12:05:03 +0530 Subject: [PATCH 05/14] feat: connect with surge verifier --- realtime/src/l1/bindings.rs | 56 +++++++++++++++++++ realtime/src/l1/execution_layer.rs | 5 +- realtime/src/l1/proposal_tx_builder.rs | 33 ++++++----- .../node/proposal_manager/async_submitter.rs | 2 +- realtime/src/raiko/mod.rs | 3 +- 
realtime/src/utils/config.rs | 10 ++-- 6 files changed, 87 insertions(+), 22 deletions(-) diff --git a/realtime/src/l1/bindings.rs b/realtime/src/l1/bindings.rs index dbee3b8d..d79e3599 100644 --- a/realtime/src/l1/bindings.rs +++ b/realtime/src/l1/bindings.rs @@ -33,4 +33,60 @@ sol! { bytes32[] signalSlots; uint48 maxAnchorBlockNumber; } + + // SurgeVerifier SubProof encoding + struct SubProof { + uint8 proofBitFlag; + bytes data; + } +} + +/// Proof types supported by the SurgeVerifier. +/// Each variant maps to a bit flag used in `SubProof.proofBitFlag`. +#[derive(Debug, Clone, Copy)] +pub enum ProofType { + Risc0, // 0b00000001 + Sp1, // 0b00000010 + Zisk, // 0b00000100 +} + +impl ProofType { + pub fn proof_bit_flag(&self) -> u8 { + match self { + ProofType::Risc0 => 1, + ProofType::Sp1 => 1 << 1, + ProofType::Zisk => 1 << 2, + } + } + + /// Returns the proof type string expected by Raiko. + pub fn raiko_proof_type(&self) -> &'static str { + match self { + ProofType::Risc0 => "risc0", + ProofType::Sp1 => "sp1", + ProofType::Zisk => "zisk", + } + } +} + +impl std::str::FromStr for ProofType { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "risc0" => Ok(ProofType::Risc0), + "sp1" => Ok(ProofType::Sp1), + "zisk" => Ok(ProofType::Zisk), + _ => Err(anyhow::anyhow!( + "Invalid PROOF_TYPE '{}'. 
Must be one of: sp1, risc0, zisk", + s + )), + } + } +} + +impl std::fmt::Display for ProofType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.raiko_proof_type()) + } } diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index cd4eee0f..3c9c0940 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -44,6 +44,7 @@ pub struct ExecutionLayer { contract_addresses: ContractAddresses, realtime_inbox: RealTimeInboxInstance, raiko_client: RaikoClient, + proof_type: crate::l1::bindings::ProofType, } impl ELTrait for ExecutionLayer { @@ -99,6 +100,7 @@ impl ELTrait for ExecutionLayer { // Read Raiko config from environment let realtime_config = crate::utils::config::RealtimeConfig::read_env_variables() .map_err(|e| anyhow::anyhow!("Failed to read RealtimeConfig for Raiko: {e}"))?; + let proof_type = realtime_config.proof_type; let raiko_client = RaikoClient::new(&realtime_config); Ok(Self { @@ -109,6 +111,7 @@ impl ELTrait for ExecutionLayer { contract_addresses, realtime_inbox, raiko_client, + proof_type, }) } @@ -192,7 +195,7 @@ impl ExecutionLayer { batch.zk_proof.is_some(), ); - let builder = ProposalTxBuilder::new(self.provider.clone(), 10); + let builder = ProposalTxBuilder::new(self.provider.clone(), 10, self.proof_type); let tx = builder .build_propose_tx( diff --git a/realtime/src/l1/proposal_tx_builder.rs b/realtime/src/l1/proposal_tx_builder.rs index 991f9e27..190a805f 100644 --- a/realtime/src/l1/proposal_tx_builder.rs +++ b/realtime/src/l1/proposal_tx_builder.rs @@ -1,5 +1,5 @@ use crate::l1::{ - bindings::{BlobReference, Multicall, ProposeInput, RealTimeInbox}, + bindings::{BlobReference, Multicall, ProofType, ProposeInput, RealTimeInbox, SubProof}, config::ContractAddresses, }; use crate::node::proposal_manager::{ @@ -31,13 +31,15 @@ use tracing::{info, warn}; pub struct ProposalTxBuilder { provider: DynProvider, extra_gas_percentage: u64, + 
proof_type: ProofType, } impl ProposalTxBuilder { - pub fn new(provider: DynProvider, extra_gas_percentage: u64) -> Self { + pub fn new(provider: DynProvider, extra_gas_percentage: u64, proof_type: ProofType) -> Self { Self { provider, extra_gas_percentage, + proof_type, } } @@ -113,10 +115,7 @@ impl ProposalTxBuilder { if !batch.l1_calls.is_empty() && let Some(l1_call) = batch.l1_calls.first() { - let l1_call_call = self.build_l1_call_call( - l1_call.clone(), - contract_addresses.bridge, - ); + let l1_call_call = self.build_l1_call_call(l1_call.clone(), contract_addresses.bridge); info!("Added L1 call to Multicall: {:?}", &l1_call_call); multicalls.push(l1_call_call.clone()); } @@ -170,19 +169,23 @@ impl ProposalTxBuilder { .encode_and_compress() .map_err(|e| Error::msg(format!("Can't encode and compress manifest: {e}")))?; - let sidecar_builder: SidecarBuilder = - SidecarBuilder::from_slice(&manifest_data); + let sidecar_builder: SidecarBuilder = SidecarBuilder::from_slice(&manifest_data); let sidecar: BlobTransactionSidecar = sidecar_builder.build()?; let inbox = RealTimeInbox::new(inbox_address, self.provider.clone()); - let proof = Bytes::from( - batch - .zk_proof - .as_ref() - .ok_or_else(|| anyhow::anyhow!("ZK proof not set on proposal"))? - .clone(), - ); + // Encode the raw proof as SubProof[] for the SurgeVerifier + let raw_proof = batch + .zk_proof + .as_ref() + .ok_or_else(|| anyhow::anyhow!("ZK proof not set on proposal"))? 
+ .clone(); + + let sub_proofs = vec![SubProof { + proofBitFlag: self.proof_type.proof_bit_flag(), + data: Bytes::from(raw_proof), + }]; + let proof = Bytes::from(sub_proofs.abi_encode()); // Build ProposeInput and ABI-encode it as the _data parameter let blob_reference = BlobReference { diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index 0a6e1669..dd38f1f0 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -171,7 +171,7 @@ async fn submission_task( let request = RaikoProofRequest { l2_block_numbers, - proof_type: raiko_client.proof_type.clone(), + proof_type: raiko_client.proof_type.raiko_proof_type().to_string(), max_anchor_block_number: proposal.max_anchor_block_number, last_finalized_block_hash: format!("0x{}", hex::encode(proposal.last_finalized_block_hash)), basefee_sharing_pctg, diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 7963617e..59f7213e 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -1,3 +1,4 @@ +use crate::l1::bindings::ProofType; use crate::utils::config::RealtimeConfig; use anyhow::Error; use reqwest::Client; @@ -10,7 +11,7 @@ pub struct RaikoClient { client: Client, pub base_url: String, pub api_key: Option, - pub proof_type: String, + pub proof_type: ProofType, l2_network: String, l1_network: String, poll_interval: Duration, diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs index 4f361e06..41662a59 100644 --- a/realtime/src/utils/config.rs +++ b/realtime/src/utils/config.rs @@ -1,3 +1,4 @@ +use crate::l1::bindings::ProofType; use alloy::primitives::Address; use anyhow::Error; use common::config::{ConfigTrait, address_parse_error}; @@ -10,7 +11,7 @@ pub struct RealtimeConfig { pub bridge: Address, pub raiko_url: String, pub raiko_api_key: Option, - pub proof_type: String, + pub proof_type: ProofType, pub raiko_network: 
String, pub raiko_l1_network: String, pub preconf_only: bool, @@ -33,8 +34,9 @@ impl ConfigTrait for RealtimeConfig { let raiko_url = std::env::var("RAIKO_URL") .unwrap_or_else(|_| "http://localhost:8080".to_string()); let raiko_api_key = std::env::var("RAIKO_API_KEY").ok(); - let proof_type = std::env::var("RAIKO_PROOF_TYPE") - .unwrap_or_else(|_| "sgx".to_string()); + let proof_type: ProofType = std::env::var("PROOF_TYPE") + .unwrap_or_else(|_| "sp1".to_string()) + .parse()?; let raiko_network = std::env::var("RAIKO_L2_NETWORK") .unwrap_or_else(|_| "taiko_mainnet".to_string()); let raiko_l1_network = std::env::var("RAIKO_L1_NETWORK") @@ -69,7 +71,7 @@ impl fmt::Display for RealtimeConfig { writeln!(f, "RealTime inbox: {:#?}", self.realtime_inbox)?; writeln!(f, "Proposer multicall: {:#?}", self.proposer_multicall)?; writeln!(f, "Raiko URL: {}", self.raiko_url)?; - writeln!(f, "Proof type: {}", self.proof_type)?; + writeln!(f, "Proof type: {} (bit flag: {})", self.proof_type, self.proof_type.proof_bit_flag())?; writeln!(f, "Preconf only: {}", self.preconf_only)?; writeln!(f, "Proof request bypass: {}", self.proof_request_bypass)?; Ok(()) From 88d3285f545ee84c3bd6aec7c6edd8a71fa5b913 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 19 Mar 2026 21:30:14 +0530 Subject: [PATCH 06/14] fix: proof body --- realtime/src/raiko/mod.rs | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 59f7213e..5e8686d6 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -67,6 +67,10 @@ pub struct RaikoResponse { #[serde(default)] pub data: Option, #[serde(default)] + pub proof_type: Option, + #[serde(default)] + pub batch_id: Option, + #[serde(default)] pub error: Option, #[serde(default)] pub message: Option, @@ -75,8 +79,25 @@ pub struct RaikoResponse { #[derive(Deserialize)] #[serde(untagged)] pub enum RaikoData { - Proof { proof: String }, - Status { 
status: String }, + Proof { + proof: RaikoProof, + }, + Status { + status: String, + }, +} + +#[derive(Deserialize)] +pub struct RaikoProof { + pub proof: Option, + #[serde(default)] + pub input: Option, + #[serde(default)] + pub quote: Option, + #[serde(default)] + pub uuid: Option, + #[serde(default)] + pub kzg_proof: Option, } impl RaikoClient { @@ -116,9 +137,12 @@ impl RaikoClient { } match body.data { - Some(RaikoData::Proof { proof }) => { + Some(RaikoData::Proof { proof: proof_obj }) => { + let proof_hex = proof_obj.proof.ok_or_else(|| { + anyhow::anyhow!("Raiko returned proof object with null proof field") + })?; info!("ZK proof received (attempt {})", attempt + 1); - let proof_bytes = hex::decode(proof.trim_start_matches("0x"))?; + let proof_bytes = hex::decode(proof_hex.trim_start_matches("0x"))?; return Ok(proof_bytes); } Some(RaikoData::Status { ref status }) if status == "ZKAnyNotDrawn" => { From 2229f7d8a5fd449e294e9dc1fc856826a7d3cd49 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 19 Mar 2026 21:41:34 +0530 Subject: [PATCH 07/14] feat: recovery --- realtime/src/l2/taiko.rs | 17 ++ realtime/src/node/mod.rs | 54 ++++++ .../node/proposal_manager/batch_builder.rs | 14 +- realtime/src/node/proposal_manager/mod.rs | 155 +++++++++++++++++- 4 files changed, 237 insertions(+), 3 deletions(-) diff --git a/realtime/src/l2/taiko.rs b/realtime/src/l2/taiko.rs index 835e2207..5d062bd2 100644 --- a/realtime/src/l2/taiko.rs +++ b/realtime/src/l2/taiko.rs @@ -148,6 +148,23 @@ impl Taiko { .await } + /// Scan backward from L2 head to find the block number matching a given hash. + /// Used during recovery to resolve `lastFinalizedBlockHash` from L1 to an L2 block number. 
+ pub async fn find_l2_block_number_by_hash(&self, block_hash: B256) -> Result { + let head = self.get_latest_l2_block_id().await?; + for n in (0..=head).rev() { + let hash = self.get_l2_block_hash(n).await?; + if hash == block_hash { + return Ok(n); + } + } + Err(anyhow::anyhow!( + "L2 block with hash {} not found on Geth (scanned {} blocks)", + block_hash, + head + 1 + )) + } + pub async fn get_l2_slot_info(&self) -> Result { self.get_l2_slot_info_by_parent_block(BlockNumberOrTag::Latest) .await diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index c0f7a377..e9fcb94a 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -471,6 +471,60 @@ impl Node { // Wait for the last sent transaction to be executed self.wait_for_sent_transactions().await?; + // Recover and submit any preconfirmed-but-unproposed L2 blocks + if !self.preconf_only { + let recovered = self.proposal_manager.recover_unproposed_blocks().await?; + if recovered > 0 { + self.submit_recovered_batches().await?; + } + } + + Ok(()) + } + + async fn submit_recovered_batches(&mut self) -> Result<(), Error> { + info!("Submitting recovered batches to L1..."); + + loop { + if !self.proposal_manager.has_batches() { + break; + } + + if self.cancel_token.is_cancelled() { + return Err(anyhow::anyhow!("Shutdown during recovery submission")); + } + + // Start async submission (proves via Raiko + sends L1 tx) + self.proposal_manager.try_start_submission(false).await?; + + // Wait for submission to complete + loop { + if self.cancel_token.is_cancelled() { + return Err(anyhow::anyhow!("Shutdown during recovery submission")); + } + + if let Some(result) = self.proposal_manager.poll_submission_result() { + match result { + Ok(()) => { + info!("Recovery batch submitted successfully"); + break; + } + Err(e) => { + return Err(anyhow::anyhow!( + "Recovery batch submission failed: {}", + e + )); + } + } + } + sleep(Duration::from_millis(500)).await; + } + + // Wait for L1 transaction to be 
confirmed before next batch + self.wait_for_sent_transactions().await?; + } + + info!("All recovered batches submitted successfully"); Ok(()) } diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs index 3d6de3cb..4c75ffa6 100644 --- a/realtime/src/node/proposal_manager/batch_builder.rs +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -9,7 +9,7 @@ use anyhow::Error; use common::metrics::Metrics; use common::{ batch_builder::BatchBuilderConfig, - shared::l2_block_v2::L2BlockV2Draft, + shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}, }; use common::{ l1::slot_clock::SlotClock, @@ -120,6 +120,18 @@ impl BatchBuilder { } } + /// Add a pre-built L2BlockV2 directly to the current proposal. + /// Used during recovery to bypass the draft/payload flow. + pub fn add_recovered_l2_block(&mut self, l2_block: L2BlockV2) -> Result<(), Error> { + if let Some(current_proposal) = self.current_proposal.as_mut() { + current_proposal.total_bytes += l2_block.prebuilt_tx_list.bytes_length; + current_proposal.l2_blocks.push(l2_block); + Ok(()) + } else { + Err(anyhow::anyhow!("No current batch for recovered block")) + } + } + pub fn add_user_op(&mut self, user_op_data: UserOp) -> Result<&Proposal, Error> { if let Some(current_proposal) = self.current_proposal.as_mut() { current_proposal.user_ops.push(user_op_data.clone()); diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index 77718f9b..273321f2 100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -25,8 +25,8 @@ use common::{ l2::taiko_driver::{OperationType, models::BuildPreconfBlockResponse}, shared::{ anchor_block_info::AnchorBlockInfo, - l2_block_v2::L2BlockV2Draft, - l2_tx_lists::PreBuiltTxList, + l2_block_v2::{L2BlockV2, L2BlockV2Draft}, + l2_tx_lists::{self, PreBuiltTxList}, }, utils::cancellation_token::CancellationToken, }; @@ -414,6 +414,157 @@ impl 
BatchManager { self.batch_builder.get_number_of_batches() } + /// Detect and recover L2 blocks that were preconfirmed but never proposed to L1. + /// Returns the number of recovered blocks. + pub async fn recover_unproposed_blocks(&mut self) -> Result { + let last_finalized_hash = self + .ethereum_l1 + .execution_layer + .get_last_finalized_block_hash() + .await?; + + if last_finalized_hash == B256::ZERO { + info!("No finalized block hash on L1 (genesis). Nothing to recover."); + return Ok(0); + } + + // Resolve the L1 lastFinalizedBlockHash to an L2 block number. + // If the hash isn't found on L2, it means no blocks have been proposed yet + // (the hash is the initial contract value, not an actual L2 block hash), + // so we treat the last proposed block number as 0. + let last_proposed_block_number = match self + .taiko + .find_l2_block_number_by_hash(last_finalized_hash) + .await + { + Ok(n) => n, + Err(_) => { + info!( + "lastFinalizedBlockHash {} not found on L2 — treating as no blocks proposed yet", + last_finalized_hash + ); + 0 + } + }; + + let l2_head = self.taiko.get_latest_l2_block_id().await?; + + if l2_head <= last_proposed_block_number { + info!( + "No unproposed blocks: L2 head {} <= last proposed {}", + l2_head, last_proposed_block_number + ); + return Ok(0); + } + + let gap = l2_head - last_proposed_block_number; + info!( + "Detected {} unproposed L2 blocks ({} to {}). Starting recovery.", + gap, + last_proposed_block_number + 1, + l2_head + ); + + for block_number in (last_proposed_block_number + 1)..=l2_head { + self.recover_from_l2_block(block_number).await?; + } + + self.last_finalized_block_hash = last_finalized_hash; + + info!("Recovery complete: {} blocks recovered into proposals", gap); + Ok(gap) + } + + /// Fetch a single L2 block from Geth, extract anchor + user txs, and rebuild a Proposal. 
+ async fn recover_from_l2_block(&mut self, block_height: u64) -> Result<(), Error> { + use alloy::consensus::{BlockHeader, Transaction}; + use taiko_alethia_reth::validation::ANCHOR_V3_V4_GAS_LIMIT; + + info!("Recovering unproposed L2 block {}", block_height); + + let block = self + .taiko + .get_l2_block_by_number(block_height, true) + .await?; + + let (anchor_tx, user_txs) = match block.transactions.as_transactions() { + Some(txs) => txs.split_first().ok_or_else(|| { + anyhow::anyhow!( + "recover_from_l2_block: No anchor transaction in block {}", + block_height + ) + })?, + None => { + return Err(anyhow::anyhow!( + "recover_from_l2_block: No transactions in block {}", + block_height + )); + } + }; + + let gas_limit_without_anchor = + block.header.gas_limit().checked_sub(ANCHOR_V3_V4_GAS_LIMIT).ok_or_else(|| { + anyhow::anyhow!( + "Block {} gas limit {} < ANCHOR_V3_V4_GAS_LIMIT {}", + block_height, + block.header.gas_limit(), + ANCHOR_V3_V4_GAS_LIMIT + ) + })?; + + let coinbase = block.header.beneficiary(); + + let anchor_tx_data = Taiko::get_anchor_tx_data(anchor_tx.input())?; + let anchor_block_number = anchor_tx_data._checkpoint.blockNumber.to::(); + let anchor_block_hash = anchor_tx_data._checkpoint.blockHash; + let anchor_state_root = anchor_tx_data._checkpoint.stateRoot; + + let user_txs = user_txs.to_vec(); + let bytes_length = l2_tx_lists::encode_and_compress(&user_txs)?.len() as u64; + + let l2_block = L2BlockV2::new_from( + PreBuiltTxList { + tx_list: user_txs.clone(), + estimated_gas_used: 0, + bytes_length, + }, + block.header.timestamp(), + coinbase, + anchor_block_number, + gas_limit_without_anchor, + ); + + let anchor_info = AnchorBlockInfo::from_precomputed_data( + self.ethereum_l1.execution_layer.common(), + anchor_block_number, + anchor_block_hash, + anchor_state_root, + ) + .await?; + + self.batch_builder + .create_new_batch(anchor_info, B256::ZERO); + self.batch_builder.add_recovered_l2_block(l2_block)?; + + 
self.batch_builder.set_proposal_checkpoint(Checkpoint { + blockNumber: U48::from(block_height), + blockHash: block.header.hash_slow(), + stateRoot: block.header.state_root(), + })?; + + self.batch_builder.finalize_current_batch(); + + info!( + "Recovered L2 block {} into proposal: anchor={}, coinbase={}, user_txs={}", + block_height, + anchor_block_number, + coinbase, + user_txs.len() + ); + + Ok(()) + } + pub async fn reanchor_block( &mut self, pending_tx_list: PreBuiltTxList, From 69684cd6061e2f1a411e07b3ffa41a3dfaa46184 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Wed, 25 Mar 2026 10:26:39 +0530 Subject: [PATCH 08/14] feat: update catalyst --- common/src/l2/taiko_driver/mod.rs | 29 ++++- common/src/l2/taiko_driver/models.rs | 13 ++ common/src/l2/taiko_driver/operation_type.rs | 2 + realtime/src/l1/proposal_tx_builder.rs | 26 ++-- realtime/src/l2/taiko.rs | 7 + realtime/src/node/mod.rs | 53 +------- .../node/proposal_manager/async_submitter.rs | 12 ++ .../node/proposal_manager/bridge_handler.rs | 1 + realtime/src/node/proposal_manager/mod.rs | 121 +++--------------- realtime/src/raiko/mod.rs | 2 +- .../node/proposal_manager/bridge_handler.rs | 1 + 11 files changed, 98 insertions(+), 169 deletions(-) diff --git a/common/src/l2/taiko_driver/mod.rs b/common/src/l2/taiko_driver/mod.rs index 83742901..23660fff 100644 --- a/common/src/l2/taiko_driver/mod.rs +++ b/common/src/l2/taiko_driver/mod.rs @@ -6,7 +6,10 @@ mod status_provider_trait; use crate::{metrics::Metrics, utils::rpc_client::HttpRPCClient}; use anyhow::Error; pub use config::TaikoDriverConfig; -use models::{BuildPreconfBlockRequestBody, BuildPreconfBlockResponse, TaikoStatus}; +use models::{ + BuildPreconfBlockRequestBody, BuildPreconfBlockResponse, ReorgStaleBlockRequest, + ReorgStaleBlockResponse, TaikoStatus, +}; pub use operation_type::OperationType; use serde_json::Value; pub use status_provider_trait::StatusProvider; @@ -71,6 +74,30 @@ impl TaikoDriver { } } + pub async fn reorg_stale_block( + 
&self, + new_head_block_number: u64, + ) -> Result { + const API_ENDPOINT: &str = "reorgStaleBlock"; + + let request_body = ReorgStaleBlockRequest { + new_head_block_number, + }; + + let response = self + .call_driver( + &self.preconf_rpc, + http::Method::POST, + API_ENDPOINT, + &request_body, + OperationType::ReorgStaleBlock, + ) + .await?; + + let reorg_response: ReorgStaleBlockResponse = serde_json::from_value(response)?; + Ok(reorg_response) + } + async fn call_driver( &self, client: &HttpRPCClient, diff --git a/common/src/l2/taiko_driver/models.rs b/common/src/l2/taiko_driver/models.rs index 8e1715c0..81623703 100644 --- a/common/src/l2/taiko_driver/models.rs +++ b/common/src/l2/taiko_driver/models.rs @@ -64,6 +64,19 @@ pub struct TaikoStatus { pub end_of_sequencing_block_hash: B256, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReorgStaleBlockRequest { + pub new_head_block_number: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReorgStaleBlockResponse { + pub new_head_block_hash: B256, + pub blocks_removed: u64, +} + fn deserialize_end_of_sequencing_block_hash<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, diff --git a/common/src/l2/taiko_driver/operation_type.rs b/common/src/l2/taiko_driver/operation_type.rs index be2677af..83ce1bda 100644 --- a/common/src/l2/taiko_driver/operation_type.rs +++ b/common/src/l2/taiko_driver/operation_type.rs @@ -4,6 +4,7 @@ use std::fmt; pub enum OperationType { Preconfirm, Reanchor, + ReorgStaleBlock, Status, } @@ -12,6 +13,7 @@ impl fmt::Display for OperationType { let s = match self { OperationType::Preconfirm => "Preconfirm", OperationType::Reanchor => "Reanchor", + OperationType::ReorgStaleBlock => "ReorgStaleBlock", OperationType::Status => "Status", }; write!(f, "{s}") diff --git a/realtime/src/l1/proposal_tx_builder.rs b/realtime/src/l1/proposal_tx_builder.rs index 190a805f..a404741d 100644 --- 
a/realtime/src/l1/proposal_tx_builder.rs +++ b/realtime/src/l1/proposal_tx_builder.rs @@ -57,18 +57,10 @@ impl ProposalTxBuilder { Ok(gas) => gas, Err(e) => { warn!( - "Build proposeBatch: Failed to estimate gas for blob transaction: {}", + "Build proposeBatch: Failed to estimate gas for blob transaction: {}. Force-sending with 500000 gas.", e ); - match e { - RpcError::ErrorResp(err) => { - return Err(anyhow::anyhow!( - tools::convert_error_payload(&err.to_string()) - .unwrap_or(TransactionError::EstimationFailed) - )); - } - _ => return Ok(tx_blob), - } + 500_000 } }; let tx_blob_gas = tx_blob_gas + tx_blob_gas * self.extra_gas_percentage / 100; @@ -104,10 +96,22 @@ impl ProposalTxBuilder { multicalls.push(user_op_call); } - // Add the proposal to the multicall + // Build the propose call and blob sidecar let (propose_call, blob_sidecar) = self .build_propose_call(&batch, contract_addresses.realtime_inbox) .await?; + + // If no user ops or L1 calls, send directly to inbox (skip multicall) + if batch.user_ops.is_empty() && batch.l1_calls.is_empty() { + info!("Sending proposal directly to RealTimeInbox (no multicall)"); + let tx = TransactionRequest::default() + .to(contract_addresses.realtime_inbox) + .from(from) + .input(propose_call.data.into()) + .with_blob_sidecar(blob_sidecar); + return Ok(tx); + } + info!("Added proposal to Multicall: {:?}", &propose_call); multicalls.push(propose_call.clone()); diff --git a/realtime/src/l2/taiko.rs b/realtime/src/l2/taiko.rs index 5d062bd2..cfd173c5 100644 --- a/realtime/src/l2/taiko.rs +++ b/realtime/src/l2/taiko.rs @@ -313,6 +313,13 @@ impl Taiko { .await } + pub async fn reorg_stale_block( + &self, + new_head_block_number: u64, + ) -> Result { + self.driver.reorg_stale_block(new_head_block_number).await + } + pub fn decode_anchor_id_from_tx_data(data: &[u8]) -> Result { L2ExecutionLayer::decode_anchor_id_from_tx_data(data) } diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index e9fcb94a..8fb67a0d 100644 
--- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -471,60 +471,11 @@ impl Node { // Wait for the last sent transaction to be executed self.wait_for_sent_transactions().await?; - // Recover and submit any preconfirmed-but-unproposed L2 blocks + // Reorg any preconfirmed-but-unproposed L2 blocks back to the last proposed block if !self.preconf_only { - let recovered = self.proposal_manager.recover_unproposed_blocks().await?; - if recovered > 0 { - self.submit_recovered_batches().await?; - } - } - - Ok(()) - } - - async fn submit_recovered_batches(&mut self) -> Result<(), Error> { - info!("Submitting recovered batches to L1..."); - - loop { - if !self.proposal_manager.has_batches() { - break; - } - - if self.cancel_token.is_cancelled() { - return Err(anyhow::anyhow!("Shutdown during recovery submission")); - } - - // Start async submission (proves via Raiko + sends L1 tx) - self.proposal_manager.try_start_submission(false).await?; - - // Wait for submission to complete - loop { - if self.cancel_token.is_cancelled() { - return Err(anyhow::anyhow!("Shutdown during recovery submission")); - } - - if let Some(result) = self.proposal_manager.poll_submission_result() { - match result { - Ok(()) => { - info!("Recovery batch submitted successfully"); - break; - } - Err(e) => { - return Err(anyhow::anyhow!( - "Recovery batch submission failed: {}", - e - )); - } - } - } - sleep(Duration::from_millis(500)).await; - } - - // Wait for L1 transaction to be confirmed before next batch - self.wait_for_sent_transactions().await?; + self.proposal_manager.reorg_unproposed_blocks().await?; } - info!("All recovered batches submitted successfully"); Ok(()) } diff --git a/realtime/src/node/proposal_manager/async_submitter.rs b/realtime/src/node/proposal_manager/async_submitter.rs index dd38f1f0..46b7bbbf 100644 --- a/realtime/src/node/proposal_manager/async_submitter.rs +++ b/realtime/src/node/proposal_manager/async_submitter.rs @@ -228,6 +228,18 @@ async fn submission_task( 
}); } + // Set user op status to ProvingBlock before requesting proof from Raiko + if let Some(ref store) = status_store { + for op in &proposal.user_ops { + store.set( + op.id, + &UserOpStatus::ProvingBlock { + block_id: proposal.checkpoint.blockNumber.to::(), + }, + ); + } + } + let proof = raiko_client.get_proof(&request).await?; proposal.zk_proof = Some(proof); } diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs index 016e3cb7..a4ea1cdf 100644 --- a/realtime/src/node/proposal_manager/bridge_handler.rs +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -20,6 +20,7 @@ use tracing::{error, info, warn}; pub enum UserOpStatus { Pending, Processing { tx_hash: FixedBytes<32> }, + ProvingBlock { block_id: u64 }, Rejected { reason: String }, Executed, } diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index 273321f2..11995772 100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -414,9 +414,9 @@ impl BatchManager { self.batch_builder.get_number_of_batches() } - /// Detect and recover L2 blocks that were preconfirmed but never proposed to L1. - /// Returns the number of recovered blocks. - pub async fn recover_unproposed_blocks(&mut self) -> Result { + /// Reorg all unproposed L2 blocks back to the last proposed block. + /// Called on startup to clean up any preconfirmed-but-unproposed blocks. + pub async fn reorg_unproposed_blocks(&mut self) -> Result<(), Error> { let last_finalized_hash = self .ethereum_l1 .execution_layer @@ -424,14 +424,10 @@ impl BatchManager { .await?; if last_finalized_hash == B256::ZERO { - info!("No finalized block hash on L1 (genesis). Nothing to recover."); - return Ok(0); + info!("No finalized block hash on L1 (genesis). Nothing to reorg."); + return Ok(()); } - // Resolve the L1 lastFinalizedBlockHash to an L2 block number. 
- // If the hash isn't found on L2, it means no blocks have been proposed yet - // (the hash is the initial contract value, not an actual L2 block hash), - // so we treat the last proposed block number as 0. let last_proposed_block_number = match self .taiko .find_l2_block_number_by_hash(last_finalized_hash) @@ -454,117 +450,32 @@ impl BatchManager { "No unproposed blocks: L2 head {} <= last proposed {}", l2_head, last_proposed_block_number ); - return Ok(0); + return Ok(()); } let gap = l2_head - last_proposed_block_number; - info!( - "Detected {} unproposed L2 blocks ({} to {}). Starting recovery.", + warn!( + "Detected {} unproposed L2 blocks ({} to {}). Reorging to last proposed block {}.", gap, last_proposed_block_number + 1, - l2_head + l2_head, + last_proposed_block_number ); - for block_number in (last_proposed_block_number + 1)..=l2_head { - self.recover_from_l2_block(block_number).await?; - } - - self.last_finalized_block_hash = last_finalized_hash; - - info!("Recovery complete: {} blocks recovered into proposals", gap); - Ok(gap) - } - - /// Fetch a single L2 block from Geth, extract anchor + user txs, and rebuild a Proposal. 
- async fn recover_from_l2_block(&mut self, block_height: u64) -> Result<(), Error> { - use alloy::consensus::{BlockHeader, Transaction}; - use taiko_alethia_reth::validation::ANCHOR_V3_V4_GAS_LIMIT; - - info!("Recovering unproposed L2 block {}", block_height); - - let block = self + let reorg_result = self .taiko - .get_l2_block_by_number(block_height, true) + .reorg_stale_block(last_proposed_block_number) .await?; - - let (anchor_tx, user_txs) = match block.transactions.as_transactions() { - Some(txs) => txs.split_first().ok_or_else(|| { - anyhow::anyhow!( - "recover_from_l2_block: No anchor transaction in block {}", - block_height - ) - })?, - None => { - return Err(anyhow::anyhow!( - "recover_from_l2_block: No transactions in block {}", - block_height - )); - } - }; - - let gas_limit_without_anchor = - block.header.gas_limit().checked_sub(ANCHOR_V3_V4_GAS_LIMIT).ok_or_else(|| { - anyhow::anyhow!( - "Block {} gas limit {} < ANCHOR_V3_V4_GAS_LIMIT {}", - block_height, - block.header.gas_limit(), - ANCHOR_V3_V4_GAS_LIMIT - ) - })?; - - let coinbase = block.header.beneficiary(); - - let anchor_tx_data = Taiko::get_anchor_tx_data(anchor_tx.input())?; - let anchor_block_number = anchor_tx_data._checkpoint.blockNumber.to::(); - let anchor_block_hash = anchor_tx_data._checkpoint.blockHash; - let anchor_state_root = anchor_tx_data._checkpoint.stateRoot; - - let user_txs = user_txs.to_vec(); - let bytes_length = l2_tx_lists::encode_and_compress(&user_txs)?.len() as u64; - - let l2_block = L2BlockV2::new_from( - PreBuiltTxList { - tx_list: user_txs.clone(), - estimated_gas_used: 0, - bytes_length, - }, - block.header.timestamp(), - coinbase, - anchor_block_number, - gas_limit_without_anchor, - ); - - let anchor_info = AnchorBlockInfo::from_precomputed_data( - self.ethereum_l1.execution_layer.common(), - anchor_block_number, - anchor_block_hash, - anchor_state_root, - ) - .await?; - - self.batch_builder - .create_new_batch(anchor_info, B256::ZERO); - 
self.batch_builder.add_recovered_l2_block(l2_block)?; - - self.batch_builder.set_proposal_checkpoint(Checkpoint { - blockNumber: U48::from(block_height), - blockHash: block.header.hash_slow(), - stateRoot: block.header.state_root(), - })?; - - self.batch_builder.finalize_current_batch(); - info!( - "Recovered L2 block {} into proposal: anchor={}, coinbase={}, user_txs={}", - block_height, - anchor_block_number, - coinbase, - user_txs.len() + "Reorg complete: new head hash={}, blocks removed={}", + reorg_result.new_head_block_hash, reorg_result.blocks_removed ); + self.last_finalized_block_hash = last_finalized_hash; Ok(()) } + pub async fn reanchor_block( &mut self, pending_tx_list: PreBuiltTxList, diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 5e8686d6..46d34c3e 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -109,7 +109,7 @@ impl RaikoClient { proof_type: config.proof_type.clone(), l2_network: config.raiko_network.clone(), l1_network: config.raiko_l1_network.clone(), - poll_interval: Duration::from_secs(10), + poll_interval: Duration::from_secs(2), max_retries: 60, } } diff --git a/shasta/src/node/proposal_manager/bridge_handler.rs b/shasta/src/node/proposal_manager/bridge_handler.rs index e35766db..30fbdb5f 100644 --- a/shasta/src/node/proposal_manager/bridge_handler.rs +++ b/shasta/src/node/proposal_manager/bridge_handler.rs @@ -28,6 +28,7 @@ use tracing::{error, info, warn}; pub enum UserOpStatus { Pending, Processing { tx_hash: FixedBytes<32> }, + ProvingBlock { block_id: u64 }, Rejected { reason: String }, Executed, } From 564fa1209c2e8f83f7a7d94d8f7ff2af401d6d66 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Wed, 25 Mar 2026 18:42:32 +0530 Subject: [PATCH 09/14] fix: bind server to 0.0.0.0 --- shasta/src/node/proposal_manager/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shasta/src/node/proposal_manager/mod.rs b/shasta/src/node/proposal_manager/mod.rs index 4a281c6d..3ee2488e 
100644 --- a/shasta/src/node/proposal_manager/mod.rs +++ b/shasta/src/node/proposal_manager/mod.rs @@ -74,7 +74,7 @@ impl BatchManager { let forced_inclusion = Arc::new(ForcedInclusion::new(ethereum_l1.clone()).await?); // Initialize bridge handler listening on port 4545 - let bridge_addr: SocketAddr = "127.0.0.1:4545".parse()?; + let bridge_addr: SocketAddr = "0.0.0.0:4545".parse()?; let bridge_handler = Arc::new(Mutex::new( BridgeHandler::new( bridge_addr, From 026d5e8769a941630217265a9d4cd249784d49ce Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Wed, 25 Mar 2026 19:09:48 +0530 Subject: [PATCH 10/14] fix: bind server to 0.0.0.0 --- realtime/src/node/proposal_manager/mod.rs | 2 +- shasta/src/node/proposal_manager/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index 11995772..6b303f29 100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -78,7 +78,7 @@ impl BatchManager { config.max_anchor_height_offset, ); - let bridge_addr: SocketAddr = "127.0.0.1:4545".parse()?; + let bridge_addr: SocketAddr = "0.0.0.0:4545".parse()?; let bridge_handler = Arc::new(Mutex::new( BridgeHandler::new( bridge_addr, diff --git a/shasta/src/node/proposal_manager/mod.rs b/shasta/src/node/proposal_manager/mod.rs index 3ee2488e..4a281c6d 100644 --- a/shasta/src/node/proposal_manager/mod.rs +++ b/shasta/src/node/proposal_manager/mod.rs @@ -74,7 +74,7 @@ impl BatchManager { let forced_inclusion = Arc::new(ForcedInclusion::new(ethereum_l1.clone()).await?); // Initialize bridge handler listening on port 4545 - let bridge_addr: SocketAddr = "0.0.0.0:4545".parse()?; + let bridge_addr: SocketAddr = "127.0.0.1:4545".parse()?; let bridge_handler = Arc::new(Mutex::new( BridgeHandler::new( bridge_addr, From d811389bb46e6c5fe409eb2bcf92ca91d5f534b6 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 26 Mar 2026 15:36:33 +0530 Subject: 
[PATCH 11/14] feat: add faster polling --- realtime/src/raiko/mod.rs | 20 +++++++++++++++++--- realtime/src/utils/config.rs | 14 ++++++++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/realtime/src/raiko/mod.rs b/realtime/src/raiko/mod.rs index 46d34c3e..e4509e22 100644 --- a/realtime/src/raiko/mod.rs +++ b/realtime/src/raiko/mod.rs @@ -109,8 +109,8 @@ impl RaikoClient { proof_type: config.proof_type.clone(), l2_network: config.raiko_network.clone(), l1_network: config.raiko_l1_network.clone(), - poll_interval: Duration::from_secs(2), - max_retries: 60, + poll_interval: Duration::from_millis(config.raiko_poll_interval_ms), + max_retries: config.raiko_max_retries, } } @@ -127,7 +127,21 @@ impl RaikoClient { } let resp = req.send().await?; - let body: RaikoResponse = resp.json().await?; + let http_status = resp.status(); + let raw_body = resp.text().await?; + warn!( + "Raiko response (attempt {}): HTTP {} | body: {}", + attempt + 1, + http_status, + raw_body + ); + let body: RaikoResponse = serde_json::from_str(&raw_body) + .map_err(|e| anyhow::anyhow!( + "Failed to parse Raiko response (HTTP {}): {} | body: {}", + http_status, + e, + raw_body + ))?; if body.status == "error" { return Err(anyhow::anyhow!( diff --git a/realtime/src/utils/config.rs b/realtime/src/utils/config.rs index 41662a59..4db6210b 100644 --- a/realtime/src/utils/config.rs +++ b/realtime/src/utils/config.rs @@ -14,6 +14,8 @@ pub struct RealtimeConfig { pub proof_type: ProofType, pub raiko_network: String, pub raiko_l1_network: String, + pub raiko_poll_interval_ms: u64, + pub raiko_max_retries: u32, pub preconf_only: bool, pub proof_request_bypass: bool, } @@ -42,6 +44,16 @@ impl ConfigTrait for RealtimeConfig { let raiko_l1_network = std::env::var("RAIKO_L1_NETWORK") .unwrap_or_else(|_| "ethereum".to_string()); + let raiko_poll_interval_ms: u64 = std::env::var("RAIKO_POLL_INTERVAL_MS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(2000); + + let raiko_max_retries: u32 
= std::env::var("RAIKO_MAX_RETRIES") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(60); + let preconf_only = std::env::var("PRECONF_ONLY") .map(|v| v.to_lowercase() != "false" && v != "0") .unwrap_or(true); @@ -59,6 +71,8 @@ impl ConfigTrait for RealtimeConfig { proof_type, raiko_network, raiko_l1_network, + raiko_poll_interval_ms, + raiko_max_retries, preconf_only, proof_request_bypass, }) From 077b6bceea39e0ca6449499e58330d1d0f1155c6 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Thu, 26 Mar 2026 17:53:47 +0530 Subject: [PATCH 12/14] feat: hop proving --- realtime/src/l1/execution_layer.rs | 2 +- realtime/src/l2/execution_layer.rs | 50 ++++++++++++++++++- .../node/proposal_manager/batch_builder.rs | 5 +- .../node/proposal_manager/bridge_handler.rs | 35 ++++++------- realtime/src/node/proposal_manager/mod.rs | 2 +- realtime/src/shared_abi/SignalService.json | 1 + realtime/src/shared_abi/bindings.rs | 22 ++++++-- 7 files changed, 86 insertions(+), 31 deletions(-) create mode 100644 realtime/src/shared_abi/SignalService.json diff --git a/realtime/src/l1/execution_layer.rs b/realtime/src/l1/execution_layer.rs index 3c9c0940..1edf3dde 100644 --- a/realtime/src/l1/execution_layer.rs +++ b/realtime/src/l1/execution_layer.rs @@ -3,7 +3,7 @@ use super::proposal_tx_builder::ProposalTxBuilder; use super::protocol_config::ProtocolConfig; use crate::node::proposal_manager::proposal::Proposal; use crate::raiko::RaikoClient; -use crate::shared_abi::bindings::{Bridge::MessageSent, IBridge::Message, SignalSent}; +use crate::shared_abi::bindings::{Bridge::MessageSent, IBridge::Message, SignalService::SignalSent}; use crate::{l1::config::ContractAddresses, node::proposal_manager::bridge_handler::UserOp}; use alloy::{ eips::{BlockId, BlockNumberOrTag}, diff --git a/realtime/src/l2/execution_layer.rs b/realtime/src/l2/execution_layer.rs index ccc17c4e..b06b7b1e 100644 --- a/realtime/src/l2/execution_layer.rs +++ b/realtime/src/l2/execution_layer.rs @@ -1,8 +1,9 @@ use 
crate::l2::bindings::{Anchor, ICheckpointStore::Checkpoint}; use crate::shared_abi::bindings::{ Bridge::{self, MessageSent}, + HopProof, IBridge::Message, - SignalSent, + SignalService::SignalSent, }; use alloy::{ consensus::{ @@ -241,6 +242,12 @@ pub trait L2BridgeHandlerOps { &self, block_id: u64, ) -> Result)>, anyhow::Error>; + async fn get_hop_proof( + &self, + slot: FixedBytes<32>, + block_id: u64, + state_root: B256, + ) -> Result; } impl L2BridgeHandlerOps for L2ExecutionLayer { @@ -366,4 +373,45 @@ impl L2BridgeHandlerOps for L2ExecutionLayer { Ok(Some((message, slot))) } + + async fn get_hop_proof( + &self, + slot: FixedBytes<32>, + block_id: u64, + state_root: B256, + ) -> Result { + use alloy::sol_types::SolValue; + + let proof = self + .provider + .get_proof(self.signal_service, vec![slot.into()]) + .block_id(block_id.into()) + .await + .map_err(|e| anyhow::anyhow!("eth_getProof failed for signal slot: {e}"))?; + + let storage_proof = proof + .storage_proof + .first() + .ok_or_else(|| anyhow::anyhow!("No storage proof returned for signal slot"))?; + + let hop_proof = HopProof { + chainId: self.chain_id, + blockId: block_id, + rootHash: state_root, + cacheOption: 0, + accountProof: proof.account_proof.clone(), + storageProof: storage_proof.proof.clone(), + }; + + info!( + "Built HopProof: chainId={}, blockId={}, rootHash={}, accountProof_len={}, storageProof_len={}", + hop_proof.chainId, + hop_proof.blockId, + hop_proof.rootHash, + hop_proof.accountProof.len(), + hop_proof.storageProof.len(), + ); + + Ok(Bytes::from(vec![hop_proof].abi_encode_params())) + } } diff --git a/realtime/src/node/proposal_manager/batch_builder.rs b/realtime/src/node/proposal_manager/batch_builder.rs index 4c75ffa6..48550d4b 100644 --- a/realtime/src/node/proposal_manager/batch_builder.rs +++ b/realtime/src/node/proposal_manager/batch_builder.rs @@ -11,10 +11,7 @@ use common::{ batch_builder::BatchBuilderConfig, shared::l2_block_v2::{L2BlockV2, L2BlockV2Draft}, }; -use 
common::{ - l1::slot_clock::SlotClock, - shared::anchor_block_info::AnchorBlockInfo, -}; +use common::{l1::slot_clock::SlotClock, shared::anchor_block_info::AnchorBlockInfo}; use std::{collections::VecDeque, sync::Arc}; use tracing::{debug, info, trace, warn}; diff --git a/realtime/src/node/proposal_manager/bridge_handler.rs b/realtime/src/node/proposal_manager/bridge_handler.rs index a4ea1cdf..3818dfce 100644 --- a/realtime/src/node/proposal_manager/bridge_handler.rs +++ b/realtime/src/node/proposal_manager/bridge_handler.rs @@ -4,8 +4,7 @@ use crate::{ l1::execution_layer::{ExecutionLayer, L1BridgeHandlerOps}, l2::execution_layer::L2BridgeHandlerOps, }; -use alloy::primitives::{Address, Bytes, FixedBytes}; -use alloy::signers::Signer; +use alloy::primitives::{Address, B256, Bytes, FixedBytes}; use anyhow::Result; use common::{l1::ethereum_l1::EthereumL1, utils::cancellation_token::CancellationToken}; use jsonrpsee::server::{RpcModule, ServerBuilder}; @@ -92,7 +91,6 @@ pub struct BridgeHandler { ethereum_l1: Arc>, taiko: Arc, rx: Receiver, - l1_call_proof_signer: alloy::signers::local::PrivateKeySigner, status_store: UserOpStatusStore, } @@ -180,11 +178,6 @@ impl BridgeHandler { ethereum_l1, taiko, rx, - // Surge: Hard coding the private key for the POC - l1_call_proof_signer: alloy::signers::local::PrivateKeySigner::from_bytes( - &"0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" - .parse::>()?, - )?, status_store, }) } @@ -227,23 +220,23 @@ impl BridgeHandler { Ok(None) } - pub async fn find_l1_call(&mut self, block_id: u64) -> Result, anyhow::Error> { - if let Some((message_from_l2, signal_slot)) = self - .taiko - .l2_execution_layer() - .find_message_and_signal_slot(block_id) - .await? 
- { - let signature = self.l1_call_proof_signer.sign_hash(&signal_slot).await?; + pub async fn find_l1_call( + &mut self, + block_id: u64, + state_root: B256, + ) -> Result, anyhow::Error> { + let l2_el = self.taiko.l2_execution_layer(); - let mut signal_slot_proof = [0_u8; 65]; - signal_slot_proof[..32].copy_from_slice(signature.r().to_be_bytes::<32>().as_slice()); - signal_slot_proof[32..64].copy_from_slice(signature.s().to_be_bytes::<32>().as_slice()); - signal_slot_proof[64] = u8::from(signature.v()) + 27; + if let Some((message_from_l2, signal_slot)) = + l2_el.find_message_and_signal_slot(block_id).await? + { + let signal_slot_proof = l2_el + .get_hop_proof(signal_slot, block_id, state_root) + .await?; return Ok(Some(L1Call { message_from_l2, - signal_slot_proof: Bytes::from(signal_slot_proof), + signal_slot_proof, })); } diff --git a/realtime/src/node/proposal_manager/mod.rs b/realtime/src/node/proposal_manager/mod.rs index 6b303f29..f85c3d6f 100644 --- a/realtime/src/node/proposal_manager/mod.rs +++ b/realtime/src/node/proposal_manager/mod.rs @@ -341,7 +341,7 @@ impl BatchManager { .bridge_handler .lock() .await - .find_l1_call(preconfed_block.number) + .find_l1_call(preconfed_block.number, preconfed_block.state_root) .await? 
{ self.batch_builder.add_l1_call(l1_call)?; diff --git a/realtime/src/shared_abi/SignalService.json b/realtime/src/shared_abi/SignalService.json new file mode 100644 index 00000000..05a35bb2 --- /dev/null +++ b/realtime/src/shared_abi/SignalService.json @@ -0,0 +1 @@ +{"abi":[{"type":"function","name":"getCheckpoint","inputs":[{"name":"_blockNumber","type":"uint48","internalType":"uint48"}],"outputs":[{"name":"","type":"tuple","internalType":"struct ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"stateMutability":"view"},{"type":"function","name":"isSignalSent","inputs":[{"name":"_app","type":"address","internalType":"address"},{"name":"_signal","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"isSignalSent","inputs":[{"name":"_signalSlot","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"view"},{"type":"function","name":"proveSignalReceived","inputs":[{"name":"_chainId","type":"uint64","internalType":"uint64"},{"name":"_app","type":"address","internalType":"address"},{"name":"_signal","type":"bytes32","internalType":"bytes32"},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[{"name":"numCacheOps_","type":"uint256","internalType":"uint256"}],"stateMutability":"nonpayable"},{"type":"function","name":"saveCheckpoint","inputs":[{"name":"_checkpoint","type":"tuple","internalType":"struct 
ICheckpointStore.Checkpoint","components":[{"name":"blockNumber","type":"uint48","internalType":"uint48"},{"name":"blockHash","type":"bytes32","internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","internalType":"bytes32"}]}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"sendSignal","inputs":[{"name":"_signal","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"slot_","type":"bytes32","internalType":"bytes32"}],"stateMutability":"nonpayable"},{"type":"function","name":"setSignalsReceived","inputs":[{"name":"_signalSlots","type":"bytes32[]","internalType":"bytes32[]"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"verifySignalReceived","inputs":[{"name":"_chainId","type":"uint64","internalType":"uint64"},{"name":"_app","type":"address","internalType":"address"},{"name":"_signal","type":"bytes32","internalType":"bytes32"},{"name":"_proof","type":"bytes","internalType":"bytes"}],"outputs":[],"stateMutability":"view"},{"type":"event","name":"CheckpointSaved","inputs":[{"name":"blockNumber","type":"uint48","indexed":true,"internalType":"uint48"},{"name":"blockHash","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"stateRoot","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false},{"type":"event","name":"SignalSent","inputs":[{"name":"app","type":"address","indexed":false,"internalType":"address"},{"name":"signal","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"slot","type":"bytes32","indexed":false,"internalType":"bytes32"},{"name":"value","type":"bytes32","indexed":false,"internalType":"bytes32"}],"anonymous":false}]} diff --git a/realtime/src/shared_abi/bindings.rs b/realtime/src/shared_abi/bindings.rs index 2c6a471e..56bc1268 100644 --- a/realtime/src/shared_abi/bindings.rs +++ b/realtime/src/shared_abi/bindings.rs @@ -10,8 +10,24 @@ sol!( "src/shared_abi/Bridge.json" ); -// SignalSent event emitted by the SignalService contract -sol! 
{ +sol!( #[allow(missing_docs)] - event SignalSent(address app, bytes32 signal, bytes32 slot, bytes32 value); + #[sol(rpc)] + #[derive(Debug)] + SignalService, + "src/shared_abi/SignalService.json" +); + +// HopProof encoding struct for cross-chain signal verification via storage proofs. +// Not part of the SignalService ABI directly — it is the encoding format for the +// `_proof` bytes parameter in proveSignalReceived / verifySignalReceived. +sol! { + struct HopProof { + uint64 chainId; + uint64 blockId; + bytes32 rootHash; + uint8 cacheOption; + bytes[] accountProof; + bytes[] storageProof; + } } From 32f681f8d858ceb78451458598efcf9a6d71f9aa Mon Sep 17 00:00:00 2001 From: Justin Chan Date: Fri, 27 Mar 2026 17:13:57 +1100 Subject: [PATCH 13/14] fix: temp push image to prod --- .github/workflows/node_docker_build.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/node_docker_build.yml b/.github/workflows/node_docker_build.yml index 01048c93..1ee29a6a 100644 --- a/.github/workflows/node_docker_build.yml +++ b/.github/workflows/node_docker_build.yml @@ -19,7 +19,7 @@ env: DOCKER_PUBLIC_REGISTRY: docker.io DOCKER_PUBLIC_REPOSITORY: nethermind/catalyst-node DOCKER_REGISTRY: nethermind.jfrog.io - DOCKER_REPOSITORY_STAGING: core-oci-local-staging/catalyst-node + DOCKER_REPOSITORY_PROD: core-oci-local-prod/catalyst-node MASTER_BRANCH: refs/heads/master jobs: @@ -66,7 +66,7 @@ jobs: file: Dockerfile platforms: ${{ matrix.platform }} push: true - outputs: type=image,name=${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_STAGING }},push-by-digest=true,name-canonical=true + outputs: type=image,name=${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_PROD }},push-by-digest=true,name-canonical=true - name: Set digest output id: digest @@ -129,7 +129,7 @@ jobs: id: meta uses: docker/metadata-action@v5 with: - images: ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_STAGING }} + images: ${{ env.DOCKER_REGISTRY }}/${{ 
env.DOCKER_REPOSITORY_PROD }} tags: ${{ steps.tags.outputs.tag_list }} - name: Create and push manifest list @@ -151,7 +151,7 @@ jobs: - name: Tag with commit SHA run: | docker buildx imagetools create \ - -t ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_STAGING }}:${{ steps.sha.outputs.tag }} \ + -t ${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_PROD }}:${{ steps.sha.outputs.tag }} \ ${{ needs.build.outputs.digest-amd64 }} \ ${{ needs.build.outputs.digest-arm64 }} @@ -175,7 +175,7 @@ jobs: set -e echo "Tags to promote: $PROMOTE_TAGS" for tag in $PROMOTE_TAGS; do - source_image="${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_STAGING }}:${tag}" + source_image="${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_PROD }}:${tag}" prod_image="${{ env.DOCKER_PUBLIC_REGISTRY }}/${{ env.DOCKER_PUBLIC_REPOSITORY }}:${tag}" echo "" echo "=== Promoting tag: ${tag} ===" @@ -219,7 +219,7 @@ jobs: echo "" >> $GITHUB_STEP_SUMMARY echo "### Repository Information" >> $GITHUB_STEP_SUMMARY - echo "- **Staging**: \`${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_STAGING }}\`" >> $GITHUB_STEP_SUMMARY + echo "- **Production**: \`${{ env.DOCKER_REGISTRY }}/${{ env.DOCKER_REPOSITORY_PROD }}\`" >> $GITHUB_STEP_SUMMARY echo "- **Production**: \`${{ env.DOCKER_PUBLIC_REGISTRY }}/${{ env.DOCKER_PUBLIC_REPOSITORY }}\`" >> $GITHUB_STEP_SUMMARY echo "- **Platforms**: linux/amd64, linux/arm64" >> $GITHUB_STEP_SUMMARY echo "- **Commit**: \`${{ github.sha }}\` (\`${{ steps.sha.outputs.short }}\`)" >> $GITHUB_STEP_SUMMARY From 6c1b268ebdf410006642c755df5de08100756345 Mon Sep 17 00:00:00 2001 From: AnshuJalan Date: Sat, 28 Mar 2026 13:09:17 +0530 Subject: [PATCH 14/14] feat: resilience --- realtime/src/node/mod.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/realtime/src/node/mod.rs b/realtime/src/node/mod.rs index 8fb67a0d..33c5cf28 100644 --- a/realtime/src/node/mod.rs +++ b/realtime/src/node/mod.rs @@ -171,7 +171,12 @@ impl Node { ) 
.await?; } else { - error!("Async submission failed: {}", e); + error!("Async submission failed: {}. Restarting node.", e); + self.cancel_token.cancel_on_critical_error(); + return Err(anyhow::anyhow!( + "Async submission failed: {}", + e + )); } } } @@ -187,8 +192,11 @@ impl Node { .await; } - // Preconfirmation phase — runs even while proof is being fetched async - if current_status.is_preconfer() && current_status.is_driver_synced() { + // Preconfirmation phase — skip if a proof request or submission is already in progress + if current_status.is_preconfer() + && current_status.is_driver_synced() + && !self.proposal_manager.is_submission_in_progress() + { if !self .head_verifier .verify(l2_slot_info.parent_id(), l2_slot_info.parent_hash())