diff --git a/node/core/executor.go b/node/core/executor.go index 0d8895658..a55c8c1bd 100644 --- a/node/core/executor.go +++ b/node/core/executor.go @@ -418,3 +418,202 @@ func (e *Executor) getParamsAndValsAtHeight(height int64) (*tmproto.BatchParams, func (e *Executor) L2Client() *types.RetryableClient { return e.l2Client } + +// ============================================================================ +// L2NodeV2 interface implementation for sequencer mode +// ============================================================================ + +// RequestBlockDataV2 requests block data based on parent hash. +// This differs from RequestBlockData which uses height. +// Using parent hash allows for proper fork chain handling in the future. +func (e *Executor) RequestBlockDataV2(parentHashBytes []byte) (*l2node.BlockV2, bool, error) { + if e.l1MsgReader == nil { + return nil, false, fmt.Errorf("RequestBlockDataV2 is not allowed to be called") + } + parentHash := common.BytesToHash(parentHashBytes) + + // Read L1 messages + fromIndex := e.nextL1MsgIndex + l1Messages := e.l1MsgReader.ReadL1MessagesInRange(fromIndex, fromIndex+e.maxL1MsgNumPerBlock-1) + transactions := make(eth.Transactions, len(l1Messages)) + + collectedL1Msgs := false + if len(l1Messages) > 0 { + queueIndex := fromIndex + for i, l1Message := range l1Messages { + transaction := eth.NewTx(&l1Message.L1MessageTx) + transactions[i] = transaction + if queueIndex != l1Message.QueueIndex { + e.logger.Error("unexpected l1message queue index", "expected", queueIndex, "actual", l1Message.QueueIndex) + return nil, false, types.ErrInvalidL1MessageOrder + } + queueIndex++ + } + collectedL1Msgs = true + } + + // Call geth to assemble block based on parent hash + l2Block, err := e.l2Client.AssembleL2BlockV2(context.Background(), parentHash, transactions) + if err != nil { + e.logger.Error("failed to assemble block v2", "parentHash", parentHash.Hex(), "error", err) + return nil, false, err + } + + e.logger.Info("AssembleL2BlockV2 success ", + "number", l2Block.Number, + "hash", l2Block.Hash.Hex(), + "parentHash", l2Block.ParentHash.Hex(), + "tx length", len(l2Block.Transactions), + "collectedL1Msgs", collectedL1Msgs, + ) + + return executableL2DataToBlockV2(l2Block), collectedL1Msgs, nil +} + +// ApplyBlockV2 applies a block to the L2 execution layer. +// This is used in sequencer mode after block validation. +func (e *Executor) ApplyBlockV2(block *l2node.BlockV2) error { + // Convert BlockV2 to ExecutableL2Data for geth + execBlock := blockV2ToExecutableL2Data(block) + + // Check if block is already applied + height, err := e.l2Client.BlockNumber(context.Background()) + if err != nil { + return err + } + + if execBlock.Number <= height { + e.logger.Info("ignore it, the block was already applied", "block number", execBlock.Number) + return nil + } + + // We only accept continuous blocks + if execBlock.Number > height+1 { + return types.ErrWrongBlockNumber + } + + // Apply the block (no batch hash in sequencer mode for now) + err = e.l2Client.NewL2Block(context.Background(), execBlock, nil) + if err != nil { + e.logger.Error("failed to apply block v2", "error", err) + return err + } + + // Update L1 message index + e.updateNextL1MessageIndex(execBlock) + + e.metrics.Height.Set(float64(execBlock.Number)) + e.logger.Info("ApplyBlockV2 success", "number", execBlock.Number, "hash", execBlock.Hash.Hex()) + + return nil +} + +// GetBlockByNumber retrieves a block by its number from the L2 execution layer. 
+// Uses standard eth_getBlockByNumber JSON-RPC. +func (e *Executor) GetBlockByNumber(height uint64) (*l2node.BlockV2, error) { + block, err := e.l2Client.BlockByNumber(context.Background(), big.NewInt(int64(height))) + if err != nil { + e.logger.Error("failed to get block by number", "height", height, "error", err) + return nil, err + } + return ethBlockToBlockV2(block) +} + +// GetLatestBlockV2 returns the latest block from the L2 execution layer. +// Uses standard eth_getBlockByNumber JSON-RPC with nil (latest). +func (e *Executor) GetLatestBlockV2() (*l2node.BlockV2, error) { + block, err := e.l2Client.BlockByNumber(context.Background(), nil) + if err != nil { + e.logger.Error("failed to get latest block", "error", err) + return nil, err + } + return ethBlockToBlockV2(block) +} + +// ==================== Type Conversion Functions ==================== + +// ethBlockToBlockV2 converts eth.Block to BlockV2 +func ethBlockToBlockV2(block *eth.Block) (*l2node.BlockV2, error) { + if block == nil { + return nil, fmt.Errorf("block is nil") + } + header := block.Header() + + // Encode transactions using MarshalBinary (handles EIP-2718 typed transactions correctly) + // Initialize as empty slice (not nil) to ensure JSON serialization produces [] instead of null + txs := make([][]byte, 0, len(block.Transactions())) + for _, tx := range block.Transactions() { + bz, err := tx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("failed to marshal tx, error: %v", err) + } + txs = append(txs, bz) + } + + return &l2node.BlockV2{ + ParentHash: header.ParentHash, + Miner: header.Coinbase, + Number: header.Number.Uint64(), + GasLimit: header.GasLimit, + BaseFee: header.BaseFee, + Timestamp: header.Time, + Transactions: txs, + StateRoot: header.Root, + GasUsed: header.GasUsed, + ReceiptRoot: header.ReceiptHash, + LogsBloom: header.Bloom.Bytes(), + NextL1MessageIndex: header.NextL1MsgIndex, + Hash: block.Hash(), + }, nil +} + +// blockV2ToExecutableL2Data converts BlockV2 to ExecutableL2Data +func blockV2ToExecutableL2Data(block *l2node.BlockV2) *catalyst.ExecutableL2Data { + if block == nil { + return nil + } + // Ensure Transactions is not nil (JSON requires [] not null) + txs := block.Transactions + if txs == nil { + txs = make([][]byte, 0) + } + return &catalyst.ExecutableL2Data{ + ParentHash: block.ParentHash, + Miner: block.Miner, + Number: block.Number, + GasLimit: block.GasLimit, + BaseFee: block.BaseFee, + Timestamp: block.Timestamp, + Transactions: txs, + StateRoot: block.StateRoot, + GasUsed: block.GasUsed, + ReceiptRoot: block.ReceiptRoot, + LogsBloom: block.LogsBloom, + WithdrawTrieRoot: block.WithdrawTrieRoot, + NextL1MessageIndex: block.NextL1MessageIndex, + Hash: block.Hash, + } +} + +// executableL2DataToBlockV2 converts ExecutableL2Data to BlockV2 +func executableL2DataToBlockV2(data *catalyst.ExecutableL2Data) *l2node.BlockV2 { + if data == nil { + return nil + } + return &l2node.BlockV2{ + ParentHash: data.ParentHash, + Miner: data.Miner, + Number: data.Number, + GasLimit: data.GasLimit, + BaseFee: data.BaseFee, + Timestamp: data.Timestamp, + Transactions: data.Transactions, + StateRoot: data.StateRoot, + GasUsed: data.GasUsed, + ReceiptRoot: data.ReceiptRoot, + LogsBloom: data.LogsBloom, + WithdrawTrieRoot: data.WithdrawTrieRoot, + NextL1MessageIndex: data.NextL1MessageIndex, + Hash: data.Hash, + } +} diff --git a/node/types/retryable_client.go b/node/types/retryable_client.go index 3d3ad949d..74328e71d 100644 --- a/node/types/retryable_client.go +++ 
b/node/types/retryable_client.go @@ -172,7 +172,25 @@ func (rc *RetryableClient) HeaderByNumber(ctx context.Context, blockNumber *big. if retryErr := backoff.Retry(func() error { resp, respErr := rc.ethClient.HeaderByNumber(ctx, blockNumber) if respErr != nil { - rc.logger.Info("failed to call BlockNumber", "error", respErr) + rc.logger.Info("failed to call HeaderByNumber", "error", respErr) + if retryableError(respErr) { + return respErr + } + err = respErr + } + ret = resp + return nil + }, rc.b); retryErr != nil { + return nil, retryErr + } + return +} + +func (rc *RetryableClient) BlockByNumber(ctx context.Context, blockNumber *big.Int) (ret *eth.Block, err error) { + if retryErr := backoff.Retry(func() error { + resp, respErr := rc.ethClient.BlockByNumber(ctx, blockNumber) + if respErr != nil { + rc.logger.Info("failed to call BlockByNumber", "error", respErr) if retryableError(respErr) { return respErr } @@ -233,3 +251,26 @@ func retryableError(err error) bool { // strings.Contains(err.Error(), Timeout) return !strings.Contains(err.Error(), DiscontinuousBlockError) } + +// ============================================================================ +// L2NodeV2 methods for sequencer mode +// ============================================================================ + +// AssembleL2BlockV2 assembles a L2 block based on parent hash. +func (rc *RetryableClient) AssembleL2BlockV2(ctx context.Context, parentHash common.Hash, transactions eth.Transactions) (ret *catalyst.ExecutableL2Data, err error) { + if retryErr := backoff.Retry(func() error { + resp, respErr := rc.authClient.AssembleL2BlockV2(ctx, parentHash, transactions) + if respErr != nil { + rc.logger.Info("failed to AssembleL2BlockV2", "error", respErr) + if retryableError(respErr) { + return respErr + } + err = respErr + } + ret = resp + return nil + }, rc.b); retryErr != nil { + return nil, retryErr + } + return +} diff --git a/ops/docker-sequencer-test/Dockerfile.l2-geth-test b/ops/docker-sequencer-test/Dockerfile.l2-geth-test new file mode 100644 index 000000000..1c053f44b --- /dev/null +++ b/ops/docker-sequencer-test/Dockerfile.l2-geth-test @@ -0,0 +1,26 @@ +# Build Geth for Sequencer Test +# Build context should be bitget/ (parent of morph) +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu AS builder + +# Copy local go-ethereum (not submodule) +COPY ./go-ethereum /go-ethereum +WORKDIR /go-ethereum + +# Build geth +RUN go run build/ci.go install ./cmd/geth + +# Runtime stage +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu + +RUN apt-get -qq update && apt-get -qq install -y --no-install-recommends \ + ca-certificates bash curl \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ +COPY ./morph/ops/docker-sequencer-test/entrypoint-l2.sh /entrypoint.sh + +VOLUME ["/db"] + +ENTRYPOINT ["/bin/bash", "/entrypoint.sh"] + +EXPOSE 8545 8546 8551 30303 30303/udp diff --git a/ops/docker-sequencer-test/Dockerfile.l2-node-test b/ops/docker-sequencer-test/Dockerfile.l2-node-test new file mode 100644 index 000000000..1ece1eb81 --- /dev/null +++ b/ops/docker-sequencer-test/Dockerfile.l2-node-test @@ -0,0 +1,47 @@ +# Build Stage +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu AS builder + +# First: Copy only go.mod/go.sum files to cache dependencies +# Order matters for cache efficiency + +# Copy go-ethereum dependency files +COPY ./go-ethereum/go.mod ./go-ethereum/go.sum /bitget/go-ethereum/ + +# Copy tendermint dependency files +COPY ./tendermint/go.mod 
./tendermint/go.sum /bitget/tendermint/ + +# Copy morph go.work and all module dependency files +COPY ./morph/go.work ./morph/go.work.sum /bitget/morph/ +COPY ./morph/node/go.mod ./morph/node/go.sum /bitget/morph/node/ +COPY ./morph/bindings/go.mod ./morph/bindings/go.sum /bitget/morph/bindings/ +COPY ./morph/contracts/go.mod ./morph/contracts/go.sum /bitget/morph/contracts/ +COPY ./morph/oracle/go.mod ./morph/oracle/go.sum /bitget/morph/oracle/ +COPY ./morph/tx-submitter/go.mod ./morph/tx-submitter/go.sum /bitget/morph/tx-submitter/ +COPY ./morph/ops/l2-genesis/go.mod ./morph/ops/l2-genesis/go.sum /bitget/morph/ops/l2-genesis/ +COPY ./morph/ops/tools/go.mod ./morph/ops/tools/go.sum /bitget/morph/ops/tools/ +COPY ./morph/token-price-oracle/go.mod ./morph/token-price-oracle/go.sum /bitget/morph/token-price-oracle/ + +# Download dependencies (this layer is cached if go.mod/go.sum don't change) +WORKDIR /bitget/morph/node +RUN go mod download -x + +# Now copy all source code +COPY ./go-ethereum /bitget/go-ethereum +COPY ./tendermint /bitget/tendermint +COPY ./morph /bitget/morph + +# Build (no need to download again, just compile) +WORKDIR /bitget/morph/node +RUN make build + +# Final Stage +FROM ghcr.io/morph-l2/go-ubuntu-builder:go-1.24-ubuntu + +RUN apt-get -qq update \ + && apt-get -qq install -y --no-install-recommends ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +COPY --from=builder /bitget/morph/node/build/bin/tendermint /usr/local/bin/ +COPY --from=builder /bitget/morph/node/build/bin/morphnode /usr/local/bin/ + +CMD ["morphnode", "--home", "/data"] diff --git a/ops/docker-sequencer-test/README.md b/ops/docker-sequencer-test/README.md new file mode 100644 index 000000000..156f2d64a --- /dev/null +++ b/ops/docker-sequencer-test/README.md @@ -0,0 +1 @@ +This directory is intended for Docker testing purposes only and may be removed in the future. \ No newline at end of file diff --git a/ops/docker-sequencer-test/docker-compose.override.yml b/ops/docker-sequencer-test/docker-compose.override.yml new file mode 100644 index 000000000..50d960e27 --- /dev/null +++ b/ops/docker-sequencer-test/docker-compose.override.yml @@ -0,0 +1,43 @@ +# Override file to use test images +# Copy this to ops/docker/docker-compose.override.yml before running test +version: '3.8' + +services: + morph-geth-0: + image: morph-geth-test:latest + build: + context: ../.. + dockerfile: ops/docker-sequencer-test/Dockerfile.l2-geth-test + + morph-geth-1: + image: morph-geth-test:latest + + morph-geth-2: + image: morph-geth-test:latest + + # morph-geth-3: + # image: morph-geth-test:latest + + node-0: + image: morph-node-test:latest + build: + context: ../.. 
+ dockerfile: ops/docker-sequencer-test/Dockerfile.l2-node-test + environment: + - SEQUENCER_PRIVATE_KEY=${SEQUENCER_PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80} + + node-1: + image: morph-node-test:latest + + node-2: + image: morph-node-test:latest + + # node-3: + # image: morph-node-test:latest + + sentry-geth-0: + image: morph-geth-test:latest + + sentry-node-0: + image: morph-node-test:latest + diff --git a/ops/docker-sequencer-test/entrypoint-l2.sh b/ops/docker-sequencer-test/entrypoint-l2.sh new file mode 100644 index 000000000..04dfed476 --- /dev/null +++ b/ops/docker-sequencer-test/entrypoint-l2.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -e + +GETH_DATA_DIR=${GETH_DATA_DIR:-/db} +GENESIS_FILE_PATH=${GENESIS_FILE_PATH:-/genesis.json} +JWT_SECRET_PATH=${JWT_SECRET_PATH:-/jwt-secret.txt} + +# Initialize geth if not already done +if [ ! -d "$GETH_DATA_DIR/geth/chaindata" ]; then + echo "Initializing geth with genesis file..." + geth init --datadir "$GETH_DATA_DIR" "$GENESIS_FILE_PATH" +fi + +echo "Starting geth..." +exec geth \ + --datadir "$GETH_DATA_DIR" \ + --http \ + --http.addr "0.0.0.0" \ + --http.port 8545 \ + --http.api "eth,net,web3,debug,txpool,engine" \ + --http.corsdomain "*" \ + --http.vhosts "*" \ + --ws \ + --ws.addr "0.0.0.0" \ + --ws.port 8546 \ + --ws.api "eth,net,web3,debug,txpool,engine" \ + --ws.origins "*" \ + --authrpc.addr "0.0.0.0" \ + --authrpc.port 8551 \ + --authrpc.vhosts "*" \ + --authrpc.jwtsecret "$JWT_SECRET_PATH" \ + --networkid 53077 \ + --nodiscover \ + --syncmode full \ + --gcmode archive \ + --metrics \ + --metrics.addr "0.0.0.0" \ + --pprof \ + --pprof.addr "0.0.0.0" \ + --verbosity 3 \ + "$@" + diff --git a/ops/docker-sequencer-test/run-test.sh b/ops/docker-sequencer-test/run-test.sh new file mode 100755 index 000000000..823ba5273 --- /dev/null +++ b/ops/docker-sequencer-test/run-test.sh @@ -0,0 +1,553 @@ +#!/bin/bash +# Sequencer Upgrade Test Runner +# Reuses devnet-morph logic but with test-specific docker images + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +MORPH_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +BITGET_ROOT="$(cd "$MORPH_ROOT/.." && pwd)" +OPS_DIR="$MORPH_ROOT/ops" +DOCKER_DIR="$OPS_DIR/docker" +DEVNET_DIR="$OPS_DIR/devnet-morph" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } +log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +# Configuration +UPGRADE_HEIGHT=${UPGRADE_HEIGHT:-50} +L2_RPC="http://127.0.0.1:8545" +L2_RPC_NODE1="http://127.0.0.1:8645" + +# ========== Helper Functions ========== + +wait_for_rpc() { + local rpc_url="$1" + local max_retries=${2:-60} + local retry=0 + + log_info "Waiting for RPC at $rpc_url..." + while [ $retry -lt $max_retries ]; do + if curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$rpc_url" 2>/dev/null | grep -q "result"; then + log_success "RPC is ready!" 
+ return 0 + fi + retry=$((retry + 1)) + sleep 2 + done + log_error "Timeout waiting for RPC" + return 1 +} + +get_block_number() { + local rpc_url="${1:-$L2_RPC}" + local result + result=$(curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$rpc_url" 2>/dev/null) + echo "$result" | grep -o '"result":"[^"]*"' | cut -d'"' -f4 | xargs printf "%d" 2>/dev/null || echo "0" +} + +wait_for_block() { + local target_height=$1 + local rpc_url="${2:-$L2_RPC}" + + log_info "Waiting for block $target_height..." + while true; do + local current=$(get_block_number "$rpc_url") + if [ "$current" -ge "$target_height" ]; then + log_success "Reached block $current" + return 0 + fi + echo -ne "\r Current block: $current / $target_height" + sleep 2 + done +} + +# ========== Setup Functions ========== + +# Update upgrade height in tendermint code +set_upgrade_height() { + local height=$1 + log_info "Setting upgrade height to $height..." + + local upgrade_file="$MORPH_ROOT/../tendermint/upgrade/upgrade.go" + if [ -f "$upgrade_file" ]; then + sed -i '' "s/UpgradeBlockHeight int64 = [0-9]*/UpgradeBlockHeight int64 = $height/" "$upgrade_file" + log_success "Updated upgrade height in $upgrade_file" + else + log_warn "upgrade.go not found at $upgrade_file" + fi +} + +# Build test images (with -test suffix) +# Uses bitget/ as build context to access local go-ethereum and tendermint +build_test_images() { + log_info "Building test Docker images..." + log_info "Using build context: $BITGET_ROOT" + + # Build go-ubuntu-builder if needed + cd "$MORPH_ROOT" + make go-ubuntu-builder + + # Build from bitget/ directory to access all repos + cd "$BITGET_ROOT" + + # # Copy go module cache to avoid network downloads + # if [ -d "$HOME/go/pkg/mod" ]; then + # log_info "Copying go module cache to build context..." + # rm -rf .gomodcache + # cp -r "$HOME/go/pkg/mod" .gomodcache + # else + # log_warn "Go module cache not found at $HOME/go/pkg/mod" + # log_warn "Build may fail due to network issues" + # fi + + # Build test geth image + log_info "Building morph-geth-test (using local go-ethereum)..." + docker build -t morph-geth-test:latest \ + -f morph/ops/docker-sequencer-test/Dockerfile.l2-geth-test . + + # Build test node image + log_info "Building morph-node-test (using local go-ethereum + tendermint)..." + docker build -t morph-node-test:latest \ + -f morph/ops/docker-sequencer-test/Dockerfile.l2-node-test . + + # # Cleanup go module cache copy + # rm -rf .gomodcache + + log_success "Test images built!" +} + +# Run full devnet setup (reusing existing logic, but skip L2 startup) +setup_devnet() { + log_info "Running devnet setup..." + cd "$MORPH_ROOT" + + # Note: upgrade height should already be set before build_test_images + + # Step 1: Start L1 and setup tendermint nodes + # Note: main.py calls setup_devnet_nodes() before devnet.main() + log_info "Step 1: Starting L1 and setting up tendermint nodes..." + python3 "$DEVNET_DIR/main.py" --polyrepo-dir="$MORPH_ROOT" --only-l1 + + # Step 2: Deploy contracts and generate L2 genesis (without starting L2) + log_info "Step 2: Deploying contracts and generating L2 genesis..." 
+ python3 -c " +import sys +import os +import time +import re +import fileinput +sys.path.insert(0, '$DEVNET_DIR') +import devnet +from devnet import run_command, read_json, write_json, test_port, log + +pjoin = os.path.join +polyrepo_dir = '$MORPH_ROOT' +L2_dir = pjoin(polyrepo_dir, 'ops', 'l2-genesis') +devnet_dir = pjoin(polyrepo_dir, 'ops', 'l2-genesis', '.devnet') +ops_dir = pjoin(polyrepo_dir, 'ops', 'docker') +contracts_dir = pjoin(polyrepo_dir, 'contracts') + +os.makedirs(devnet_dir, exist_ok=True) + +# Generate network config +devnet_cfg_orig = pjoin(L2_dir, 'deploy-config', 'devnet-deploy-config.json') +deploy_config = read_json(devnet_cfg_orig) +deploy_config['l1GenesisBlockTimestamp'] = '0x{:x}'.format(int(time.time())) +deploy_config['l1StartingBlockTag'] = 'earliest' +temp_deploy_config = pjoin(devnet_dir, 'deploy-config.json') +write_json(temp_deploy_config, deploy_config) + +# Deploy L1 contracts +deployment_dir = pjoin(devnet_dir, 'devnetL1.json') +run_command(['rm', '-f', deployment_dir], env={}, cwd=contracts_dir) +log.info('Deploying L1 Proxy contracts...') +run_command(['yarn', 'build'], env={}, cwd=contracts_dir) +run_command(['npx', 'hardhat', 'deploy', '--network', 'l1', '--storagepath', deployment_dir, '--concurrent', 'true'], env={}, cwd=contracts_dir) + +# Generate L2 genesis +log.info('Generating L2 genesis and rollup configs...') +run_command([ + 'env', 'CGO_ENABLED=1', 'CGO_LDFLAGS=\"-ldl\"', + 'go', 'run', 'cmd/main.go', 'genesis', 'l2', + '--l1-rpc', 'http://localhost:9545', + '--deploy-config', temp_deploy_config, + '--deployment-dir', deployment_dir, + '--outfile.l2', pjoin(devnet_dir, 'genesis-l2.json'), + '--outfile.genbatchheader', pjoin(devnet_dir, 'genesis-batch-header.json'), + '--outfile.rollup', pjoin(devnet_dir, 'rollup.json') +], cwd=L2_dir) + +# Initialize contracts +log.info('Deploying L1 Impl contracts and initialize...') +rollup_cfg = read_json(pjoin(devnet_dir, 'rollup.json')) +genesis_batch_header = rollup_cfg['genesis_batch_header'] +contracts_config = pjoin(contracts_dir, 'src', 'deploy-config', 'l1.ts') +pattern3 = re.compile(\"batchHeader: '.*'\") +for line in fileinput.input(contracts_config, inplace=True): + modified_line = re.sub(pattern3, f\"batchHeader: '{genesis_batch_header}'\", line) + print(modified_line, end='') +run_command(['npx', 'hardhat', 'initialize', '--network', 'l1', '--storagepath', deployment_dir, '--concurrent', 'true'], env={}, cwd=contracts_dir) + +# Staking +log.info('Staking sequencers...') +addresses = {} +deployment = read_json(deployment_dir) +for d in deployment: + addresses[d['name']] = d['address'] +for i in range(4): + run_command(['cast', 'send', addresses['Proxy__L1Staking'], + 'register(bytes32,bytes memory)', + deploy_config['l2StakingTmKeys'][i], + deploy_config['l2StakingBlsKeys'][i], + '--rpc-url', 'http://127.0.0.1:9545', + '--value', '1ether', + '--private-key', deploy_config['l2StakingPks'][i] + ]) + +# Update .env file +log.info('Updating .env file...') +env_file = pjoin(ops_dir, '.env') +env_data = {} +with open(env_file, 'r+') as envfile: + env_content = envfile.readlines() + for line in env_content: + line = line.strip() + if line and not line.startswith('#'): + parts = line.split('=', 1) + if len(parts) == 2: + env_data[parts[0].strip()] = parts[1].strip() + env_data['L1_CROSS_DOMAIN_MESSENGER'] = addresses['Proxy__L1CrossDomainMessenger'] + env_data['MORPH_PORTAL'] = addresses['Proxy__L1MessageQueueWithGasPriceOracle'] + env_data['MORPH_ROLLUP'] = addresses['Proxy__Rollup'] + 
env_data['MORPH_L1STAKING'] = addresses['Proxy__L1Staking'] + envfile.seek(0) + for key, value in env_data.items(): + envfile.write(f'{key}={value}\n') + envfile.truncate() + +log.info('Contract deployment and genesis generation complete!') +log.info('Skipping L2 startup - will be done with test images.') +" + + log_success "Devnet setup complete (L2 not started yet)" +} + +# Docker compose command with override file +# Note: -f must explicitly include override file when using non-default compose file name +COMPOSE_CMD="docker compose -f docker-compose-4nodes.yml -f docker-compose.override.yml" +COMPOSE_CMD_NO_OVERRIDE="docker compose -f docker-compose-4nodes.yml" + +# Copy override file to use test images +setup_override() { + log_info "Setting up docker-compose override for test images..." + cp "$SCRIPT_DIR/docker-compose.override.yml" "$DOCKER_DIR/docker-compose.override.yml" + log_success "Override file copied to $DOCKER_DIR/" +} + +# Remove override file +remove_override() { + rm -f "$DOCKER_DIR/docker-compose.override.yml" +} + +# Start L2 with test images +start_l2_test() { + log_info "Starting L2 with test images..." + cd "$DOCKER_DIR" + + # Setup override file + setup_override + + # Read the .env file to get contract addresses + source .env 2>/dev/null || true + + # Set sequencer private key + export SEQUENCER_PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + + # Stop any existing L2 containers + $COMPOSE_CMD stop \ + morph-geth-0 morph-geth-1 morph-geth-2 morph-geth-3 \ + node-0 node-1 node-2 node-3 2>/dev/null || true + + # Note: Test images should already be built by build_test_images() + # Uncomment below if you need to rebuild during start + # log_info "Building L2 containers with test images..." + # $COMPOSE_CMD build morph-geth-0 node-0 + + # Start L2 geth nodes + log_info "Starting L2 geth nodes..." + $COMPOSE_CMD up -d morph-geth-0 morph-geth-1 morph-geth-2 morph-geth-3 + + sleep 5 + + # Start L2 tendermint nodes + log_info "Starting L2 tendermint nodes..." + $COMPOSE_CMD up -d node-0 node-1 node-2 node-3 + + wait_for_rpc "$L2_RPC" + log_success "L2 is running with test images!" +} + +# ========== Test Functions ========== + +test_pbft_mode() { + log_info "========== Phase 1: Testing PBFT Mode ==========" + + local start_block=$(get_block_number) + log_info "Starting block: $start_block" + + # Wait for some blocks + local target=$((start_block + 10)) + wait_for_block $target + + # Verify nodes in sync + local block0=$(get_block_number "$L2_RPC") + local block1=$(get_block_number "$L2_RPC_NODE1") + + local diff=$((block0 - block1)) + if [ ${diff#-} -le 2 ]; then + log_success "Nodes in sync (node0: $block0, node1: $block1)" + else + log_error "Nodes out of sync!" + return 1 + fi +} + +test_upgrade() { + log_info "========== Phase 2: Waiting for Upgrade ==========" + log_info "Upgrade height: $UPGRADE_HEIGHT" + + wait_for_block $UPGRADE_HEIGHT + sleep 10 + + # Verify network continues + local post_upgrade=$(get_block_number) + wait_for_block $((post_upgrade + 5)) + + log_success "Upgrade completed! Network continues producing blocks." 
+} + +test_sequencer_mode() { + log_info "========== Phase 3: Testing Sequencer Mode ==========" + + local start_block=$(get_block_number) + wait_for_block $((start_block + 20)) + + local block0=$(get_block_number "$L2_RPC") + local block1=$(get_block_number "$L2_RPC_NODE1") + + local diff=$((block0 - block1)) + if [ ${diff#-} -le 2 ]; then + log_success "Nodes in sync after upgrade (node0: $block0, node1: $block1)" + else + log_error "Nodes out of sync after upgrade!" + return 1 + fi +} + +test_fullnode_sync() { + log_info "========== Phase 4: Testing Fullnode Sync ==========" + + local current_height=$(get_block_number) + log_info "Current height: $current_height" + + cd "$DOCKER_DIR" + + # Start sentry node (fullnode) + log_info "Starting fullnode (sentry-node-0)..." + $COMPOSE_CMD up -d sentry-geth-0 sentry-node-0 + + sleep 10 + wait_for_rpc "http://127.0.0.1:8945" + + # Wait for sync + local target_sync=$((current_height - 5)) + local max_wait=300 + local waited=0 + + while [ $waited -lt $max_wait ]; do + local fn_block=$(get_block_number "http://127.0.0.1:8945") + if [ "$fn_block" -ge "$target_sync" ]; then + log_success "Fullnode synced to block $fn_block" + return 0 + fi + echo -ne "\r Fullnode: $fn_block / $target_sync" + sleep 5 + waited=$((waited + 5)) + done + + log_error "Fullnode sync timeout" + return 1 +} + +# ========== Transaction Generator ========== + +start_tx_generator() { + log_info "Starting transaction generator..." + + # Simple tx generator using cast + ( + while true; do + RANDOM_ADDR="0x$(openssl rand -hex 20)" + cast send --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \ + --rpc-url "$L2_RPC" \ + --value 1wei \ + "$RANDOM_ADDR" 2>/dev/null || true + sleep ${TX_INTERVAL:-5} + done + ) & + TX_GEN_PID=$! + log_info "TX generator started (PID: $TX_GEN_PID)" +} + +stop_tx_generator() { + if [ -n "$TX_GEN_PID" ]; then + kill $TX_GEN_PID 2>/dev/null || true + log_info "TX generator stopped" + fi +} + +# ========== Cleanup ========== + +cleanup() { + log_info "Cleaning up..." + stop_tx_generator + cd "$DOCKER_DIR" + $COMPOSE_CMD_NO_OVERRIDE down -v 2>/dev/null || true + remove_override +} + +# ========== Main Commands ========== + +run_full_test() { + log_info "==========================================" + log_info " Sequencer Upgrade Test" + log_info " Upgrade Height: $UPGRADE_HEIGHT" + log_info "==========================================" + + trap cleanup EXIT + + # Set upgrade height BEFORE building (so it's compiled into the binary) + set_upgrade_height "$UPGRADE_HEIGHT" + + # Build test images (now with correct upgrade height) + build_test_images + + # Setup devnet (L1 + contracts + L2 genesis) + setup_devnet + + # Start L2 with test images + start_l2_test + + # Start tx generator + start_tx_generator + + # Run tests + test_pbft_mode + test_upgrade + test_sequencer_mode + test_fullnode_sync + + stop_tx_generator + + log_success "==========================================" + log_success " ALL TESTS PASSED!" 
+ log_success "==========================================" +} + +show_status() { + echo "Node 1: Block $(get_block_number http://127.0.0.1:8645)" + echo "Node 2: Block $(get_block_number http://127.0.0.1:8745)" + echo "Node 3: Block $(get_block_number http://127.0.0.1:8845)" + echo "Node 0 (seq-0): Block $(get_block_number http://127.0.0.1:8545)" + echo "Sentry: Block $(get_block_number http://127.0.0.1:8945 2>/dev/null || echo 'N/A')" +} + +show_logs() { + cd "$DOCKER_DIR" + $COMPOSE_CMD_NO_OVERRIDE logs -f "$@" +} + +# ========== Command Parsing ========== + +case "${1:-}" in + build) + build_test_images + ;; + setup) + setup_devnet + ;; + start) + start_l2_test + ;; + stop) + cd "$DOCKER_DIR" + $COMPOSE_CMD_NO_OVERRIDE down + ;; + clean) + cleanup + # Also clean L2 genesis + rm -rf "$OPS_DIR/l2-genesis/.devnet" + rm -rf "$DOCKER_DIR/.devnet" + ;; + logs) + shift + show_logs "$@" + ;; + test) + run_full_test + ;; + tx) + start_tx_generator + wait + ;; + status) + show_status + ;; + upgrade-height) + set_upgrade_height "${2:-50}" + ;; + *) + echo "Sequencer Upgrade Test Runner" + echo "" + echo "Usage: $0 {build|setup|start|stop|clean|logs|test|tx|status|upgrade-height}" + echo "" + echo "Commands:" + echo " build - Build test Docker images (morph-geth-test, morph-node-test)" + echo " setup - Run full devnet setup (L1 + contracts + L2 genesis)" + echo " start - Start L2 nodes with test images" + echo " stop - Stop all containers" + echo " clean - Stop and remove all containers and data" + echo " logs [service] - Show container logs" + echo " test - Run full upgrade test" + echo " tx - Start transaction generator" + echo " status - Show current block numbers" + echo " upgrade-height N - Set upgrade height to N" + echo "" + echo "Environment Variables:" + echo " UPGRADE_HEIGHT - Block height for upgrade (default: 50)" + echo " TX_INTERVAL - Seconds between txs (default: 5)" + echo "" + echo "Test Flow:" + echo " 1. build - Build test images" + echo " 2. setup - Deploy L1, contracts, generate L2 genesis" + echo " 3. start - Start L2 with test images" + echo " 4. test - Run PBFT -> Upgrade -> Sequencer -> Fullnode tests" + echo "" + echo "Quick Start:" + echo " UPGRADE_HEIGHT=50 $0 test" + ;; +esac diff --git a/ops/docker-sequencer-test/scripts/tx-generator.sh b/ops/docker-sequencer-test/scripts/tx-generator.sh new file mode 100644 index 000000000..2311a64d5 --- /dev/null +++ b/ops/docker-sequencer-test/scripts/tx-generator.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# Transaction Generator for Sequencer Test +# Sends random transactions to keep the network active + +set -e + +L2_RPC="${L2_RPC:-http://morph-geth-0:8545}" +INTERVAL="${TX_INTERVAL:-5}" # seconds between txs +PRIVATE_KEY="${PRIVATE_KEY:-0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80}" + +# Wait for L2 to be ready +echo "Waiting for L2 RPC to be ready..." +while true; do + if curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$L2_RPC" | grep -q "result"; then + echo "L2 RPC is ready!" + break + fi + echo "Waiting..." 
+ sleep 2 +done + +# Get initial nonce +get_nonce() { + curl -s -X POST -H "Content-Type: application/json" \ + --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionCount\",\"params\":[\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\",\"latest\"],\"id\":1}" \ + "$L2_RPC" | grep -o '"result":"[^"]*"' | cut -d'"' -f4 +} + +# Get current block number +get_block_number() { + curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ + "$L2_RPC" | grep -o '"result":"[^"]*"' | cut -d'"' -f4 +} + +echo "Starting transaction generator..." +echo "L2 RPC: $L2_RPC" +echo "Interval: ${INTERVAL}s" + +NONCE_HEX=$(get_nonce) +NONCE=$((NONCE_HEX)) +TX_COUNT=0 + +while true; do + BLOCK=$(get_block_number) + BLOCK_DEC=$((BLOCK)) + + # Generate random recipient address + RANDOM_SUFFIX=$(od -An -N4 -tx1 /dev/urandom | tr -d ' ') + TO_ADDR="0x000000000000000000000000000000${RANDOM_SUFFIX}" + + # Create and send transaction + NONCE_HEX=$(printf "0x%x" $NONCE) + TX_DATA="{\"jsonrpc\":\"2.0\",\"method\":\"eth_sendTransaction\",\"params\":[{\"from\":\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\",\"to\":\"${TO_ADDR}\",\"value\":\"0x1\",\"nonce\":\"${NONCE_HEX}\"}],\"id\":1}" + + RESULT=$(curl -s -X POST -H "Content-Type: application/json" --data "$TX_DATA" "$L2_RPC") + + if echo "$RESULT" | grep -q "result"; then + TX_HASH=$(echo "$RESULT" | grep -o '"result":"[^"]*"' | cut -d'"' -f4) + echo "[Block $BLOCK_DEC] TX #$TX_COUNT sent: $TX_HASH" + NONCE=$((NONCE + 1)) + TX_COUNT=$((TX_COUNT + 1)) + else + echo "[Block $BLOCK_DEC] TX failed: $RESULT" + # Refresh nonce in case of error + NONCE_HEX=$(get_nonce) + NONCE=$((NONCE_HEX)) + fi + + sleep "$INTERVAL" +done +
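
Illustrative usage note: the L2NodeV2 methods added to the Executor in this diff (GetLatestBlockV2, RequestBlockDataV2, ApplyBlockV2) are meant to be driven by the consensus layer in sequencer mode. The sketch below shows one produce-and-apply round against that surface. It is a minimal sketch, not part of the change: the blockV2 struct is a trimmed local stand-in for l2node.BlockV2, the sequencerNode interface is a hypothetical subset declared only for this example, and in the real flow the assembled block would pass through consensus before being applied.

// sketch.go — illustrative only; assumes the method signatures added in this diff.
package sketch

import (
	"fmt"
	"log"
)

// blockV2 is a trimmed stand-in for l2node.BlockV2; only the fields this sketch touches.
type blockV2 struct {
	Number     uint64
	ParentHash [32]byte
	Hash       [32]byte
}

// sequencerNode is the subset of the L2NodeV2 surface the Executor gains in this change.
type sequencerNode interface {
	GetLatestBlockV2() (*blockV2, error)
	RequestBlockDataV2(parentHash []byte) (*blockV2, bool, error)
	ApplyBlockV2(block *blockV2) error
}

// produceOneBlock assembles a block on top of the current head and applies it.
// In the real sequencer flow the assembled block goes through consensus first.
func produceOneBlock(n sequencerNode) error {
	head, err := n.GetLatestBlockV2()
	if err != nil {
		return fmt.Errorf("get latest block: %w", err)
	}
	// Assemble the next block on top of the current head by parent hash.
	block, collectedL1Msgs, err := n.RequestBlockDataV2(head.Hash[:])
	if err != nil {
		return fmt.Errorf("assemble on %x: %w", head.Hash, err)
	}
	log.Printf("assembled block %d, collected L1 msgs: %v", block.Number, collectedL1Msgs)
	return n.ApplyBlockV2(block)
}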