Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions .github/workflows/release-on-tag.yml
Original file line number Diff line number Diff line change
Expand Up @@ -85,3 +85,17 @@ jobs:
files: arkiv-${{ matrix.binary }}-*.tar.xz
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

start-performance-tests:
needs: build-and-upload
runs-on: ubuntu-latest
steps:
- name: Trigger Performance Tests
run: |
echo "Triggering performance tests for release ${{ github.ref_name }}"
# Here you can add commands to trigger your performance tests, e.g., via API calls or other means
BEARER=${{ secrets.TEST_TRACKER_BEARER_KEY }}
URL=${{ secrets.TEST_TRACKER_URL }}/public/test/run/OptimismBuild

curl -X POST -H "Authorization: Bearer $BEARER" -f -s $URL

5 changes: 4 additions & 1 deletion op-alt-da/damgr.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ var ErrPendingChallenge = errors.New("not found, pending challenge")
// ErrExpiredChallenge is returned when a challenge was not resolved and derivation should skip this input.
var ErrExpiredChallenge = errors.New("challenge expired")

// ErrCommitmentTypeMismatch is returned when the commitment type of the input does not match the expected commitment type in the config.
var ErrCommitmentTypeMismatch = errors.New("commitment type mismatch")

// ErrMissingPastWindow is returned when the input data is MIA and cannot be challenged.
// This is a protocol fatal error.
var ErrMissingPastWindow = errors.New("data missing past window")
Expand Down Expand Up @@ -193,7 +196,7 @@ func (d *DA) Reset(ctx context.Context, base eth.L1BlockRef, baseCfg eth.SystemC
func (d *DA) GetInput(ctx context.Context, l1 L1Fetcher, comm CommitmentData, blockId eth.L1BlockRef) (eth.Data, error) {
// If it's not the right commitment type, report it as an expired commitment in order to skip it
if d.cfg.CommitmentType != comm.CommitmentType() {
return nil, fmt.Errorf("invalid commitment type; expected: %v, got: %v: %w", d.cfg.CommitmentType, comm.CommitmentType(), ErrExpiredChallenge)
return nil, fmt.Errorf("invalid commitment type; expected: %v, got: %v: %w", d.cfg.CommitmentType, comm.CommitmentType(), ErrCommitmentTypeMismatch)
}
status := d.state.GetChallengeStatus(comm, blockId.Number)
// check if the challenge is expired
Expand Down
8 changes: 7 additions & 1 deletion op-alt-da/params.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,10 @@ package altda
// MaxInputSize ensures the canonical chain cannot include input batches too large to
// challenge in the Data Availability Challenge contract. Value in number of bytes.
// This value can only be changed in a hard fork.
const MaxInputSize = 130672

//const MaxInputSize = 130672

// scx1332 - override MaxInputSize to 10MB for testing purposes,
// to allow for larger batches to be included in the canonical chain and tested in the Data Availability Challenge contract.
// This should be removed before mainnet deployment.
const MaxInputSize = 10 * 1024 * 1024
3 changes: 3 additions & 0 deletions op-batcher/batcher/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -952,6 +952,9 @@ func (l *BatchSubmitter) publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[t
// is already reached. Since we can't send the txdata, we have to
// return it for later processing. We use nil error to skip error logging.
l.recordFailedDARequest(txdata.ID(), nil)
l.Log.Warn("Max concurrent DA requests reached, unable to publish to Alt DA at this time")
// Avoid busy looping
time.Sleep(1 * time.Second)
}
}

Expand Down
6 changes: 6 additions & 0 deletions op-node/rollup/derive/altda_data_source.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,12 @@ func (s *AltDADataSource) Next(ctx context.Context) (eth.Data, error) {
if errors.Is(err, altda.ErrReorgRequired) {
// challenge for a new previously derived commitment expired.
return nil, NewResetError(err)
} else if errors.Is(err, altda.ErrCommitmentTypeMismatch) {
// expected different commitment type
s.log.Warn("commitment mismatch, skipping batch", "err", err.Error())
s.comm = nil
// skip the input
return s.Next(ctx)
} else if errors.Is(err, altda.ErrExpiredChallenge) {
// this commitment was challenged and the challenge expired.
s.log.Warn("challenge expired, skipping batch", "comm", s.comm)
Expand Down
7 changes: 6 additions & 1 deletion op-node/rollup/derive/frame.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,12 @@ import (
// Frames cannot be larger than 1 MB.
// Data transactions that carry frames are generally not larger than 128 KB due to L1 network conditions,
// but we leave space to grow larger anyway (gas limit allows for more data).
const MaxFrameLen = 1_000_000
//const MaxFrameLen = 1_000_000

// scx1332 - override MaxFrameLen to 10MB for testing purposes,
// to allow for larger batches to be included in the canonical chain and tested in the Data Availability Challenge contract.
// This should be removed before mainnet deployment.
const MaxFrameLen = 10 * 1024 * 1024

// Data Format
//
Expand Down