2 changes: 1 addition & 1 deletion go.mod
@@ -14,6 +14,7 @@ require (
github.com/prometheus/client_golang v1.14.0
github.com/sirupsen/logrus v1.9.3
golang.org/x/crypto v0.41.0
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f
google.golang.org/protobuf v1.36.6
)

@@ -103,7 +104,6 @@ require (
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/urfave/cli/v2 v2.27.5 // indirect
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
golang.org/x/mod v0.27.0 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/sync v0.16.0 // indirect
133 changes: 133 additions & 0 deletions internal/mt/mt.go
@@ -0,0 +1,133 @@
// Package mt implements an in-memory Merkle tree that provides an authenticated list.
package mt

import (
"crypto/sha256"
"fmt"
"iter"
)

type Digest = [sha256.Size]byte

var MerklePlaceholderDigest = Digest([]byte("MERKLE_PLACEHOLDER_HASH_________"))
var LeafSeparator = []byte("MT::LeafNode")
var InternalSeparator = []byte("MT::InternalNode")

func digestInternal(leftChildDigest Digest, rightChildDigest Digest) Digest {
hash := sha256.New()
hash.Write(InternalSeparator)
hash.Write(leftChildDigest[:])
hash.Write(rightChildDigest[:])
return Digest(hash.Sum(nil))
}

func digestLeaf(preimage []byte) Digest {
hash := sha256.New()
hash.Write(LeafSeparator)
hash.Write(preimage)
return Digest(hash.Sum(nil))
}

func buildTreeLevels(leafPreimages [][]byte) iter.Seq[[]Digest] {
return func(yield func([]Digest) bool) {
leafCount := len(leafPreimages)
if leafCount == 0 {
// Empty tree: yield the single placeholder level and stop.
yield([]Digest{MerklePlaceholderDigest})
return
}

// Start with the leaf digests
currentLayer := make([]Digest, leafCount)
for i, leafPreimage := range leafPreimages {
currentLayer[i] = digestLeaf(leafPreimage)
}
if !yield(currentLayer) {
return
}

// Build the tree upwards, padding with a placeholder whenever a level has an odd number of nodes
for len(currentLayer) > 1 {
nextLayerSize := (len(currentLayer) + 1) / 2 // Ceiling division
nextLayer := make([]Digest, 0, nextLayerSize)

for i := 0; i < len(currentLayer); i += 2 {
leftDigest := currentLayer[i]
rightDigest := MerklePlaceholderDigest
if i+1 < len(currentLayer) {
rightDigest = currentLayer[i+1]
}
nextLayer = append(nextLayer, digestInternal(leftDigest, rightDigest))
}
currentLayer = nextLayer
if !yield(currentLayer) {
return
}
}
}
}

// Root computes the Merkle tree root from leaf preimages.
func Root(leafPreimages [][]byte) Digest {
var rootDigest Digest
for treeLevel := range buildTreeLevels(leafPreimages) {
if len(treeLevel) == 1 {
rootDigest = treeLevel[0]
}
}
return rootDigest
}

// Prove generates a Merkle inclusion proof that the leafPreimage at index is
// included in the tree rooted at Root(leafPreimages).
func Prove(leafPreimages [][]byte, index uint64) ([]Digest, error) {
leafCount := len(leafPreimages)
if leafCount == 0 {
return nil, fmt.Errorf("cannot prove inclusion in empty tree")
}
if index >= uint64(leafCount) {
return nil, fmt.Errorf("index %d is out of bounds for %d leaves", index, leafCount)
}

var proof []Digest
currentIndex := index

for treeLevel := range buildTreeLevels(leafPreimages) {
if len(treeLevel) <= 1 {
break
}
siblingIndex := currentIndex ^ 1
siblingDigest := MerklePlaceholderDigest
if siblingIndex < uint64(len(treeLevel)) {
siblingDigest = treeLevel[siblingIndex]
}
proof = append(proof, siblingDigest)
currentIndex /= 2
}

return proof, nil
}

// Verify verifies that leafPreimage is the preimage of the index-th leaf in the
// Merkle tree rooted at expectedRootDigest.
func Verify(expectedRootDigest Digest, index uint64, leafPreimage []byte, proof []Digest) error {
currentDigest := digestLeaf(leafPreimage)
currentIndex := index

for _, siblingDigest := range proof {
if currentIndex%2 == 0 {
// Current node is left child, sibling is right
currentDigest = digestInternal(currentDigest, siblingDigest)
} else {
// Current node is right child, sibling is left
currentDigest = digestInternal(siblingDigest, currentDigest)
}
currentIndex /= 2
}

if currentDigest != expectedRootDigest {
return fmt.Errorf("computed root digest mismatch: computed %x, expected %x", currentDigest, expectedRootDigest)
}

return nil
}
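
For context, a minimal usage sketch of the new mt package (not part of the diff): build a root over a few leaf preimages, prove inclusion of one of them, and verify the proof against the root. The import path is assumed from the module path, and the caller must live inside the module because the package is internal.

package main

import (
	"fmt"

	"github.com/smartcontractkit/libocr/internal/mt"
)

func main() {
	leaves := [][]byte{[]byte("alpha"), []byte("beta"), []byte("gamma")}

	// Compute the root over all leaves.
	root := mt.Root(leaves)

	// Prove inclusion of the leaf at index 2 ("gamma").
	proof, err := mt.Prove(leaves, 2)
	if err != nil {
		panic(err)
	}

	// Verification needs only the root, the index, the leaf preimage, and the proof.
	if err := mt.Verify(root, 2, []byte("gamma"), proof); err != nil {
		panic(err)
	}
	fmt.Println("inclusion proof verified")
}
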
10 changes: 10 additions & 0 deletions internal/util/generic.go
@@ -1,9 +1,19 @@
package util

import "golang.org/x/exp/constraints"

func PointerTo[T any](v T) *T {
return &v
}

func PointerIntegerCast[U constraints.Integer, T constraints.Integer](p *T) *U {
if p == nil {
return nil
}
v := U(*p)
return &v
}

func NilCoalesce[T any](maybe *T, default_ T) T {
if maybe != nil {
return *maybe
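
A short, hypothetical usage sketch of the helpers in internal/util (import path assumed from the module path): the new PointerIntegerCast converts between pointer-to-integer types while preserving nil, complementing the existing PointerTo and NilCoalesce, which is assumed to return the default when the pointer is nil (its nil branch is cut off in the diff above).

package main

import (
	"fmt"

	"github.com/smartcontractkit/libocr/internal/util"
)

func main() {
	u32 := util.PointerTo(uint32(42))

	// Cast the pointed-to value to another integer type; nil stays nil.
	i64 := util.PointerIntegerCast[int64](u32)
	fmt.Println(*i64) // 42

	var missing *uint32
	fmt.Println(util.PointerIntegerCast[int64](missing) == nil) // true

	// Fall back to a default when the pointer is nil.
	fmt.Println(util.NilCoalesce(missing, uint32(7))) // 7
}
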
1 change: 1 addition & 0 deletions networking/ragedisco/discovery_protocol.go
@@ -20,6 +20,7 @@ import (
// Maximum number of distinct oracles that we can have across groups.
// The exact number is chosen arbitrarily. Better to have an arbitrary limit
// than no limit.
// See also [ragetypes.MaxPeersPerHost].
const MaxOracles = 165

type incomingMessage struct {
11 changes: 11 additions & 0 deletions offchainreporting2plus/internal/config/netconfig/netconfig.go
@@ -5,6 +5,7 @@ import (

"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config"
"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr2config"
"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3_1config"
"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
)
@@ -37,6 +38,16 @@ func NetConfigFromContractConfig(contractConfig types.ContractConfig) (NetConfig
publicConfig.F,
peerIDs(publicConfig.OracleIdentities),
}, nil
case config.OCR3_1OffchainConfigVersion:
publicConfig, err := ocr3_1config.PublicConfigFromContractConfig(true, contractConfig)
if err != nil {
return NetConfig{}, err
}
return NetConfig{
publicConfig.ConfigDigest,
publicConfig.F,
peerIDs(publicConfig.OracleIdentities),
}, nil
default:
return NetConfig{}, fmt.Errorf("NetConfigFromContractConfig received OffchainConfigVersion %v", contractConfig.OffchainConfigVersion)
}
91 changes: 91 additions & 0 deletions offchainreporting2plus/internal/config/ocr3_1config/defaults.go
@@ -0,0 +1,91 @@
package ocr3_1config

import (
"math"
"time"

"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/maxmaxserializationlimits"
"github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
)

const (
defaultSmallRequestSizeMinRequestToSameOracleInterval = 10 * time.Millisecond

assumedRTT = 500 * time.Millisecond
assumedBandwidthBitsPerSecond = 100e6 // 100Mbit
assumedBandwidthBytesPerSecond = assumedBandwidthBitsPerSecond / 8
)

// transferDuration calculates the duration required to transfer the given
// number of bytes at the assumed bandwidth
func transferDuration(bytes int) time.Duration {
seconds := float64(bytes) / float64(assumedBandwidthBytesPerSecond)
return time.Duration(seconds * float64(time.Second))
}

func roundUpToTenthOfSecond(duration time.Duration) time.Duration {
tenthsOfSecond := float64(duration.Milliseconds()) / 100
return time.Duration(math.Ceil(tenthsOfSecond)) * 100 * time.Millisecond
}

func DefaultDeltaInitial() time.Duration {
return roundUpToTenthOfSecond(
3*assumedRTT/2 +
transferDuration(maxmaxserializationlimits.MaxMaxEpochStartRequestBytes*types.MaxOracles+maxmaxserializationlimits.MaxMaxEpochStartBytes))
}

func DefaultDeltaReportsPlusPrecursorRequest() time.Duration {
return roundUpToTenthOfSecond(
assumedRTT +
transferDuration(maxmaxserializationlimits.MaxMaxReportsPlusPrecursorRequestBytes+maxmaxserializationlimits.MaxMaxReportsPlusPrecursorBytes))
}

func DefaultDeltaBlockSyncResponseTimeout() time.Duration {
return roundUpToTenthOfSecond(
assumedRTT +
transferDuration(maxmaxserializationlimits.MaxMaxBlockSyncRequestBytes+maxmaxserializationlimits.MaxMaxBlockSyncResponseBytes))
}

func DefaultDeltaTreeSyncResponseTimeout() time.Duration {
return roundUpToTenthOfSecond(
assumedRTT +
transferDuration(maxmaxserializationlimits.MaxMaxTreeSyncChunkRequestBytes+maxmaxserializationlimits.MaxMaxTreeSyncChunkResponseBytes))
}

func DefaultDeltaBlobChunkResponseTimeout() time.Duration {
return roundUpToTenthOfSecond(
assumedRTT +
transferDuration(maxmaxserializationlimits.MaxMaxBlobChunkRequestBytes+maxmaxserializationlimits.MaxMaxBlobChunkResponseBytes))
}

const (
DefaultDeltaResend = 5 * time.Second

DefaultDeltaStateSyncSummaryInterval = 5 * time.Second
DefaultDeltaBlockSyncMinRequestToSameOracleInterval = defaultSmallRequestSizeMinRequestToSameOracleInterval

DefaultMaxBlocksPerBlockSyncResponse = 2
DefaultMaxParallelRequestedBlocks = 100

DefaultDeltaTreeSyncMinRequestToSameOracleInterval = defaultSmallRequestSizeMinRequestToSameOracleInterval

DefaultMaxTreeSyncChunkKeys = 1024

// A tree sync chunk must always fit at least 1 maximally sized (using maxmax) key-value pair
DefaultMaxTreeSyncChunkKeysPlusValuesBytes = ocr3_1types.MaxMaxKeyValueKeyBytes + ocr3_1types.MaxMaxKeyValueValueBytes

DefaultMaxParallelTreeSyncChunkFetches = 8

DefaultSnapshotInterval = 10_000
DefaultMaxHistoricalSnapshotsRetained = 10

DefaultDeltaBlobOfferMinRequestToSameOracleInterval = defaultSmallRequestSizeMinRequestToSameOracleInterval
DefaultDeltaBlobOfferResponseTimeout = 10 * time.Second

DefaultDeltaBlobBroadcastGrace = 10 * time.Millisecond

DefaultDeltaBlobChunkMinRequestToSameOracleInterval = defaultSmallRequestSizeMinRequestToSameOracleInterval

DefaultBlobChunkBytes = 1_000_000 // 1MB
)
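
To make the arithmetic behind the Default* timeouts above concrete: each default is the assumed RTT plus the time needed to transfer the maximally sized request and response at the assumed 100 Mbit/s, rounded up to the next tenth of a second. The sketch below restates the two helpers with a made-up byte total, since the actual maxmax serialization limits are defined in a separate package.

package main

import (
	"fmt"
	"math"
	"time"
)

const (
	assumedRTT                     = 500 * time.Millisecond
	assumedBandwidthBytesPerSecond = 100e6 / 8 // 100 Mbit/s
)

func transferDuration(bytes int) time.Duration {
	seconds := float64(bytes) / float64(assumedBandwidthBytesPerSecond)
	return time.Duration(seconds * float64(time.Second))
}

func roundUpToTenthOfSecond(duration time.Duration) time.Duration {
	tenthsOfSecond := float64(duration.Milliseconds()) / 100
	return time.Duration(math.Ceil(tenthsOfSecond)) * 100 * time.Millisecond
}

func main() {
	// Hypothetical request+response size of 5,250,000 bytes: about 420ms of
	// transfer time on top of the 500ms RTT, and 920ms rounds up to 1s.
	fmt.Println(roundUpToTenthOfSecond(assumedRTT + transferDuration(5_250_000))) // 1s
}
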
@@ -0,0 +1,8 @@
package ocr3_1config

const (
MaxMaxBlocksPerBlockSyncResponse = 2
MaxMaxTreeSyncChunkKeys = 10_000
MaxMaxTreeSyncChunkKeysPlusValuesBytes = 50_000_000 // 50MB
MaxMaxBlobChunkBytes = 10_000_000 // 10MB
)