Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -51,13 +51,27 @@
baseInternal
. V.genesisVariantVoltaire

-- | Leios base profile: fixed load on the 51-node cluster shape
-- ('composeFiftyOne'), a 90112-byte block size cap, and the Voltaire
-- genesis variant.
baseLeios :: Types.Profile -> Types.Profile
baseLeios profile =
  P.fixedLoaded
    $ composeFiftyOne
    $ P.maxBlockSize 90112
    -- All cloud profiles use trace forwarding.
    $ P.traceForwardingOn
    $ P.initCooldown 45
    $ P.analysisStandard
    $ V.genesisVariantVoltaire profile

-- | Voting base profile: the Voltaire base with 'P.voting' applied first.
baseVoting :: Types.Profile -> Types.Profile
baseVoting profile = baseVoltaire (P.voting profile)

--------------------------------------------------------------------------------

-- | Cluster shape for the 51-node profiles: an explorer node plus
-- 51 hosts wired in a torus topology.
composeFiftyOne :: Types.Profile -> Types.Profile
composeFiftyOne profile = P.torus (V.hosts 51 (P.withExplorerNode profile))

-- | Cluster shape for the 52-node profiles: an explorer node plus
-- 52 hosts wired in a dense torus topology.
--
-- NOTE(review): name uses lowercase "two"; consider @composeFiftyTwo@ for
-- consistency with 'composeFiftyOne' (renaming would touch all call sites).
composeFiftytwo :: Types.Profile -> Types.Profile
composeFiftytwo profile = P.torusDense (V.hosts 52 (P.withExplorerNode profile))

Expand Down Expand Up @@ -196,6 +210,14 @@
, ripemd & P.name "plutusv3-ripemd-stepx15-nomadperf" . P.dreps 10000 . P.newTracing . P.budgetBlockStepsOneAndAHalf
, ripemd & P.name "plutusv3-ripemd-stepx2-nomadperf" . P.dreps 10000 . P.newTracing . P.budgetBlockStepsDouble
]
---------
-- Leios.
---------
++
let valueLeios = P.empty & baseLeios . V.valueCloud . V.datasetOct2021 . V.fundsDouble . valueDuration . nomadPerf
in [

Check warning on line 218 in bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Cloud.hs

View workflow job for this annotation

GitHub Actions / build

Suggestion in profilesNoEraCloud in module Cardano.Benchmarking.Profile.Builtin.Cloud: Use : ▫︎ Found: "[valueLeios\n & P.name \"value-leios-nomadperf\" . P.dreps 10000 . P.newTracing]\n ++\n let\n valueVoting\n = P.empty\n & baseVoting\n . V.valueCloud\n . V.datasetOct2021\n . V.fundsVoting\n . valueDurationVoting\n . nomadPerf . P.descAdd \"+ voting\" . valueDesc\n plutusVoting\n = P.empty\n & baseVoting\n . V.plutusBase\n . V.datasetOct2021\n . V.fundsVoting\n . plutusDuration . nomadPerf . P.descAdd \"+ voting\" . plutusDesc\n loopVoting\n = plutusVoting\n & plutusLoopBase . V.plutusTypeLoop . P.analysisSizeSmall\n in\n [valueVoting\n & P.name \"value-voting-utxo-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadUtxo,\n valueVoting\n & P.name \"value-voting-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadx1,\n valueVoting\n & P.name \"value-voting-double-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadx2,\n loopVoting\n & P.name \"plutus-voting-utxo-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadUtxo,\n loopVoting\n & P.name \"plutus-voting-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadx1,\n loopVoting\n & P.name \"plutus-voting-double-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadx2]\n ++\n let\n latency\n = P.empty\n & B.base\n . P.fixedLoaded\n . composeFiftytwo\n . V.genesisVariantPreVoltaire\n . V.timescaleCompressed\n . P.delegators 0\n . P.workloadAppend L.latencyWorkload . P.analysisStandard\n in\n (latency\n & P.name \"latency-nomadperf\"\n . P.desc\n \"AWS perf class cluster, stop when all latency services stop\"\n . P.traceForwardingOn . P.newTracing . nomadPerf)\n : let\n valueCI\n = P.empty & E.base . V.valueLocal . P.traceForwardingOn . nomadPerf\n fastNP\n = valueCI\n & E.fastDuration . composeFiftytwo . 
V.genesisVariantPreVoltaire\n ciNP\n = valueCI\n & E.ciTestDuration\n . P.torus . V.hosts 2 . P.withExplorerNode . V.genesisVariant300\n in\n [fastNP & P.name \"fast-nomadperf\" . P.newTracing,\n ciNP & P.name \"ci-test-nomadperf\" . P.newTracing,\n ciNP & P.name \"ci-test-oldtracing-nomadperf\" . P.oldTracing]\n ++\n let\n defNP\n = P.empty\n & E.baseNoDataset\n . V.valueCloud\n . P.traceForwardingOn\n . nomadPerf\n . E.defaultDuration\n . P.torus\n . V.hosts 6\n . P.withExplorerNode\n . V.genesisVariant300\n . P.delegators 6\n . P.analysisUnitary\n in\n [defNP & P.name \"default-nomadperf\" . P.newTracing,\n defNP & P.name \"oldtracing-nomadperf\" . P.oldTracing]\n ++\n let\n ciBench\n = P.empty\n & P.fixedLoaded\n . V.hosts 2\n . P.torus\n . nomadPerf\n . P.withExplorerNode\n . V.timescaleCompressed\n . V.genesisVariant300\n . V.datasetMiniature\n . V.fundsDefault\n . P.shutdownOnBlock 15\n . P.generatorEpochs 3\n . P.initCooldown 5\n . P.analysisStandard\n . P.desc\n \"Miniature dataset, CI-friendly duration, bench scale\"\n in\n [ciBench\n & P.name \"ci-bench-nomadperf\"\n . V.valueLocal\n . P.dreps 0 . P.traceForwardingOn . P.newTracing,\n ciBench\n & P.name \"ci-bench-oldtracing-nomadperf\"\n . V.valueLocal . P.dreps 0 . P.oldTracing]" ▫︎ Perhaps: "(valueLeios\n & P.name \"value-leios-nomadperf\" . P.dreps 10000 . P.newTracing)\n : let\n valueVoting\n = P.empty\n & baseVoting\n . V.valueCloud\n . V.datasetOct2021\n . V.fundsVoting\n . valueDurationVoting\n . nomadPerf . P.descAdd \"+ voting\" . valueDesc\n plutusVoting\n = P.empty\n & baseVoting\n . V.plutusBase\n . V.datasetOct2021\n . V.fundsVoting\n . plutusDuration . nomadPerf . P.descAdd \"+ voting\" . plutusDesc\n loopVoting\n = plutusVoting\n & plutusLoopBase . V.plutusTypeLoop . P.analysisSizeSmall\n in\n [valueVoting\n & P.name \"value-voting-utxo-volt-nomadperf\"\n . P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadUtxo,\n valueVoting\n & P.name \"value-voting-volt-nomadperf\"\n . 
P.dreps 10000\n . P.newTracing . P.workloadAppend W.votingWorkloadx1,\n valueVoting\n & P.name \"va
valueLeios & P.name "value-leios-nomadperf" . P.dreps 10000 . P.newTracing
]
----------
-- Voting.
----------
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,13 +48,13 @@ baseNoDataset =

-- TODO: Move to `base` when "default*" and "oldtracing" genesis are the same.
genesis :: Types.Profile -> Types.Profile
genesis = V.genesisVariant300
genesis = V.genesisVariantVoltaire

--------------------------------------------------------------------------------

fastDuration :: Types.Profile -> Types.Profile
fastDuration =
V.timescaleCompressed . P.shutdownOnBlock 1
V.timescaleCompressed . P.shutdownOnBlock 180
-- TODO: dummy "generator.epochs" ignored in favor of "--shutdown-on".
-- Create a "time.epochs" or "time.blocks" or similar, IDK!
-- This applies to all profiles!
Expand All @@ -63,7 +63,7 @@ fastDuration =

ciTestDuration :: Types.Profile -> Types.Profile
ciTestDuration =
V.timescaleCompressed . P.shutdownOnBlock 8
V.timescaleCompressed . P.shutdownOnBlock 180
-- TODO: dummy "generator.epochs" ignored in favor of "--shutdown-on".
-- Create a "time.epochs" or "time.blocks" or similar, IDK!
-- This applies to all profiles!
Expand Down Expand Up @@ -143,7 +143,7 @@ profilesNoEraEmpty = map baseNoDataset
-- ci-test-hydra: FixedLoaded and "--shutdown-on-block-synced 3" with 2 nodes.
------------------------------------------------------------------------------
let ciTestHydra =
P.empty & V.datasetEmpty . V.genesisVariantPreVoltaire . ciTestDuration
P.empty & V.datasetEmpty . genesis . ciTestDuration
. P.uniCircle . V.hosts 2 . P.loopback
. P.analysisSizeSmall
in [
Expand Down
139 changes: 137 additions & 2 deletions bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs
Original file line number Diff line number Diff line change
Expand Up @@ -114,13 +114,148 @@ genesisVariantVoltaire = genesisVariantLatest
-- Definition vocabulary: funds.
--------------------------------

-- | Estimate the number of genesis UTxO keys (funds) required for continuous
-- load generation using @on_confirm@ recycling.
--
-- @
-- funds = ceiling((M + J + D × B + Q × S) / S × I)
-- @
--
-- * @M@: single-node mempool capacity in bytes
--   (@MempoolCapacityBytesOverride@ rounded up to whole blocks by Consensus).
-- * @J@: disjoint mempool bytes across all nodes. The caller pre-computes
--   this as @nodes × (1 - syncRatio) × mempool@; 0 for perfect sync.
-- * @D@: confirmation depth (blocks on top before recycling).
-- * @Q@: payload queue depth (built txs waiting to be fetched by workers).
-- * @B@: effective block body size in bytes (@maxBlockBodySize@ minus
--   @fixedBlockBodyOverhead@ (1024) to account for block serialization
--   overhead not captured by summing individual tx sizes).
-- * @S@: serialised transaction size in bytes.
-- * @I@: inputs per transaction (= UTxO keys consumed per tx).
--
-- Counts the maximum funds simultaneously locked in the pipeline:
--
-- @
-- payload queue (Q txs) → mempool (M bytes) → unconfirmed (D blocks) → recycled
-- @
--
-- __TPS is irrelevant.__ The tx-centrifuge is pull-based: the node requests
-- transactions when its mempool has room. In steady state the submission rate
-- is driven by block production, not TPS. TPS only determines how quickly the
-- mempools fill initially — there must be enough txs to fill all mempools if
-- the resulting drain rate allows it.
--
-- __Mempool sync ratio.__ With perfect sync (@syncRatio = 1.0@) all mempools
-- hold the same txs and the mempool counts once. In practice, propagation
-- latency and asymmetric connectivity cause partial disjointness: each node
-- has a fraction of unique txs that still consume funds. The effective
-- mempool is @syncRatio × M + nodes × (1 - syncRatio) × M@. With
-- @syncRatio = 1.0@ this collapses to @M@ (count once); with
-- @syncRatio = 0.7@ and 52 nodes it becomes @0.7 × M + 52 × 0.3 × M@.
--
-- __Confirmation depth and fork safety.__ Forks of depth <= D are safe: the
-- recycler receives orphan events and recycles original inputs. Forks deeper
-- than D cause permanent fund loss (outputs already recycled, originals gone).
-- Fork frequency depends only on the active slot coefficient @f@ (typically
-- 0.05); a depth-K fork requires K consecutive slot battles (≈ 0.001^K),
-- making D = 2 safe for virtually all configurations.
--
-- __Payload queue (Q).__ The builder's payload queue (bounded TBQueue,
-- hardcoded to 8192 in Config.Runtime) holds already-built transactions
-- whose input funds are already consumed. Back-to-back blocks can drain
-- this queue before the recycler/builder refill it, fatal with
-- @on_exhaustion = error@. The queue depth is counted in transactions
-- (not blocks) and contributes directly to the in-flight fund count.
utxoKeys
  :: Integer -- ^ Payload queue capacity (built txs waiting to be fetched).
             --   Hardcoded to 8192 in Config.Runtime.
  -> Integer -- ^ Mempool capacity in bytes (per node).
  -> Integer -- ^ Disjoint mempool bytes across all nodes. Total unique bytes
             --   not shared by all mempools: @nodes × (1 - syncRatio) × mempool@.
             --   0 for perfect sync; the caller pre-computes this from the
             --   number of nodes and the estimated sync ratio.
  -> Integer -- ^ Max block body size in bytes (protocol parameter).
  -> Integer -- ^ Confirmation depth (blocks on top before recycling).
             --   Also the fork protection depth: forks of depth <= D are safe;
             --   deeper forks cause permanent fund loss.
  -> Integer -- ^ Serialised transaction size in bytes.
  -> Integer -- ^ Inputs per transaction.
  -> Integer -- ^ Estimated number of genesis UTxO keys (funds) needed.
utxoKeys queueDepth mempoolBytes disjointBytes blockBytes confirmDepth txBytes inputsPerTx =
  let -- Block capacity: blockBytes - fixedBlockBodyOverhead (1024).
      effectiveBlockBytes = blockBytes - 1024
      -- The mempool uses its own size accounting, not wire sizes.
      -- See fixedBlockBodyOverhead and perTxOverhead in
      -- Ouroboros.Consensus.Shelley.Ledger.Mempool.
      -- Tx size in mempool: sizeTxF + perTxOverhead (4, hardcoded constant).
      -- sizeTxF is ~1 byte smaller than the wire size (Api.serialiseToCBOR), so
      -- the net mempool tx size is ~3 bytes larger than the wire size.
      txBytesMempool = txBytes + 3
      -- Consensus rounds the override up to whole blocks (ceiling division
      -- using the effective block size, not raw maxBlockBodySize);
      -- see computeMempoolCapacity in Ouroboros.Consensus.Mempool.Capacity.
      effectiveMempoolBlocks = mempoolBytes `ceilDiv` effectiveBlockBytes
      effectiveMempoolBytes = effectiveMempoolBlocks * effectiveBlockBytes
      -- Total pipeline depth in bytes.
      pipelineBytes = effectiveMempoolBytes
                    + disjointBytes
                    + confirmDepth * effectiveBlockBytes
                    + queueDepth * txBytesMempool
      -- ceiling(pipeline / S × I) == ceiling((pipeline × I) / S), computed
      -- exactly in Integer arithmetic. The previous Double-based division
      -- could misround for pipelines beyond Double's 53-bit mantissa.
  in (pipelineBytes * inputsPerTx) `ceilDiv` txBytesMempool
  where
    -- Exact ceiling division for non-negative operands.
    ceilDiv n d = (n + d - 1) `div` d

-- Defined in the "genesis" property and it's for the tx-generator.
fundsDefault :: Types.Profile -> Types.Profile
fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . P.utxoKeys 1
fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000
. P.utxoKeys
(utxoKeys
-- Payload queue depth (Config.Runtime TBQueue capacity).
8192
-- MempoolCapacityBytesOverride, rounded internally like
-- Consensus does.
25000000
-- Disjoint mempool bytes (0 = perfect sync).
0
-- Max block body size (bytes).
90112
-- Confirmation depth (blocks on top); AKA fork protection.
2
-- Steady-state tx size (bytes). The initial batch uses
-- genesis keys (one per fund, 2 witnesses → 371 bytes), but
-- after recycling all inputs share the builder's single
-- signing key (1 witness → 270 bytes). Use the steady-state
-- size: smaller txs means more txs fit per block, so more
-- funds are needed to keep the pipeline full.
270
-- Inputs per tx.
2
)

-- Some profiles have a higher `funds_balance` in `Genesis`. Needed? Fix it?
fundsDouble :: Types.Profile -> Types.Profile
fundsDouble = P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.utxoKeys 1
fundsDouble = P.poolBalance 1000000000000000 . P.funds 20000000000000
. P.utxoKeys
(utxoKeys
-- Payload queue depth (Config.Runtime TBQueue capacity).
8192
-- MempoolCapacityBytesOverride, rounded internally like
-- Consensus does.
25000000
-- Disjoint mempool bytes: 52 nodes, 1.1% disjoint each.
(52 * 25000000 * 11 `div` 1000)
-- Max block body size (bytes).
90112
-- Confirmation depth (blocks on top); AKA fork protection.
2
-- Steady-state tx size (bytes). The initial batch uses
-- genesis keys (one per fund, 2 witnesses → 371 bytes), but
-- after recycling all inputs share the builder's single
-- signing key (1 witness → 270 bytes). Use the steady-state
-- size: smaller txs means more txs fit per block, so more
-- funds are needed to keep the pipeline full.
270
-- Inputs per tx.
2
)

fundsVoting :: Types.Profile -> Types.Profile
fundsVoting = P.poolBalance 1000000000000000 . P.funds 40000000000000 . P.utxoKeys 2
Expand Down
5 changes: 3 additions & 2 deletions bench/cardano-topology/src/Cardano/Benchmarking/Topology.hs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ module Cardano.Benchmarking.Topology (
import Prelude hiding (id)

import Data.Function ((&))
import Data.List (tails, sortOn, uncons)
import Data.List (tails, uncons)
import Data.Maybe (isJust)

import qualified Cardano.Benchmarking.Topology.Types as Types
Expand Down Expand Up @@ -69,7 +69,8 @@ mkExplorer explorerLocation coreNodes =
, nodeId = length coreNodes
, region = explorerLocation
-- Explorer producers sorted by numeric id, not region or something else.
, producers = map Types.name (sortOn Types.nodeId coreNodes)
--, producers = map Types.name (sortOn Types.nodeId coreNodes)
, producers = ["node-0", "node-1", "node-2"]
, org = "IOHK"
, pools = Nothing
, stakePool = Nothing
Expand Down
Loading
Loading