diff --git a/Cargo.lock b/Cargo.lock index 8c992bea..1adf393e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2953,15 +2953,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - [[package]] name = "enum-as-inner" version = "0.6.1" @@ -3842,7 +3833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls", + "rustls 0.23.32", "rustls-pki-types", ] @@ -4498,8 +4489,8 @@ dependencies = [ "hyper 1.7.0", "hyper-util", "log", - "rustls", - "rustls-native-certs", + "rustls 0.23.32", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio 1.47.1", "tokio-rustls", @@ -4692,7 +4683,7 @@ dependencies = [ "netlink-proto", "netlink-sys", "rtnetlink", - "system-configuration 0.6.1", + "system-configuration", "tokio 1.47.1", "windows 0.53.0", ] @@ -5056,7 +5047,7 @@ dependencies = [ "http 1.3.1", "jsonrpsee-core", "pin-project", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "rustls-platform-verifier", "soketto", @@ -5577,10 +5568,10 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "parking_lot 0.12.4", - "quinn", + "quinn 0.11.9", "rand 0.8.5", "ring 0.17.14", - "rustls", + "rustls 0.23.32", "socket2 0.5.10", "thiserror 1.0.69", "tokio 1.47.1", @@ -5672,7 +5663,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.14", - "rustls", + "rustls 0.23.32", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser 0.16.0", @@ -6164,12 +6155,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -7119,25 +7104,6 @@ dependencies = [ "sp-runtime", ] -[[package]] -name = "pallet-merkle-airdrop" -version = "0.1.0" -dependencies = [ - "binary-merkle-tree", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-balances 40.0.1", - "pallet-vesting", - "parity-scale-codec", - "scale-info", - "sha2 0.10.9", - "sp-core", - "sp-io", - "sp-runtime", -] - [[package]] name = "pallet-message-queue" version = "44.0.0" @@ -7189,6 +7155,24 @@ dependencies = [ "sp-mmr-primitives", ] +[[package]] +name = "pallet-multisig" +version = "1.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-balances 40.0.1", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", +] + [[package]] name = "pallet-preimage" version = "41.0.0" @@ -7570,21 +7554,6 @@ dependencies = [ "sp-runtime", ] -[[package]] -name = "pallet-vesting" -version = "41.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "305b437f4832bb563b660afa6549c0f0d446b668b4f098edc48d04e803badb9f" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "scale-info", - "sp-runtime", -] - [[package]] name = "pallet-wormhole" version = "0.1.0" @@ -8882,6 +8851,24 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "qp-header" +version = "0.1.0" +dependencies = [ + "hex", + "log", + "p3-field", + "p3-goldilocks", + "parity-scale-codec", + "qp-poseidon", + "qp-poseidon-core", + "scale-info", + "serde", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "qp-plonky2" version = "1.1.1" @@ -9068,8 +9055,6 @@ version = "0.1.0" dependencies = [ "hex", "log", - "num-bigint", - "num-traits", "primitive-types 0.13.1", "qp-poseidon-core", ] @@ -9094,6 +9079,8 @@ name 
= "quantus-miner-api" version = "0.0.3" dependencies = [ "serde", + "serde_json", + "tokio 1.47.1", ] [[package]] @@ -9113,15 +9100,16 @@ dependencies = [ "parity-scale-codec", "prometheus", "qp-dilithium-crypto", - "qp-rusty-crystals-dilithium", "qp-rusty-crystals-hdwallet", "qp-wormhole-circuit-builder", "qp-wormhole-verifier", "qpow-math", "quantus-miner-api", "quantus-runtime", + "quinn 0.10.2", "rand 0.8.5", - "reqwest", + "rcgen", + "rustls 0.21.12", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -9129,6 +9117,7 @@ dependencies = [ "sc-consensus-qpow", "sc-executor", "sc-network", + "sc-network-sync", "sc-offchain", "sc-service", "sc-telemetry", @@ -9151,7 +9140,6 @@ dependencies = [ "substrate-build-script-utils", "substrate-frame-rpc-system", "tokio-util", - "uuid", ] [[package]] @@ -9173,8 +9161,8 @@ dependencies = [ "pallet-assets-holder", "pallet-balances 40.0.1", "pallet-conviction-voting", - "pallet-merkle-airdrop", "pallet-mining-rewards", + "pallet-multisig", "pallet-preimage", "pallet-qpow", "pallet-ranked-collective", @@ -9188,10 +9176,10 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-treasury", "pallet-utility", - "pallet-vesting", "parity-scale-codec", "primitive-types 0.13.1", "qp-dilithium-crypto", + "qp-header", "qp-poseidon", "qp-scheduler", "scale-info", @@ -9241,6 +9229,23 @@ dependencies = [ "unsigned-varint 0.8.0", ] +[[package]] +name = "quinn" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes 1.10.1", + "pin-project-lite 0.2.16", + "quinn-proto 0.10.6", + "quinn-udp 0.4.1", + "rustc-hash 1.1.0", + "rustls 0.21.12", + "thiserror 1.0.69", + "tokio 1.47.1", + "tracing", +] + [[package]] name = "quinn" version = "0.11.9" @@ -9251,10 +9256,10 @@ dependencies = [ "cfg_aliases 0.2.1", "futures-io", "pin-project-lite 0.2.16", - "quinn-proto", - "quinn-udp", + "quinn-proto 
0.11.13", + "quinn-udp 0.5.14", "rustc-hash 2.1.1", - "rustls", + "rustls 0.23.32", "socket2 0.6.0", "thiserror 2.0.16", "tokio 1.47.1", @@ -9262,6 +9267,24 @@ dependencies = [ "web-time", ] +[[package]] +name = "quinn-proto" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +dependencies = [ + "bytes 1.10.1", + "rand 0.8.5", + "ring 0.16.20", + "rustc-hash 1.1.0", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "slab", + "thiserror 1.0.69", + "tinyvec", + "tracing", +] + [[package]] name = "quinn-proto" version = "0.11.13" @@ -9274,7 +9297,7 @@ dependencies = [ "rand 0.9.2", "ring 0.17.14", "rustc-hash 2.1.1", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "slab", "thiserror 2.0.16", @@ -9283,6 +9306,19 @@ dependencies = [ "web-time", ] +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes 1.10.1", + "libc", + "socket2 0.5.10", + "tracing", + "windows-sys 0.48.0", +] + [[package]] name = "quinn-udp" version = "0.5.14" @@ -9572,42 +9608,6 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes 1.10.1", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.16", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "system-configuration 0.5.1", - 
"tokio 1.47.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "resolv-conf" version = "0.7.5" @@ -9848,6 +9848,17 @@ dependencies = [ "windows-sys 0.61.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "ring 0.17.14", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.32" @@ -9863,6 +9874,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework 2.11.1", +] + [[package]] name = "rustls-native-certs" version = "0.8.1" @@ -9872,7 +9895,16 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.5.0", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", ] [[package]] @@ -9896,11 +9928,11 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", - "rustls-native-certs", + "rustls 0.23.32", + "rustls-native-certs 0.8.1", "rustls-platform-verifier-android", "rustls-webpki 0.103.6", - "security-framework", + "security-framework 3.5.0", "security-framework-sys", "webpki-root-certs 0.26.11", "windows-sys 0.59.0", @@ -10145,7 +10177,6 @@ dependencies = [ "parity-scale-codec", "qp-dilithium-crypto", "qp-rusty-crystals-dilithium", - "qp-rusty-crystals-hdwallet", "rand 0.8.5", "regex", "rpassword", @@ -10164,7 +10195,6 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-keyring", - 
"sp-keystore", "sp-panic-handler", "sp-runtime", "sp-tracing", @@ -10320,9 +10350,6 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-service", - "scale-info", - "sha2 0.10.9", - "sha3", "sp-api", "sp-block-builder", "sp-blockchain", @@ -10511,7 +10538,6 @@ dependencies = [ "log", "mockall", "multistream-select", - "once_cell", "parity-scale-codec", "parking_lot 0.12.4", "partial_sort", @@ -10673,7 +10699,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.4", "rand 0.8.5", - "rustls", + "rustls 0.23.32", "sc-client-api", "sc-network", "sc-network-types", @@ -11256,6 +11282,16 @@ dependencies = [ "sha2 0.10.9", ] +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.14", + "untrusted 0.9.0", +] + [[package]] name = "sec1" version = "0.7.3" @@ -11345,6 +11381,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework" version = "3.5.0" @@ -11473,18 +11522,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - [[package]] name = "serde_with" version = "3.14.1" @@ -13201,12 +13238,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "synstructure" version = "0.12.6" @@ -13245,17 +13276,6 @@ dependencies = [ "windows 0.52.0", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys 0.5.0", -] - [[package]] name = "system-configuration" version = "0.6.1" @@ -13264,17 +13284,7 @@ checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags 2.9.4", "core-foundation 0.9.4", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", + "system-configuration-sys", ] [[package]] @@ -13582,7 +13592,7 @@ version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f63835928ca123f1bef57abbcd23bb2ba0ac9ae1235f1e65bda0d06e7786bd" dependencies = [ - "rustls", + "rustls 0.23.32", "tokio 1.47.1", ] @@ -13606,8 +13616,8 @@ checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" dependencies = [ "futures-util", "log", - "rustls", - "rustls-native-certs", + "rustls 0.23.32", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio 1.47.1", "tokio-rustls", @@ -13931,7 +13941,7 @@ dependencies = [ "httparse", "log", "rand 0.9.2", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "sha1", "thiserror 2.0.16", @@ -14124,7 +14134,6 @@ checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", - "serde", "wasm-bindgen", ] diff --git a/Cargo.toml b/Cargo.toml index f57e846a..c78175f0 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,8 +13,8 @@ members = [ "miner-api", "node", "pallets/balances", - "pallets/merkle-airdrop", "pallets/mining-rewards", + "pallets/multisig", "pallets/qpow", "pallets/reversible-transfers", "pallets/scheduler", @@ -22,6 +22,7 @@ members = [ "primitives/consensus/pow", "primitives/consensus/qpow", "primitives/dilithium-crypto", + "primitives/header", "primitives/scheduler", "primitives/state-machine", "primitives/trie", @@ -84,6 +85,8 @@ names = { version = "0.14.0", default-features = false } nohash-hasher = { version = "0.2.0" } num-traits = { version = "0.2", default-features = false, features = ["libm"] } once_cell = { version = "1.21.3" } +p3-field = { version = "0.3.0" } +p3-goldilocks = { version = "0.3.0" } parking_lot = { version = "0.12.1", default-features = false } partial_sort = { version = "0.2.0" } paste = { version = "1.0.15", default-features = false } @@ -128,13 +131,14 @@ zeroize = { version = "1.7.0", default-features = false } # Own dependencies pallet-balances = { path = "./pallets/balances", default-features = false } -pallet-merkle-airdrop = { path = "./pallets/merkle-airdrop", default-features = false } pallet-mining-rewards = { path = "./pallets/mining-rewards", default-features = false } +pallet-multisig = { path = "./pallets/multisig", default-features = false } pallet-qpow = { path = "./pallets/qpow", default-features = false } pallet-reversible-transfers = { path = "./pallets/reversible-transfers", default-features = false } pallet-scheduler = { path = "./pallets/scheduler", default-features = false } pallet-wormhole = { path = "./pallets/wormhole", default-features = false } qp-dilithium-crypto = { path = "./primitives/dilithium-crypto", version = "0.2.0", default-features = false } +qp-header = { path = "./primitives/header", default-features = false } qp-scheduler = { path = "./primitives/scheduler", default-features = false } qp-wormhole = { path = "./primitives/wormhole", 
default-features = false } qpow-math = { path = "./qpow-math", default-features = false } @@ -182,7 +186,6 @@ pallet-transaction-payment-rpc = { version = "44.0.0", default-features = false pallet-transaction-payment-rpc-runtime-api = { version = "41.0.0", default-features = false } pallet-treasury = { version = "40.0.0", default-features = false } pallet-utility = { version = "41.0.0", default-features = false } -pallet-vesting = { version = "41.0.0", default-features = false } prometheus-endpoint = { version = "0.17.2", default-features = false, package = "substrate-prometheus-endpoint" } sc-basic-authorship = { version = "0.50.0", default-features = false } sc-block-builder = { version = "0.45.0", default-features = true } @@ -192,6 +195,7 @@ sc-consensus = { version = "0.50.0", default-features = false } sc-executor = { version = "0.43.0", default-features = false } sc-network = { version = "0.51.0", default-features = false } sc-network-common = { version = "0.49.0", default-features = false } +sc-network-sync = { version = "0.50.0", default-features = false } sc-network-types = { version = "0.17.0", default-features = false } sc-offchain = { version = "46.0.0", default-features = false } sc-service = { version = "0.52.0", default-features = false } diff --git a/EXTERNAL_MINER_PROTOCOL.md b/EXTERNAL_MINER_PROTOCOL.md index 187d4a76..9c17730a 100644 --- a/EXTERNAL_MINER_PROTOCOL.md +++ b/EXTERNAL_MINER_PROTOCOL.md @@ -1,256 +1,243 @@ # External Miner Protocol Specification -This document defines the JSON-based HTTP protocol for communication between the Resonance Network node and an external QPoW miner service. +This document defines the QUIC-based protocol for communication between the Quantus Network node and external QPoW miner services. ## Overview -The node delegates the mining task (finding a valid nonce) to an external service. 
The node provides the necessary parameters (header hash, difficulty, nonce range) and the external miner searches for a valid nonce according to the QPoW rules defined in the `qpow-math` crate. The miner returns the result, including the winning nonce, when found. +The node delegates the mining task (finding a valid nonce) to external miner services over persistent QUIC connections. The node provides the necessary parameters (header hash, difficulty) and each external miner independently searches for a valid nonce according to the QPoW rules defined in the `qpow-math` crate. Miners push results back when found. -## Data Types +### Key Benefits of QUIC -See the `resonance-miner-api` crate for the canonical Rust definitions of these structures. - -- `job_id`: String (UUID recommended) - Unique identifier for a specific mining task, generated by the node. -- `mining_hash`: String (64 hex chars, no 0x) - The header hash for which to find a nonce. -- `difficulty`: String (u64 as string) - The target difficulty for the mining job. -- `nonce_start`: String (128 hex chars, no 0x) - The starting nonce value (inclusive) for the search range. -- `nonce_end`: String (128 hex chars, no 0x) - The ending nonce value (inclusive) for the search range. -- `status`: Enum (`ApiResponseStatus`) - Indicates the state or result of an API call. -- `message`: String (optional) - Provides details for `Error` status responses. -- `nonce`: String (Hex, no 0x) - Represents the `U512` value of the current or winning nonce. -- `work`: String (128 hex chars, no 0x) - Represents the winning nonce as `[u8; 64]`. This is the value the node needs for verification. -- `hash_count`: Number (u64) - Number of nonces checked by the miner for the job. -- `elapsed_time`: Number (f64) - Time in seconds the miner spent on the job. - -## Endpoints - -### 1. Submit Mining Job - -- **Endpoint:** `POST /mine` -- **Description:** The node requests the external miner to start searching for a valid nonce. 
-- **Request Body (`MiningRequest`):** - ```json - { - "job_id": "...", - "mining_hash": "...", - "difficulty": "...", - "nonce_start": "...", - "nonce_end": "..." - } - ``` -- **Response Body (`MiningResponse`):** - - Success (200 OK): - ```json - { - "status": "accepted", - "job_id": "..." - } - ``` - - Error (400 Bad Request - Invalid Input / 409 Conflict - Duplicate Job ID): - ```json - { - "status": "error", - "job_id": "...", - "message": "..." // e.g., "Job already exists", "Invalid mining_hash (...)" - } - ``` - -### 2. Get Job Result - -- **Endpoint:** `GET /result/{job_id}` -- **Description:** The node polls the external miner to check the status and retrieve the result. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to query. -- **Response Body (`MiningResult`):** - - Job Completed (200 OK): - ```json - { - "status": "completed", - "job_id": "...", - "nonce": "...", // U512 hex value of winning nonce - "work": "...", // [u8; 64] hex value of winning nonce - "hash_count": ..., // u64 - "elapsed_time": ... // f64 seconds - } - ``` - - Job Still Running (200 OK): - ```json - { - "status": "running", - "job_id": "...", - "nonce": "...", // Current nonce being checked (U512 hex) - "work": null, - "hash_count": ..., // u64 - "elapsed_time": ... // f64 seconds - } - ``` - - Job Failed (e.g., nonce range exhausted) (200 OK): - ```json - { - "status": "failed", - "job_id": "...", - "nonce": "...", // Final nonce checked (U512 hex) - "work": null, - "hash_count": ..., // u64 - "elapsed_time": ... // f64 seconds - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "...", - "nonce": null, - "work": null, - "hash_count": 0, - "elapsed_time": 0.0 - } - ``` - -### 3. Cancel Mining Job - -- **Endpoint:** `POST /cancel/{job_id}` -- **Description:** The node requests the external miner to stop working on a specific job. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to cancel. 
-- **Request Body:** (Empty) -- **Response Body (`MiningResponse`): - - Success (200 OK): - ```json - { - "status": "cancelled", - "job_id": "..." - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "..." - } - ``` +- **Lower latency**: Results are pushed immediately when found (no polling) +- **Connection resilience**: Built-in connection migration and recovery +- **Multiplexed streams**: Multiple operations on single connection +- **Built-in TLS**: Encrypted by default -## Notes +## Architecture -- All hex values (`mining_hash`, `nonce_start`, `nonce_end`, `nonce`, `work`) should be sent **without** the `0x` prefix. -- The miner must implement the validation logic defined in `qpow_math::is_valid_nonce`. -- The node relies primarily on the `work` field in the `MiningResult` (when status is `completed`) for constructing the `QPoWSeal`. +### Connection Model -# External Miner Protocol Specification +``` + ┌─────────────────────────────────┐ + │ Node │ + │ (QUIC Server on port 9833) │ + │ │ +┌──────────┐ │ Broadcasts: NewJob │ +│ Miner 1 │ ──connect───► │ Receives: JobResult │ +└──────────┘ │ │ + │ Supports multiple miners │ +┌──────────┐ │ First valid result wins │ +│ Miner 2 │ ──connect───► │ │ +└──────────┘ └─────────────────────────────────┘ + +┌──────────┐ +│ Miner 3 │ ──connect───► +└──────────┘ +``` -This document defines the JSON-based HTTP protocol for communication between the node and an external QPoW miner. +- **Node** acts as the QUIC server, listening on port 9833 (default) +- **Miners** act as QUIC clients, connecting to the node +- Single bidirectional stream per miner connection +- Connection persists across multiple mining jobs +- Multiple miners can connect simultaneously -## Overview +### Multi-Miner Operation + +When multiple miners are connected: +1. Node broadcasts the same `NewJob` to all connected miners +2. Each miner independently selects a random starting nonce +3. 
First miner to find a valid solution sends `JobResult` +4. Node uses the first valid result, ignores subsequent results for same job +5. New job broadcast implicitly cancels work on all miners + +### Message Types + +The protocol uses **three message types**: + +| Direction | Message | Description | +|-----------|---------|-------------| +| Miner → Node | `Ready` | Sent immediately after connecting to establish the stream | +| Node → Miner | `NewJob` | Submit a mining job (implicitly cancels any previous job) | +| Miner → Node | `JobResult` | Mining result (completed, failed, or cancelled) | + +### Wire Format + +Messages are length-prefixed JSON: + +``` +┌─────────────────┬─────────────────────────────────┐ +│ Length (4 bytes)│ JSON payload (MinerMessage) │ +│ big-endian u32 │ │ +└─────────────────┴─────────────────────────────────┘ +``` -The node delegates the mining task to an external service. The node provides the necessary parameters (mining hash, difficulty, and a nonce range) and the external miner searches for a valid nonce within that range. The miner returns the nonce and the resulting work hash when a solution is found. +Maximum message size: 16 MB ## Data Types -- `job_id`: String (UUID recommended) - Identifier for a specific mining task. -- `mining_hash`: String (Hex-encoded, 32-byte hash, H256) - The hash derived from the block header data that the miner needs to solve. -- `difficulty`: String (Decimal representation of u64) - The target difficulty for the block. -- `nonce_start`: String (Hex-encoded, 64-byte value, U512) - The starting nonce value (inclusive). -- `nonce_end`: String (Hex-encoded, 64-byte value, U512) - The ending nonce value (inclusive). -- `nonce`: String (Hex-encoded, 64-byte value, U512) - The solution found by the miner. -- `work`: String (Hex-encoded, 32-byte hash, H256) - The hash resulting from the combination of `mining_hash` and `nonce`, meeting the difficulty requirement. 
-- `status`: String Enum - Indicates the state or result of an API call. - -## Endpoints - -### 1. Start Mining Job - -- **Endpoint:** `POST /mine` -- **Description:** The node requests the external miner to start searching for a valid nonce within the specified range for the given parameters. -- **Request Body (application/json):** - ```json - { - "job_id": "...", // String (UUID), generated by the node - "mining_hash": "...", // Hex String (H256) - "difficulty": "...", // String (u64 decimal) - "nonce_start": "...", // Hex String (U512 hex) - "nonce_end": "..." // Hex String (U512 hex) - } - ``` -- **Response Body (application/json):** - - Success (200 OK): - ```json - { - "status": "accepted", - "job_id": "..." // String (UUID), confirming the job ID received - } - ``` - - Error (e.g., 400 Bad Request, 500 Internal Server Error): - ```json - { - "status": "rejected", - "reason": "..." // String (Description of error) - } - ``` - -### 2. Get Job Result - -- **Endpoint:** `GET /result/{job_id}` -- **Description:** The node polls the external miner to check the status and retrieve the result of a previously submitted job. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to query. -- **Response Body (application/json):** - - Solution Found (200 OK): - ```json - { - "status": "found", - "job_id": "...", // String (UUID) - "nonce": "...", // Hex String (U512 hex) - "work": "CAFEBABE01.." // Hex String (H256 hex) - } - ``` - - Still Working (200 OK): - ```json - { - "status": "working", - "job_id": "..." // String (UUID) - } - ``` - - Job Stale/Cancelled (200 OK): Indicates the job is no longer valid (e.g., the node requested cancellation or submitted work for a newer block). - ```json - { - "status": "stale", - "job_id": "..." // String (UUID) - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "..." // String (UUID) - } - ``` - -### 3. 
Cancel Mining Job - -- **Endpoint:** `POST /cancel/{job_id}` -- **Description:** The node requests the external miner to stop working on a specific job. This is typically used when the node receives a new block or its mining parameters change, making the old job obsolete. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to cancel. -- **Request Body:** (Empty) -- **Response Body (application/json):** - - Success (200 OK): - ```json - { - "status": "cancelled", - "job_id": "..." // String (UUID) - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "..." // String (UUID) - } - ``` +See the `quantus-miner-api` crate for the canonical Rust definitions. + +### MinerMessage (Enum) + +```rust +pub enum MinerMessage { + Ready, // Miner → Node: establish stream + NewJob(MiningRequest), // Node → Miner: submit job + JobResult(MiningResult), // Miner → Node: return result +} +``` + +### MiningRequest + +| Field | Type | Description | +|-------|------|-------------| +| `job_id` | String | Unique identifier (UUID recommended) | +| `mining_hash` | String | Header hash (64 hex chars, no 0x prefix) | +| `distance_threshold` | String | Difficulty (U512 as decimal string) | + +Note: Nonce range is not specified - each miner independently selects a random starting point. 
+ +### MiningResult + +| Field | Type | Description | +|-------|------|-------------| +| `status` | ApiResponseStatus | Result status (see below) | +| `job_id` | String | Job identifier | +| `nonce` | Option | Winning nonce (U512 hex, no 0x prefix) | +| `work` | Option | Winning nonce as bytes (128 hex chars) | +| `hash_count` | u64 | Number of nonces checked | +| `elapsed_time` | f64 | Time spent mining (seconds) | +| `miner_id` | Option | Miner ID (set by node, not miner) | + +### ApiResponseStatus (Enum) + +| Value | Description | +|-------|-------------| +| `completed` | Valid nonce found | +| `failed` | Nonce range exhausted without finding solution | +| `cancelled` | Job was cancelled (new job received) | +| `running` | Job still in progress (not typically sent) | + +## Protocol Flow + +### Normal Mining Flow + +``` +Miner Node + │ │ + │──── QUIC Connect ─────────────────────────►│ + │◄─── Connection Established ────────────────│ + │ │ + │──── Ready ────────────────────────────────►│ (establish stream) + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ + │ │ + │ (picks random nonce, starts mining) │ + │ │ + │──── JobResult { job_id: "abc", ... } ─────►│ (found solution!) + │ │ + │ (node submits block, gets new work) │ + │ │ + │◄─── NewJob { job_id: "def", ... } ─────────│ + │ │ +``` + +### Job Cancellation (Implicit) + +When a new block arrives before the miner finds a solution, the node simply sends a new `NewJob`. The miner automatically cancels the previous job: + +``` +Miner Node + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ + │ │ + │ (mining "abc") │ + │ │ + │ (new block arrives at node!) │ + │ │ + │◄─── NewJob { job_id: "def", ... } ─────────│ + │ │ + │ (cancels "abc", starts "def") │ + │ │ + │──── JobResult { job_id: "def", ... 
} ─────►│ +``` + +### Miner Connect During Active Job + +When a miner connects while a job is active, it immediately receives the current job: + +``` +Miner (new) Node + │ │ (already mining job "abc") + │──── QUIC Connect ─────────────────────────►│ + │◄─── Connection Established ────────────────│ + │ │ + │──── Ready ────────────────────────────────►│ (establish stream) + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ (current job sent immediately) + │ │ + │ (joins mining effort) │ +``` + +### Stale Result Handling + +If a result arrives for an old job, the node discards it: + +``` +Miner Node + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ + │ │ + │◄─── NewJob { job_id: "def", ... } ─────────│ (almost simultaneous) + │ │ + │──── JobResult { job_id: "abc", ... } ─────►│ (stale, node ignores) + │ │ + │──── JobResult { job_id: "def", ... } ─────►│ (current, node uses) +``` + +## Configuration + +### Node + +```bash +# Listen for external miner connections on port 9833 +quantus-node --miner-listen-port 9833 +``` + +### Miner + +```bash +# Connect to node +quantus-miner serve --node-addr 127.0.0.1:9833 +``` + +## TLS Configuration + +The node generates a self-signed TLS certificate at startup. The miner skips certificate verification by default (insecure mode). For production deployments, consider: + +1. **Certificate pinning**: Configure the miner to accept only specific certificate fingerprints +2. **Proper CA**: Use certificates signed by a trusted CA +3. **Network isolation**: Run node and miner on a private network + +## Error Handling + +### Connection Loss + +The miner automatically reconnects with exponential backoff: +- Initial delay: 1 second +- Maximum delay: 30 seconds + +The node continues operating with remaining connected miners. + +### Validation Errors + +If the miner receives an invalid `MiningRequest`, it sends a `JobResult` with status `failed`. 
## Notes -- The external miner should iterate from `nonce_start` up to and including `nonce_end` when searching for a valid nonce. -- The miner should return the `nonce` and the calculated `work` hash when a solution is found. -- The node uses the returned `nonce` and `work` (along with the fetched `difficulty`) to construct the `QPoWSeal` and submit it. -- The external miner should not need to know anything about the runtime or the code; it only needs to perform the nonce search and return the results. \ No newline at end of file +- All hex values should be sent **without** the `0x` prefix +- The miner implements validation logic from `qpow_math::is_valid_nonce` +- The node uses the `work` field from `MiningResult` to construct `QPoWSeal` +- ALPN protocol identifier: `quantus-miner` +- Each miner independently generates a random nonce starting point using cryptographically secure randomness +- With a 512-bit nonce space, collision between miners is statistically impossible diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e730af89..23c601e1 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -27,7 +27,6 @@ log = { workspace = true, default-features = true } names = { workspace = true, default-features = false } qp-dilithium-crypto = { workspace = true, features = ["full_crypto", "serde", "std"] } qp-rusty-crystals-dilithium = { workspace = true } -qp-rusty-crystals-hdwallet = { workspace = true } rand = { workspace = true, default-features = true } regex = { workspace = true } rpassword = { workspace = true } @@ -46,7 +45,6 @@ serde_json = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } sp-panic-handler = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = 
true } sp-version = { workspace = true, default-features = true } diff --git a/client/consensus/qpow/Cargo.toml b/client/consensus/qpow/Cargo.toml index 7c9c1de2..598bef29 100644 --- a/client/consensus/qpow/Cargo.toml +++ b/client/consensus/qpow/Cargo.toml @@ -18,9 +18,6 @@ prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = false } sc-consensus = { workspace = true } sc-service = { workspace = true, default-features = false } -scale-info = { workspace = true, default-features = false } -sha2.workspace = true -sha3.workspace = true sp-api = { workspace = true, default-features = false } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = false } @@ -37,9 +34,6 @@ default = ["std"] std = [ "codec/std", "primitive-types/std", - "scale-info/std", - "sha2/std", - "sha3/std", "sp-api/std", "sp-consensus-pow/std", "sp-consensus-qpow/std", diff --git a/client/consensus/qpow/src/lib.rs b/client/consensus/qpow/src/lib.rs index 8fc459ee..4a8e36ac 100644 --- a/client/consensus/qpow/src/lib.rs +++ b/client/consensus/qpow/src/lib.rs @@ -10,9 +10,9 @@ use sp_consensus_qpow::QPoWApi; use sp_runtime::{generic::BlockId, traits::Block as BlockT, AccountId32}; use std::{sync::Arc, time::Duration}; -use crate::worker::UntilImportedOrTimeout; -pub use crate::worker::{MiningBuild, MiningHandle, MiningMetadata}; -use futures::{Future, StreamExt}; +use crate::worker::UntilImportedOrTransaction; +pub use crate::worker::{MiningBuild, MiningHandle, MiningMetadata, RebuildTrigger}; +use futures::{Future, Stream, StreamExt}; use log::*; use prometheus_endpoint::Registry; use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; @@ -342,6 +342,10 @@ where Ok(BasicQueue::new(verifier, block_import, justification_import, spawner, registry)) } +/// Maximum transaction-triggered rebuilds per second. 
+/// Hardcoded for now but could be made configurable later. +const MAX_REBUILDS_PER_SEC: u32 = 2; + /// Start the mining worker for QPoW. This function provides the necessary helper functions that can /// be used to implement a miner. However, it does not do the CPU-intensive mining itself. /// @@ -349,11 +353,17 @@ where /// mining metadata and submitting mined blocks, and a future, which must be polled to fill in /// information in the worker. /// -/// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted -/// for blocks being built. This can encode authorship information, or just be a graffiti. +/// The worker will rebuild blocks when: +/// - A new block is imported from the network +/// - New transactions arrive (rate limited to MAX_REBUILDS_PER_SEC) +/// +/// This allows transactions to be included faster since we don't wait for the next block import +/// to rebuild. Mining on a new block vs the old block has the same probability of success per +/// nonce, so the only cost is the overhead of rebuilding (which is minimal compared to mining +/// time). 
#[allow(clippy::too_many_arguments)] #[allow(clippy::type_complexity)] -pub fn start_mining_worker( +pub fn start_mining_worker( block_import: BoxBlockImport, client: Arc, select_chain: S, @@ -362,7 +372,7 @@ pub fn start_mining_worker( justification_sync_link: L, rewards_address: AccountId32, create_inherent_data_providers: CIDP, - timeout: Duration, + tx_notifications: TxStream, build_time: Duration, ) -> (MiningHandle>::Proof>, impl Future) where @@ -381,17 +391,22 @@ where SO: SyncOracle + Clone + Send + Sync + 'static, L: JustificationSyncLink, CIDP: CreateInherentDataProviders, + TxHash: Send + 'static, + TxStream: Stream + Send + Unpin + 'static, { - let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); + let mut trigger_stream = UntilImportedOrTransaction::new( + client.import_notification_stream(), + tx_notifications, + MAX_REBUILDS_PER_SEC, + ); let worker = MiningHandle::new(client.clone(), block_import, justification_sync_link); let worker_ret = worker.clone(); let task = async move { - loop { - if timer.next().await.is_none() { - break; - } - + // Main block building loop - runs until trigger stream closes + // Wait for a trigger (Initial, BlockImported, or NewTransactions) + // continue skips to the next iteration to wait for another trigger + while let Some(trigger) = trigger_stream.next().await { if sync_oracle.is_major_syncing() { debug!(target: LOG_TARGET, "Skipping proposal due to sync."); worker.on_major_syncing(); @@ -412,7 +427,9 @@ where }; let best_hash = best_header.hash(); - if worker.best_hash() == Some(best_hash) { + // Skip redundant block import triggers if we're already building on this hash. + // Initial and NewTransactions triggers should proceed to rebuild. 
+ if trigger == RebuildTrigger::BlockImported && worker.best_hash() == Some(best_hash) { continue; } diff --git a/client/consensus/qpow/src/worker.rs b/client/consensus/qpow/src/worker.rs index 7fd05f96..ca41aa95 100644 --- a/client/consensus/qpow/src/worker.rs +++ b/client/consensus/qpow/src/worker.rs @@ -31,6 +31,7 @@ use sc_consensus::{BlockImportParams, BoxBlockImport, StateAction, StorageChange use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Proposal}; use sp_consensus_pow::{Seal, POW_ENGINE_ID}; +use sp_consensus_qpow::QPoWApi; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, AccountId32, DigestItem, @@ -82,6 +83,7 @@ impl MiningHandle where Block: BlockT, AC: ProvideRuntimeApi, + AC::Api: QPoWApi, L: sc_consensus::JustificationSyncLink, { fn increment_version(&self) { @@ -133,6 +135,39 @@ where self.build.lock().as_ref().map(|b| b.metadata.clone()) } + /// Verify a seal without consuming the build. + /// + /// Returns `true` if the seal is valid for the current block, `false` otherwise. + /// Returns `false` if there's no current build. + pub fn verify_seal(&self, seal: &Seal) -> bool { + let build = self.build.lock(); + let build = match build.as_ref() { + Some(b) => b, + None => return false, + }; + + // Convert seal to nonce [u8; 64] + let nonce: [u8; 64] = match seal.as_slice().try_into() { + Ok(arr) => arr, + Err(_) => { + warn!(target: LOG_TARGET, "Seal does not have exactly 64 bytes"); + return false; + }, + }; + + let pre_hash = build.metadata.pre_hash.0; + let best_hash = build.metadata.best_hash; + + // Verify using runtime API + match self.client.runtime_api().verify_nonce_local_mining(best_hash, pre_hash, nonce) { + Ok(valid) => valid, + Err(e) => { + warn!(target: LOG_TARGET, "Runtime API error verifying seal: {:?}", e); + false + }, + } + } + /// Submit a mined seal. The seal will be validated again. Returns true if the submission is /// successful. 
#[allow(clippy::await_holding_lock)] @@ -198,51 +233,117 @@ where } } -/// A stream that waits for a block import or timeout. -pub struct UntilImportedOrTimeout { +/// Reason why the stream fired - either a block was imported or enough transactions arrived. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RebuildTrigger { + /// Initial trigger to bootstrap mining (fires once on first poll). + Initial, + /// A new block was imported from the network. + BlockImported, + /// Enough new transactions arrived to trigger a rebuild. + NewTransactions, +} + +/// A stream that waits for a block import or new transactions (with rate limiting). +/// +/// This enables block producers to include new transactions faster by rebuilding +/// the block being mined when transactions arrive, rather than waiting for the +/// next block import or timeout. +/// +/// Rate limiting prevents excessive rebuilds - we limit to `max_rebuilds_per_sec`. +pub struct UntilImportedOrTransaction { + /// Block import notifications stream. import_notifications: ImportNotifications, - timeout: Duration, - inner_delay: Option, + /// Transaction pool import notifications stream. + tx_notifications: Pin + Send>>, + /// Minimum interval between transaction-triggered rebuilds. + min_rebuild_interval: Duration, + /// Rate limit delay - if set, we're waiting before we can fire again. + rate_limit_delay: Option, + /// Whether we've fired the initial trigger yet. + initial_fired: bool, + /// Whether we have pending transactions waiting to trigger a rebuild. + has_pending_tx: bool, } -impl UntilImportedOrTimeout { - /// Create a new stream using the given import notification and timeout duration. - pub fn new(import_notifications: ImportNotifications, timeout: Duration) -> Self { - Self { import_notifications, timeout, inner_delay: None } +impl UntilImportedOrTransaction { + /// Create a new stream. 
+ /// + /// # Arguments + /// * `import_notifications` - Stream of block import notifications + /// * `tx_notifications` - Stream of transaction import notifications + /// * `max_rebuilds_per_sec` - Maximum transaction-triggered rebuilds per second + pub fn new( + import_notifications: ImportNotifications, + tx_notifications: impl Stream + Send + 'static, + max_rebuilds_per_sec: u32, + ) -> Self { + let min_rebuild_interval = if max_rebuilds_per_sec > 0 { + Duration::from_millis(1000 / max_rebuilds_per_sec as u64) + } else { + Duration::from_secs(u64::MAX) // Effectively disable tx-triggered rebuilds + }; + + Self { + import_notifications, + tx_notifications: Box::pin(tx_notifications), + min_rebuild_interval, + rate_limit_delay: None, + initial_fired: false, + has_pending_tx: false, + } } } -impl Stream for UntilImportedOrTimeout { - type Item = (); +impl Stream for UntilImportedOrTransaction { + type Item = RebuildTrigger; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut fire = false; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // Fire immediately on first poll to bootstrap mining at genesis + if !self.initial_fired { + self.initial_fired = true; + debug!(target: LOG_TARGET, "Initial trigger, bootstrapping block production"); + return Poll::Ready(Some(RebuildTrigger::Initial)); + } - loop { - match Stream::poll_next(Pin::new(&mut self.import_notifications), cx) { - Poll::Pending => break, - Poll::Ready(Some(_)) => { - fire = true; + // Check for block imports first - these always trigger immediately + if let Poll::Ready(notification) = + Stream::poll_next(Pin::new(&mut self.import_notifications), cx) + { + match notification { + Some(_) => { + // Block import resets pending state since we'll build fresh + self.has_pending_tx = false; + self.rate_limit_delay = None; + debug!(target: LOG_TARGET, "Block imported, triggering rebuild"); + return Poll::Ready(Some(RebuildTrigger::BlockImported)); }, - 
Poll::Ready(None) => return Poll::Ready(None), + None => return Poll::Ready(None), } } - let timeout = self.timeout; - let inner_delay = self.inner_delay.get_or_insert_with(|| Delay::new(timeout)); - - match Future::poll(Pin::new(inner_delay), cx) { - Poll::Pending => (), - Poll::Ready(()) => { - fire = true; - }, + // Drain all pending transaction notifications + while let Poll::Ready(Some(_)) = Stream::poll_next(Pin::new(&mut self.tx_notifications), cx) + { + self.has_pending_tx = true; } - if fire { - self.inner_delay = None; - Poll::Ready(Some(())) - } else { - Poll::Pending + // If we have pending transactions, check rate limit + if self.has_pending_tx { + // Check if rate limit allows firing (no delay or delay expired) + let can_fire = match self.rate_limit_delay.as_mut() { + None => true, + Some(delay) => Future::poll(Pin::new(delay), cx).is_ready(), + }; + + if can_fire { + self.has_pending_tx = false; + self.rate_limit_delay = Some(Delay::new(self.min_rebuild_interval)); + debug!(target: LOG_TARGET, "New transaction(s), triggering rebuild"); + return Poll::Ready(Some(RebuildTrigger::NewTransactions)); + } } + + Poll::Pending } } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 5e00f677..5616dabf 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -39,7 +39,6 @@ libp2p-identity = { workspace = true, features = ["dilithium"] } linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } -once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } partial_sort = { workspace = true } pin-project = { workspace = true } diff --git a/miner-api/Cargo.toml b/miner-api/Cargo.toml index 142365a8..0f532cb7 100644 --- a/miner-api/Cargo.toml +++ b/miner-api/Cargo.toml @@ -16,3 +16,5 @@ version = "0.0.3" [dependencies] serde = { workspace = true, features = ["alloc", "derive"] } +serde_json = { workspace = true, features = ["std"] } 
+tokio = { workspace = true, features = ["io-util"] } diff --git a/miner-api/src/lib.rs b/miner-api/src/lib.rs index 869c869b..6c15fc34 100644 --- a/miner-api/src/lib.rs +++ b/miner-api/src/lib.rs @@ -1,4 +1,8 @@ use serde::{Deserialize, Serialize}; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +/// Maximum message size (16 MB) to prevent memory exhaustion attacks. +pub const MAX_MESSAGE_SIZE: u32 = 16 * 1024 * 1024; /// Status codes returned in API responses. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] @@ -13,18 +17,74 @@ pub enum ApiResponseStatus { Error, } -/// Request payload sent from Node to Miner (`/mine` endpoint). +/// QUIC protocol messages exchanged between node and miner. +/// +/// The protocol is: +/// - Miner sends `Ready` immediately after connecting to establish the stream +/// - Node sends `NewJob` to submit a mining job (implicitly cancels any previous job) +/// - Miner sends `JobResult` when mining completes +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum MinerMessage { + /// Miner → Node: Sent immediately after connecting to establish the stream. + /// This is required because QUIC streams are lazily initialized. + Ready, + + /// Node → Miner: Submit a new mining job. + /// If a job is already running, it will be cancelled and replaced. + NewJob(MiningRequest), + + /// Miner → Node: Mining result (completed, failed, or cancelled). + JobResult(MiningResult), +} + +/// Write a length-prefixed JSON message to an async writer. +/// +/// Wire format: 4-byte big-endian length prefix followed by JSON payload. 
+pub async fn write_message( + writer: &mut W, + msg: &MinerMessage, +) -> std::io::Result<()> { + let json = serde_json::to_vec(msg) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + let len = json.len() as u32; + writer.write_all(&len.to_be_bytes()).await?; + writer.write_all(&json).await?; + Ok(()) +} + +/// Read a length-prefixed JSON message from an async reader. +/// +/// Wire format: 4-byte big-endian length prefix followed by JSON payload. +/// Returns an error if the message exceeds MAX_MESSAGE_SIZE. +pub async fn read_message(reader: &mut R) -> std::io::Result { + let mut len_buf = [0u8; 4]; + reader.read_exact(&mut len_buf).await?; + let len = u32::from_be_bytes(len_buf); + + if len > MAX_MESSAGE_SIZE { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Message size {} exceeds maximum {}", len, MAX_MESSAGE_SIZE), + )); + } + + let mut buf = vec![0u8; len as usize]; + reader.read_exact(&mut buf).await?; + serde_json::from_slice(&buf) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) +} + +/// Request payload sent from Node to Miner. +/// +/// The miner will choose its own random starting nonce, enabling multiple +/// miners to work on the same job without coordination. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiningRequest { pub job_id: String, /// Hex encoded header hash (32 bytes -> 64 chars, no 0x prefix) pub mining_hash: String, - /// Distance threshold (u64 as string) + /// Distance threshold (U512 as decimal string) pub distance_threshold: String, - /// Hex encoded start nonce (U512 -> 128 chars, no 0x prefix) - pub nonce_start: String, - /// Hex encoded end nonce (U512 -> 128 chars, no 0x prefix) - pub nonce_end: String, } /// Response payload for job submission (`/mine`) and cancellation (`/cancel`). 
@@ -48,4 +108,7 @@ pub struct MiningResult { pub work: Option, pub hash_count: u64, pub elapsed_time: f64, + /// Miner ID assigned by the node (set server-side, not by the miner). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub miner_id: Option, } diff --git a/node/Cargo.toml b/node/Cargo.toml index 271a7813..9df4bb9e 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -33,16 +33,17 @@ pallet-transaction-payment-rpc.default-features = true pallet-transaction-payment-rpc.workspace = true prometheus.workspace = true qp-dilithium-crypto = { workspace = true } -qp-rusty-crystals-dilithium.workspace = true qp-rusty-crystals-hdwallet.workspace = true qpow-math.workspace = true quantus-miner-api = { workspace = true } quantus-runtime.workspace = true +quinn = "0.10" rand = { workspace = true, default-features = false, features = [ "alloc", "getrandom", ] } -reqwest = { workspace = true, default-features = false, features = ["json"] } +rcgen = "0.11" +rustls = { version = "0.21", default-features = false, features = ["dangerous_configuration", "quic"] } sc-basic-authorship.default-features = true sc-basic-authorship.workspace = true sc-cli.default-features = true @@ -56,6 +57,8 @@ sc-executor.default-features = true sc-executor.workspace = true sc-network.default-features = true sc-network.workspace = true +sc-network-sync.default-features = true +sc-network-sync.workspace = true sc-offchain.default-features = true sc-offchain.workspace = true sc-service.default-features = true @@ -93,7 +96,6 @@ sp-timestamp.workspace = true substrate-frame-rpc-system.default-features = true substrate-frame-rpc-system.workspace = true tokio-util.workspace = true -uuid.workspace = true [build-dependencies] qp-wormhole-circuit-builder.workspace = true @@ -115,6 +117,7 @@ std = [ "serde_json/std", "sp-consensus-qpow/std", ] +tx-logging = [] # Enable transaction pool logging for debugging # Dependencies that are only required if runtime benchmarking should be build. 
runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", diff --git a/node/src/cli.rs b/node/src/cli.rs index b9d3003a..f52ff1d2 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -13,9 +13,10 @@ pub struct Cli { #[arg(long, value_name = "REWARDS_ADDRESS")] pub rewards_address: Option, - /// Specify the URL of an external QPoW miner service - #[arg(long, value_name = "EXTERNAL_MINER_URL")] - pub external_miner_url: Option, + /// Port to listen for external miner connections (e.g., 9833). + /// When set, the node will wait for miners to connect instead of mining locally. + #[arg(long, value_name = "PORT")] + pub miner_listen_port: Option, /// Enable peer sharing via RPC endpoint #[arg(long)] diff --git a/node/src/command.rs b/node/src/command.rs index 654d8506..b44d75ac 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -480,9 +480,7 @@ pub fn run() -> sc_cli::Result<()> { quantus_runtime::opaque::Block, ::Hash, >, - >( - config, rewards_account, cli.external_miner_url.clone(), cli.enable_peer_sharing - ) + >(config, rewards_account, cli.miner_listen_port, cli.enable_peer_sharing) .map_err(sc_cli::Error::Service) }) }, diff --git a/node/src/external_miner_client.rs b/node/src/external_miner_client.rs deleted file mode 100644 index 49d7f764..00000000 --- a/node/src/external_miner_client.rs +++ /dev/null @@ -1,110 +0,0 @@ -use quantus_miner_api::{ApiResponseStatus, MiningRequest, MiningResponse, MiningResult}; -/// Functions to interact with the external miner service -use reqwest::Client; -use sp_core::{H256, U512}; - -// Make functions pub(crate) or pub as needed -pub(crate) async fn submit_mining_job( - client: &Client, - miner_url: &str, - job_id: &str, - mining_hash: &H256, - distance_threshold: U512, - nonce_start: U512, - nonce_end: U512, -) -> Result<(), String> { - let request = MiningRequest { - job_id: job_id.to_string(), - mining_hash: hex::encode(mining_hash.as_bytes()), - distance_threshold: distance_threshold.to_string(), - 
nonce_start: format!("{:0128x}", nonce_start), - nonce_end: format!("{:0128x}", nonce_end), - }; - - let response = client - .post(format!("{}/mine", miner_url)) - .json(&request) - .send() - .await - .map_err(|e| format!("Failed to send mining request: {}", e))?; - - let result: MiningResponse = response - .json() - .await - .map_err(|e| format!("Failed to parse mining response: {}", e))?; - - if result.status != ApiResponseStatus::Accepted { - return Err(format!("Mining job was not accepted: {:?}", result.status)); - } - - Ok(()) -} - -pub(crate) async fn check_mining_result( - client: &Client, - miner_url: &str, - job_id: &str, -) -> Result, String> { - let response = client - .get(format!("{}/result/{}", miner_url, job_id)) - .send() - .await - .map_err(|e| format!("Failed to check mining result: {}", e))?; - - let result: MiningResult = response - .json() - .await - .map_err(|e| format!("Failed to parse mining result: {}", e))?; - - match result.status { - ApiResponseStatus::Completed => - if let Some(work_hex) = result.work { - let nonce_bytes = hex::decode(&work_hex) - .map_err(|e| format!("Failed to decode work hex '{}': {}", work_hex, e))?; - if nonce_bytes.len() == 64 { - let mut nonce = [0u8; 64]; - nonce.copy_from_slice(&nonce_bytes); - Ok(Some(nonce)) - } else { - Err(format!( - "Invalid decoded work length: {} bytes (expected 64)", - nonce_bytes.len() - )) - } - } else { - Err("Missing 'work' field in completed mining result".to_string()) - }, - ApiResponseStatus::Running => Ok(None), - ApiResponseStatus::NotFound => Err("Mining job not found".to_string()), - ApiResponseStatus::Failed => Err("Mining job failed (miner reported)".to_string()), - ApiResponseStatus::Cancelled => - Err("Mining job was cancelled (miner reported)".to_string()), - ApiResponseStatus::Error => Err("Miner reported an unspecified error".to_string()), - ApiResponseStatus::Accepted => - Err("Unexpected 'Accepted' status received from result endpoint".to_string()), - } -} - 
-pub(crate) async fn cancel_mining_job( - client: &Client, - miner_url: &str, - job_id: &str, -) -> Result<(), String> { - let response = client - .post(format!("{}/cancel/{}", miner_url, job_id)) - .send() - .await - .map_err(|e| format!("Failed to cancel mining job: {}", e))?; - - let result: MiningResponse = response - .json() - .await - .map_err(|e| format!("Failed to parse cancel response: {}", e))?; - - if result.status == ApiResponseStatus::Cancelled || result.status == ApiResponseStatus::NotFound - { - Ok(()) - } else { - Err(format!("Failed to cancel mining job (unexpected status): {:?}", result.status)) - } -} diff --git a/node/src/main.rs b/node/src/main.rs index f1fb0e64..f0627141 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -5,7 +5,7 @@ mod benchmarking; mod chain_spec; mod cli; mod command; -mod external_miner_client; +mod miner_server; mod prometheus; mod rpc; mod service; diff --git a/node/src/miner_server.rs b/node/src/miner_server.rs new file mode 100644 index 00000000..17951bf9 --- /dev/null +++ b/node/src/miner_server.rs @@ -0,0 +1,348 @@ +//! QUIC server for accepting connections from external miners. +//! +//! This module provides a QUIC server that miners connect to. It supports +//! multiple concurrent miners, broadcasting jobs to all connected miners +//! and collecting results. +//! +//! # Architecture +//! +//! ```text +//! ┌──────────┐ +//! │ Miner 1 │ ────┐ +//! └──────────┘ │ +//! │ ┌─────────────────┐ +//! ┌──────────┐ ├────>│ MinerServer │ +//! │ Miner 2 │ ────┤ │ (QUIC Server) │ +//! └──────────┘ │ └─────────────────┘ +//! │ +//! ┌──────────┐ │ +//! │ Miner 3 │ ────┘ +//! └──────────┘ +//! ``` +//! +//! # Protocol +//! +//! - Node sends `MinerMessage::NewJob` to all connected miners +//! - Each miner independently selects a random nonce starting point +//! - First miner to find a valid solution sends `MinerMessage::JobResult` +//! 
- When a new job is broadcast, miners implicitly cancel their current work + +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + +use jsonrpsee::tokio; +use quantus_miner_api::{read_message, write_message, MinerMessage, MiningRequest, MiningResult}; +use tokio::sync::{mpsc, RwLock}; + +/// A QUIC server that accepts connections from miners. +pub struct MinerServer { + /// Connected miners, keyed by unique ID. + miners: Arc>>, + /// Channel to receive results from any miner. + result_rx: tokio::sync::Mutex>, + /// Sender cloned to each miner connection handler. + result_tx: mpsc::Sender, + /// Current job being mined (sent to newly connecting miners). + current_job: Arc>>, + /// Counter for assigning unique miner IDs. + next_miner_id: AtomicU64, +} + +/// Handle for communicating with a connected miner. +struct MinerHandle { + /// Channel to send jobs to this miner. + job_tx: mpsc::Sender, +} + +impl MinerServer { + /// Start the QUIC server and listen for miner connections. + /// + /// This spawns a background task that accepts incoming connections. + pub async fn start(port: u16) -> Result, String> { + let (result_tx, result_rx) = mpsc::channel::(64); + + let server = Arc::new(Self { + miners: Arc::new(RwLock::new(HashMap::new())), + result_rx: tokio::sync::Mutex::new(result_rx), + result_tx, + current_job: Arc::new(RwLock::new(None)), + next_miner_id: AtomicU64::new(1), + }); + + // Start the acceptor task + let server_clone = server.clone(); + let endpoint = create_server_endpoint(port).await?; + + tokio::spawn(async move { + acceptor_task(endpoint, server_clone).await; + }); + + log::info!("⛏️ Miner server listening on port {}", port); + + Ok(server) + } + + /// Broadcast a job to all connected miners. + /// + /// This also stores the job so newly connecting miners receive it. 
+ pub async fn broadcast_job(&self, job: MiningRequest) { + // Store as current job for new miners + { + let mut current = self.current_job.write().await; + *current = Some(job.clone()); + } + + // Send to all connected miners + let miners = self.miners.read().await; + let miner_count = miners.len(); + + if miner_count == 0 { + log::debug!("No miners connected, job queued for when miners connect"); + return; + } + + log::debug!("Broadcasting job {} to {} miner(s)", job.job_id, miner_count); + + for (id, handle) in miners.iter() { + if let Err(e) = handle.job_tx.try_send(job.clone()) { + log::warn!("Failed to send job to miner {}: {}", id, e); + } + } + } + + /// Wait for a mining result with a timeout. + pub async fn recv_result_timeout(&self, timeout: Duration) -> Option { + let mut rx = self.result_rx.lock().await; + tokio::time::timeout(timeout, rx.recv()).await.ok().flatten() + } + + /// Add a new miner connection. + async fn add_miner(&self, job_tx: mpsc::Sender) -> u64 { + let id = self.next_miner_id.fetch_add(1, Ordering::Relaxed); + let handle = MinerHandle { job_tx }; + + self.miners.write().await.insert(id, handle); + + log::info!("⛏️ Miner {} connected (total: {})", id, self.miners.read().await.len()); + + id + } + + /// Remove a miner connection. + async fn remove_miner(&self, id: u64) { + self.miners.write().await.remove(&id); + log::info!("⛏️ Miner {} disconnected (total: {})", id, self.miners.read().await.len()); + } + + /// Get the current job (if any) for newly connecting miners. + async fn get_current_job(&self) -> Option { + self.current_job.read().await.clone() + } +} + +/// Create a QUIC server endpoint with self-signed certificate. 
+async fn create_server_endpoint(port: u16) -> Result { + // Generate self-signed certificate + let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()]) + .map_err(|e| format!("Failed to generate certificate: {}", e))?; + + let cert_der = cert + .serialize_der() + .map_err(|e| format!("Failed to serialize certificate: {}", e))?; + let key_der = cert.serialize_private_key_der(); + + let cert_chain = vec![rustls::Certificate(cert_der)]; + let key = rustls::PrivateKey(key_der); + + // Create server config + let mut server_config = rustls::ServerConfig::builder() + .with_safe_defaults() + .with_no_client_auth() + .with_single_cert(cert_chain, key) + .map_err(|e| format!("Failed to create server config: {}", e))?; + + // Set ALPN protocol + server_config.alpn_protocols = vec![b"quantus-miner".to_vec()]; + + let mut quinn_config = quinn::ServerConfig::with_crypto(Arc::new(server_config)); + + // Set transport config + let mut transport_config = quinn::TransportConfig::default(); + transport_config.keep_alive_interval(Some(Duration::from_secs(10))); + transport_config.max_idle_timeout(Some(Duration::from_secs(60).try_into().unwrap())); + quinn_config.transport_config(Arc::new(transport_config)); + + // Create endpoint + let addr = format!("0.0.0.0:{}", port).parse().unwrap(); + let endpoint = quinn::Endpoint::server(quinn_config, addr) + .map_err(|e| format!("Failed to create server endpoint: {}", e))?; + + Ok(endpoint) +} + +/// Background task that accepts incoming miner connections. 
+async fn acceptor_task(endpoint: quinn::Endpoint, server: Arc) { + log::debug!("Acceptor task started"); + + while let Some(connecting) = endpoint.accept().await { + let server = server.clone(); + + tokio::spawn(async move { + match connecting.await { + Ok(connection) => { + log::debug!("New QUIC connection from {:?}", connection.remote_address()); + handle_miner_connection(connection, server).await; + }, + Err(e) => { + log::warn!("Failed to accept connection: {}", e); + }, + } + }); + } + + log::info!("Acceptor task stopped"); +} + +/// Handle a single miner connection. +async fn handle_miner_connection(connection: quinn::Connection, server: Arc) { + let addr = connection.remote_address(); + log::info!("⛏️ New miner connection from {}", addr); + log::debug!("Waiting for miner {} to open bidirectional stream...", addr); + + // Accept bidirectional stream from miner + let (send, recv) = match connection.accept_bi().await { + Ok(streams) => { + log::info!("⛏️ Stream accepted from miner {}", addr); + streams + }, + Err(e) => { + log::warn!("Failed to accept stream from {}: {}", addr, e); + return; + }, + }; + + // Create channel for sending jobs to this miner + let (job_tx, job_rx) = mpsc::channel::(16); + + // Register miner + let miner_id = server.add_miner(job_tx).await; + + // Send current job if there is one + if let Some(job) = server.get_current_job().await { + log::debug!("Sending current job {} to newly connected miner {}", job.job_id, miner_id); + // We'll send it through the connection handler below + } + + // Handle the connection + let result = connection_handler( + miner_id, + send, + recv, + job_rx, + server.result_tx.clone(), + server.get_current_job().await, + ) + .await; + + if let Err(e) = result { + log::debug!("Miner {} connection ended: {}", miner_id, e); + } + + // Unregister miner + server.remove_miner(miner_id).await; +} + +/// Handle communication with a single miner. 
+async fn connection_handler(
+    miner_id: u64,
+    mut send: quinn::SendStream,
+    mut recv: quinn::RecvStream,
+    mut job_rx: mpsc::Receiver<MiningRequest>,
+    result_tx: mpsc::Sender<MiningResult>,
+    initial_job: Option<MiningRequest>,
+) -> Result<(), String> {
+    // Wait for Ready message from miner (required to establish the stream)
+    log::debug!("Waiting for Ready message from miner {}...", miner_id);
+    match read_message(&mut recv).await {
+        Ok(MinerMessage::Ready) => {
+            log::debug!("Received Ready from miner {}", miner_id);
+        },
+        Ok(other) => {
+            log::warn!("Expected Ready from miner {}, got {:?}", miner_id, other);
+            return Err("Protocol error: expected Ready message".to_string());
+        },
+        Err(e) => {
+            return Err(format!("Failed to read Ready message: {}", e));
+        },
+    }
+
+    // Send initial job if there is one
+    if let Some(job) = initial_job {
+        log::debug!("Sending initial job {} to miner {}", job.job_id, miner_id);
+        let msg = MinerMessage::NewJob(job);
+        write_message(&mut send, &msg)
+            .await
+            .map_err(|e| format!("Failed to send initial job: {}", e))?;
+    }
+
+    loop {
+        tokio::select!
{ + // Prioritize reading to detect disconnection faster + biased; + + // Receive results from miner + msg_result = read_message(&mut recv) => { + match msg_result { + Ok(MinerMessage::JobResult(mut result)) => { + log::info!( + "⛏️ Received result from miner {}: job_id={}, status={:?}", + miner_id, + result.job_id, + result.status + ); + // Tag the result with the miner ID + result.miner_id = Some(miner_id); + if result_tx.send(result).await.is_err() { + return Err("Result channel closed".to_string()); + } + } + Ok(MinerMessage::Ready) => { + log::debug!("Ignoring duplicate Ready from miner {}", miner_id); + } + Ok(MinerMessage::NewJob(_)) => { + log::warn!("Received unexpected NewJob from miner {}", miner_id); + } + Err(e) => { + if e.kind() == std::io::ErrorKind::UnexpectedEof { + return Err("Miner disconnected".to_string()); + } + return Err(format!("Read error: {}", e)); + } + } + } + + // Send jobs to miner + job = job_rx.recv() => { + match job { + Some(job) => { + log::debug!("Sending job {} to miner {}", job.job_id, miner_id); + let msg = MinerMessage::NewJob(job); + if let Err(e) = write_message(&mut send, &msg).await { + return Err(format!("Failed to send job: {}", e)); + } + } + None => { + // Channel closed, shut down + return Ok(()); + } + } + } + } + } +} diff --git a/node/src/prometheus.rs b/node/src/prometheus.rs index e5d834b2..d8e6273d 100644 --- a/node/src/prometheus.rs +++ b/node/src/prometheus.rs @@ -7,9 +7,9 @@ use sp_consensus_qpow::QPoWApi; use sp_core::U512; use std::sync::Arc; -pub struct ResonanceBusinessMetrics; +pub struct BusinessMetrics; -impl ResonanceBusinessMetrics { +impl BusinessMetrics { /// Pack a U512 into an f64 by taking the highest-order 64 bits (8 bytes). fn pack_u512_to_f64(value: U512) -> f64 { // Convert U512 to big-endian bytes (64 bytes) diff --git a/node/src/service.rs b/node/src/service.rs index df6f15bd..9215b035 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -1,20 +1,30 @@ //! 
Service and ServiceFactory implementation. Specialized wrapper over substrate service. - -use futures::{FutureExt, StreamExt}; +//! +//! This module provides the main service setup for a Quantus node, including: +//! - Network configuration and setup +//! - Transaction pool management +//! - Mining infrastructure (local and external miner support) +//! - RPC endpoint configuration + +use futures::FutureExt; +#[cfg(feature = "tx-logging")] +use futures::StreamExt; use quantus_runtime::{self, apis::RuntimeApi, opaque::Block}; use sc_client_api::Backend; -use sc_consensus_qpow::ChainManagement; +use sc_consensus_qpow::{ChainManagement, MiningHandle}; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; -use sc_transaction_pool_api::{InPoolTransaction, OffchainTransactionPoolFactory, TransactionPool}; +#[cfg(feature = "tx-logging")] +use sc_transaction_pool_api::InPoolTransaction; +use sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool}; use sp_inherents::CreateInherentDataProviders; use tokio_util::sync::CancellationToken; -use crate::{external_miner_client, prometheus::ResonanceBusinessMetrics}; +use crate::{miner_server::MinerServer, prometheus::BusinessMetrics}; use codec::Encode; use jsonrpsee::tokio; -use qpow_math::mine_range; -use reqwest::Client; +use quantus_miner_api::{ApiResponseStatus, MiningRequest, MiningResult}; +use sc_basic_authorship::ProposerFactory; use sc_cli::TransactionPoolType; use sc_transaction_pool::TransactionPoolOptions; use sp_api::ProvideRuntimeApi; @@ -22,11 +32,472 @@ use sp_consensus::SyncOracle; use sp_consensus_qpow::QPoWApi; use sp_core::{crypto::AccountId32, U512}; use std::{sync::Arc, time::Duration}; -use uuid::Uuid; /// Frequency of block import logging. Every 1000 blocks. 
const LOG_FREQUENCY: u64 = 1000; +// ============================================================================ +// External Mining Helper Functions +// ============================================================================ + +/// Parse a mining result and extract the seal if valid. +fn parse_mining_result(result: &MiningResult, expected_job_id: &str) -> Option> { + let miner_id = result.miner_id.unwrap_or(0); + + // Check job ID matches + if result.job_id != expected_job_id { + log::debug!(target: "miner", "Received stale result from miner {} for job {}, ignoring", miner_id, result.job_id); + return None; + } + + // Check status + if result.status != ApiResponseStatus::Completed { + match result.status { + ApiResponseStatus::Failed => log::warn!("⛏️ Mining job failed (miner {})", miner_id), + ApiResponseStatus::Cancelled => { + log::debug!(target: "miner", "Mining job was cancelled (miner {})", miner_id) + }, + _ => { + log::debug!(target: "miner", "Unexpected result status from miner {}: {:?}", miner_id, result.status) + }, + } + return None; + } + + // Extract and decode work + let work_hex = result.work.as_ref()?; + match hex::decode(work_hex) { + Ok(seal) if seal.len() == 64 => Some(seal), + Ok(seal) => { + log::error!( + "🚨🚨🚨 INVALID SEAL LENGTH FROM MINER {}! Expected 64 bytes, got {} bytes", + miner_id, + seal.len() + ); + None + }, + Err(e) => { + log::error!("🚨🚨🚨 FAILED TO DECODE SEAL HEX FROM MINER {}: {}", miner_id, e); + None + }, + } +} + +/// Wait for a mining result from the miner server. +/// +/// Returns `Some((miner_id, seal))` if a valid 64-byte seal is received, `None` otherwise +/// (interrupted, failed, invalid, or stale). +/// +/// The `should_stop` closure should return `true` if we should stop waiting +/// (e.g., new block arrived or shutdown requested). +/// +/// This function will keep waiting even if all miners disconnect, since newly +/// connecting miners automatically receive the current job and can submit results. 
+async fn wait_for_mining_result( + server: &Arc, + job_id: &str, + should_stop: F, +) -> Option<(u64, Vec)> +where + F: Fn() -> bool, +{ + loop { + if should_stop() { + return None; + } + + match server.recv_result_timeout(Duration::from_millis(500)).await { + Some(result) => { + let miner_id = result.miner_id.unwrap_or(0); + if let Some(seal) = parse_mining_result(&result, job_id) { + return Some((miner_id, seal)); + } + // Keep waiting for other miners (stale, failed, or invalid parse) + }, + None => { + // Timeout, continue waiting + }, + } + } +} + +// ============================================================================ +// Mining Loop Helpers +// ============================================================================ + +/// Result of attempting to mine with an external miner. +enum ExternalMiningOutcome { + /// Successfully found and imported a seal. + Success, + /// Mining was interrupted (new block, cancellation, or failure). + Interrupted, +} + +/// Handle a single round of external mining. +/// +/// Broadcasts the job to connected miners and waits for results. +/// If a seal fails validation, continues waiting for more seals. +/// Only returns when a seal is successfully imported, or when interrupted. 
+async fn handle_external_mining( + server: &Arc, + client: &Arc, + worker_handle: &MiningHandle< + Block, + FullClient, + Arc>, + (), + >, + cancellation_token: &CancellationToken, + job_counter: &mut u64, + mining_start_time: &mut std::time::Instant, +) -> ExternalMiningOutcome { + let metadata = match worker_handle.metadata() { + Some(m) => m, + None => return ExternalMiningOutcome::Interrupted, + }; + + // Get difficulty from runtime + let difficulty = match client.runtime_api().get_difficulty(metadata.best_hash) { + Ok(d) => d, + Err(e) => { + log::warn!("⛏️ Failed to get difficulty: {:?}", e); + return ExternalMiningOutcome::Interrupted; + }, + }; + + // Create and broadcast job + *job_counter += 1; + let job_id = job_counter.to_string(); + let mining_hash = hex::encode(metadata.pre_hash.as_bytes()); + log::info!( + "⛏️ Broadcasting job {}: pre_hash={}, difficulty={}", + job_id, + mining_hash, + difficulty + ); + let job = MiningRequest { + job_id: job_id.clone(), + mining_hash, + distance_threshold: difficulty.to_string(), + }; + + server.broadcast_job(job).await; + + // Wait for results from miners, retrying on invalid seals + let best_hash = metadata.best_hash; + loop { + let (miner_id, seal) = match wait_for_mining_result(server, &job_id, || { + cancellation_token.is_cancelled() || + worker_handle.metadata().map(|m| m.best_hash != best_hash).unwrap_or(true) + }) + .await + { + Some(result) => result, + None => return ExternalMiningOutcome::Interrupted, + }; + + // Verify the seal before attempting to submit (submit consumes the build) + if !worker_handle.verify_seal(&seal) { + log::error!( + "🚨🚨🚨 INVALID SEAL FROM MINER {}! Job {} - seal failed verification. This may indicate a miner bug or stale work. 
Continuing to wait for valid seals...", + miner_id, + job_id + ); + continue; + } + + // Seal is valid, submit it + if futures::executor::block_on(worker_handle.submit(seal.clone())) { + let mining_time = mining_start_time.elapsed().as_secs(); + log::info!( + "🥇 Successfully mined and submitted a new block via external miner {} (mining time: {}s)", + miner_id, + mining_time + ); + *mining_start_time = std::time::Instant::now(); + return ExternalMiningOutcome::Success; + } + + // Submit failed for some other reason (should be rare after verify_seal passed) + log::warn!( + "⛏️ Failed to submit verified seal from miner {}, continuing to wait (job {})", + miner_id, + job_id + ); + } +} + +/// Try to find a valid nonce for local mining. +/// +/// Tries 50k nonces from a random starting point, then yields to check for new blocks. +/// With Poseidon2 hashing this takes ~50-100ms, keeping the node responsive. +async fn handle_local_mining( + client: &Arc, + worker_handle: &MiningHandle< + Block, + FullClient, + Arc>, + (), + >, +) -> Option> { + let metadata = worker_handle.metadata()?; + let version = worker_handle.version(); + let block_hash = metadata.pre_hash.0; + let difficulty = client.runtime_api().get_difficulty(metadata.best_hash).unwrap_or_else(|e| { + log::warn!("API error getting difficulty: {:?}", e); + U512::zero() + }); + + if difficulty.is_zero() { + return None; + } + + let start_nonce = U512::from(rand::random::()); + let target = U512::MAX / difficulty; + + let found = tokio::task::spawn_blocking(move || { + let mut nonce = start_nonce; + for _ in 0..50_000 { + let nonce_bytes = nonce.to_big_endian(); + if qpow_math::get_nonce_hash(block_hash, nonce_bytes) < target { + return Some(nonce_bytes); + } + nonce = nonce.overflowing_add(U512::one()).0; + } + None + }) + .await + .ok() + .flatten(); + + found.filter(|_| worker_handle.version() == version).map(|nonce| nonce.encode()) +} + +/// Submit a mined seal to the worker handle. 
+/// +/// Returns `true` if submission was successful, `false` otherwise. +fn submit_mined_block( + worker_handle: &MiningHandle< + Block, + FullClient, + Arc>, + (), + >, + seal: Vec, + mining_start_time: &mut std::time::Instant, + source: &str, +) -> bool { + if futures::executor::block_on(worker_handle.submit(seal)) { + let mining_time = mining_start_time.elapsed().as_secs(); + log::info!( + "🥇 Successfully mined and submitted a new block{} (mining time: {}s)", + source, + mining_time + ); + *mining_start_time = std::time::Instant::now(); + true + } else { + log::warn!("⛏️ Failed to submit mined block{}", source); + false + } +} + +/// The main mining loop that coordinates local and external mining. +/// +/// This function runs continuously until the cancellation token is triggered. +/// It handles: +/// - Waiting for sync to complete +/// - Coordinating with external miners (if server is available) +/// - Falling back to local mining +async fn mining_loop( + client: Arc, + worker_handle: MiningHandle>, ()>, + sync_service: Arc>, + miner_server: Option>, + cancellation_token: CancellationToken, +) { + log::info!("⛏️ QPoW Mining task spawned"); + + let mut mining_start_time = std::time::Instant::now(); + let mut job_counter: u64 = 0; + + loop { + if cancellation_token.is_cancelled() { + log::info!("⛏️ QPoW Mining task shutting down gracefully"); + break; + } + + // Don't mine if we're still syncing + if sync_service.is_major_syncing() { + log::debug!(target: "pow", "Mining paused: node is still syncing with network"); + tokio::select! { + _ = tokio::time::sleep(Duration::from_secs(5)) => {} + _ = cancellation_token.cancelled() => continue + } + continue; + } + + // Wait for mining metadata to be available + if worker_handle.metadata().is_none() { + log::debug!(target: "pow", "No mining metadata available"); + tokio::select! 
{ + _ = tokio::time::sleep(Duration::from_millis(250)) => {} + _ = cancellation_token.cancelled() => continue + } + continue; + } + + if let Some(ref server) = miner_server { + // External mining path + handle_external_mining( + server, + &client, + &worker_handle, + &cancellation_token, + &mut job_counter, + &mut mining_start_time, + ) + .await; + } else if let Some(seal) = handle_local_mining(&client, &worker_handle).await { + // Local mining path + submit_mined_block(&worker_handle, seal, &mut mining_start_time, ""); + } + + // Yield to let other async tasks run + tokio::task::yield_now().await; + } + + log::info!("⛏️ QPoW Mining task terminated"); +} + +/// Spawn the transaction logger task. +/// +/// This task logs transactions as they are added to the pool. +/// Only available when the `tx-logging` feature is enabled. +#[cfg(feature = "tx-logging")] +fn spawn_transaction_logger( + task_manager: &TaskManager, + transaction_pool: Arc>, + tx_stream: impl futures::Stream + Send + 'static, +) { + task_manager.spawn_handle().spawn("tx-logger", None, async move { + let tx_stream = tx_stream; + futures::pin_mut!(tx_stream); + while let Some(tx_hash) = tx_stream.next().await { + if let Some(tx) = transaction_pool.ready_transaction(&tx_hash) { + log::trace!(target: "miner", "New transaction: Hash = {:?}", tx_hash); + let extrinsic = tx.data(); + log::trace!(target: "miner", "Payload: {:?}", extrinsic); + } else { + log::warn!("⛏️ Transaction {:?} not found in pool", tx_hash); + } + } + }); +} + +/// Spawn all authority-related tasks (mining, metrics, transaction logging). +/// +/// This is only called when the node is running as an authority (block producer). 
+#[allow(clippy::too_many_arguments)] +fn spawn_authority_tasks( + task_manager: &mut TaskManager, + client: Arc, + transaction_pool: Arc>, + select_chain: FullSelectChain, + pow_block_import: PowBlockImport, + sync_service: Arc>, + prometheus_registry: Option, + rewards_address: AccountId32, + miner_listen_port: Option, + tx_stream_for_worker: impl futures::Stream + Send + Unpin + 'static, + #[cfg(feature = "tx-logging")] tx_stream_for_logger: impl futures::Stream + + Send + + 'static, +) { + // Create block proposer factory + let proposer = ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + None, + ); + + // Create inherent data providers + let inherent_data_providers = Box::new(move |_, _| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + Ok(timestamp) + }) + as Box< + dyn CreateInherentDataProviders< + Block, + (), + InherentDataProviders = sp_timestamp::InherentDataProvider, + >, + >; + + // Start the mining worker (block building task) + let (worker_handle, worker_task) = sc_consensus_qpow::start_mining_worker( + Box::new(pow_block_import), + client.clone(), + select_chain, + proposer, + sync_service.clone(), + sync_service.clone(), + rewards_address, + inherent_data_providers, + tx_stream_for_worker, + Duration::from_secs(10), + ); + + task_manager + .spawn_essential_handle() + .spawn_blocking("block-producer", None, worker_task); + + // Start Prometheus business metrics monitoring + BusinessMetrics::start_monitoring_task(client.clone(), prometheus_registry, task_manager); + + // Setup graceful shutdown for mining + let mining_cancellation_token = CancellationToken::new(); + let mining_token_clone = mining_cancellation_token.clone(); + + task_manager.spawn_handle().spawn("mining-shutdown-listener", None, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen for Ctrl+C"); + log::info!("🛑 Received Ctrl+C signal, shutting down 
qpow-mining worker"); + mining_token_clone.cancel(); + }); + + // Spawn the main mining loop + task_manager.spawn_essential_handle().spawn("qpow-mining", None, async move { + // Start miner server if port is specified + let miner_server: Option> = if let Some(port) = miner_listen_port { + match MinerServer::start(port).await { + Ok(server) => Some(server), + Err(e) => { + log::error!("⛏️ Failed to start miner server on port {}: {}", port, e); + None + }, + } + } else { + log::warn!("⚠️ No --miner-listen-port specified. Using LOCAL mining only."); + None + }; + + mining_loop(client, worker_handle, sync_service, miner_server, mining_cancellation_token) + .await; + }); + + // Spawn transaction logger (only when tx-logging feature is enabled) + #[cfg(feature = "tx-logging")] + spawn_transaction_logger(task_manager, transaction_pool, tx_stream_for_logger); + + log::info!(target: "miner", "⛏️ Pow miner spawned"); +} + +// ============================================================================ +// Type Definitions +// ============================================================================ + pub(crate) type FullClient = sc_service::TFullClient< Block, RuntimeApi, @@ -152,7 +623,7 @@ pub fn new_full< >( config: Configuration, rewards_address: AccountId32, - external_miner_url: Option, + miner_listen_port: Option, enable_peer_sharing: bool, ) -> Result { let sc_service::PartialComponents { @@ -166,7 +637,9 @@ pub fn new_full< other: (pow_block_import, mut telemetry), } = new_partial(&config)?; - let mut tx_stream = transaction_pool.clone().import_notification_stream(); + let tx_stream_for_worker = transaction_pool.clone().import_notification_stream(); + #[cfg(feature = "tx-logging")] + let tx_stream_for_logger = transaction_pool.clone().import_notification_stream(); let net_config = sc_network::config::FullNetworkConfiguration::< Block, @@ -247,285 +720,33 @@ pub fn new_full< })?; if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( 
- task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), - None, // lets worry about telemetry later! TODO - ); - - let inherent_data_providers = Box::new(move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - Ok(timestamp) - }) - as Box< - dyn CreateInherentDataProviders< - Block, - (), - InherentDataProviders = sp_timestamp::InherentDataProvider, - >, - >; - - let (worker_handle, worker_task) = sc_consensus_qpow::start_mining_worker( - Box::new(pow_block_import), - client.clone(), + #[cfg(feature = "tx-logging")] + spawn_authority_tasks( + &mut task_manager, + client, + transaction_pool, select_chain.clone(), - proposer, - sync_service.clone(), - sync_service.clone(), + pow_block_import, + sync_service, + prometheus_registry, rewards_address, - inherent_data_providers, - Duration::from_secs(10), - Duration::from_secs(10), + miner_listen_port, + tx_stream_for_worker, + tx_stream_for_logger, ); - - task_manager.spawn_essential_handle().spawn_blocking("pow", None, worker_task); - - ResonanceBusinessMetrics::start_monitoring_task( - client.clone(), - prometheus_registry.clone(), - &task_manager, + #[cfg(not(feature = "tx-logging"))] + spawn_authority_tasks( + &mut task_manager, + client, + transaction_pool, + select_chain.clone(), + pow_block_import, + sync_service, + prometheus_registry, + rewards_address, + miner_listen_port, + tx_stream_for_worker, ); - - let mining_cancellation_token = CancellationToken::new(); - let mining_token_clone = mining_cancellation_token.clone(); - - // Listen for shutdown signals - task_manager.spawn_handle().spawn("mining-shutdown-listener", None, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen for Ctrl+C"); - log::info!("🛑 Received Ctrl+C signal, shutting down qpow-mining worker"); - mining_token_clone.cancel(); - }); - - task_manager.spawn_essential_handle().spawn("qpow-mining", None, async move { - log::info!("⛏️ 
QPoW Mining task spawned");
-        let mut nonce: U512 = U512::one();
-        let http_client = Client::new();
-        let mut current_job_id: Option<String> = None;
-
-        // Submit new mining job
-        let mut mining_start_time = std::time::Instant::now();
-        log::info!("Mining start time: {:?}", mining_start_time);
-
-        loop {
-            // Check for cancellation
-            if mining_cancellation_token.is_cancelled() {
-                log::info!("⛏️ QPoW Mining task shutting down gracefully");
-
-                // Cancel any pending external mining job
-                if let Some(job_id) = &current_job_id {
-                    if let Some(miner_url) = &external_miner_url {
-                        if let Err(e) = external_miner_client::cancel_mining_job(
-                            &http_client,
-                            miner_url,
-                            job_id,
-                        )
-                        .await
-                        {
-                            log::warn!("⛏️Failed to cancel mining job during shutdown: {}", e);
-                        }
-                    }
-                }
-
-                break;
-            }
-
-            // Don't mine if we're still syncing
-            if sync_service.is_major_syncing() {
-                log::debug!(target: "pow", "Mining paused: node is still syncing with network");
-                tokio::select! {
-                    _ = tokio::time::sleep(Duration::from_secs(5)) => {},
-                    _ = mining_cancellation_token.cancelled() => continue,
-                }
-                continue;
-            }
-
-            // Get mining metadata
-            let metadata = match worker_handle.metadata() {
-                Some(m) => m,
-                None => {
-                    log::debug!(target: "pow", "No mining metadata available");
-                    tokio::select!
{
-                        _ = tokio::time::sleep(Duration::from_millis(250)) => {},
-                        _ = mining_cancellation_token.cancelled() => continue,
-                    }
-                    continue;
-                },
-            };
-            let version = worker_handle.version();
-
-            // If external miner URL is provided, use external mining
-            if let Some(miner_url) = &external_miner_url {
-                // Cancel previous job if metadata has changed
-                if let Some(job_id) = &current_job_id {
-                    if let Err(e) = external_miner_client::cancel_mining_job(
-                        &http_client,
-                        miner_url,
-                        job_id,
-                    )
-                    .await
-                    {
-                        log::warn!("⛏️Failed to cancel previous mining job: {}", e);
-                    }
-                }
-
-                // Get current distance_threshold from runtime
-                let difficulty =
-                    match client.runtime_api().get_difficulty(metadata.best_hash) {
-                        Ok(d) => d,
-                        Err(e) => {
-                            log::warn!("⛏️Failed to get difficulty: {:?}", e);
-                            tokio::select! {
-                                _ = tokio::time::sleep(Duration::from_millis(250)) => {},
-                                _ = mining_cancellation_token.cancelled() => continue,
-                            }
-                            continue;
-                        },
-                    };
-
-                // Generate new job ID
-                let job_id = Uuid::new_v4().to_string();
-                current_job_id = Some(job_id.clone());
-
-                if let Err(e) = external_miner_client::submit_mining_job(
-                    &http_client,
-                    miner_url,
-                    &job_id,
-                    &metadata.pre_hash,
-                    difficulty,
-                    nonce,
-                    U512::max_value(),
-                )
-                .await
-                {
-                    log::warn!("⛏️Failed to submit mining job: {}", e);
-                    tokio::select!
{ - _ = tokio::time::sleep(Duration::from_millis(250)) => {}, - _ = mining_cancellation_token.cancelled() => continue, - } - continue; - } - - // Poll for results - loop { - match external_miner_client::check_mining_result( - &http_client, - miner_url, - &job_id, - ) - .await - { - Ok(Some(seal)) => { - let current_version = worker_handle.version(); - if current_version == version { - if futures::executor::block_on( - worker_handle.submit(seal.encode()), - ) { - let mining_time = mining_start_time.elapsed().as_secs(); - log::info!("🥇 Successfully mined and submitted a new block via external miner (mining time: {}s)", mining_time); - nonce = U512::one(); - mining_start_time = std::time::Instant::now(); - } else { - log::warn!( - "⛏️ Failed to submit mined block from external miner" - ); - nonce += U512::one(); - } - } else { - log::debug!(target: "miner", "Work from external miner is stale, discarding."); - } - break; - }, - Ok(None) => { - // Still working, check if metadata has changed - if worker_handle - .metadata() - .map(|m| m.best_hash != metadata.best_hash) - .unwrap_or(false) - { - break; - } - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_millis(500)) => {}, - _ = mining_cancellation_token.cancelled() => return, - } - }, - Err(e) => { - log::warn!("⛏️Polling external miner result failed: {}", e); - break; - }, - } - } - } else { - // Local mining: try a range of N sequential nonces using optimized path - let block_hash = metadata.pre_hash.0; // [u8;32] - let start_nonce_bytes = nonce.to_big_endian(); - let difficulty = client - .runtime_api() - .get_difficulty(metadata.best_hash) - .unwrap_or_else(|e| { - log::warn!("API error getting difficulty: {:?}", e); - U512::zero() - }); - let nonces_to_mine = 300u64; - - let found = match tokio::task::spawn_blocking(move || { - mine_range(block_hash, start_nonce_bytes, nonces_to_mine, difficulty) - }) - .await - { - Ok(res) => res, - Err(e) => { - log::warn!("⛏️Local mining task failed: {}", e); - None - }, - }; - - let nonce_bytes = if let Some((good_nonce, _distance)) = found { - good_nonce - } else { - nonce += U512::from(nonces_to_mine); - // Yield back to the runtime to avoid starving other tasks - tokio::task::yield_now().await; - continue; - }; - - let current_version = worker_handle.version(); - // TODO: what does this check do? 
- if current_version == version { - if futures::executor::block_on(worker_handle.submit(nonce_bytes.encode())) { - let mining_time = mining_start_time.elapsed().as_secs(); - log::info!("🥇 Successfully mined and submitted a new block (mining time: {}s)", mining_time); - nonce = U512::one(); - mining_start_time = std::time::Instant::now(); - } else { - log::warn!("⛏️Failed to submit mined block"); - nonce += U512::one(); - } - } - - // Yield after each mining batch to cooperate with other tasks - tokio::task::yield_now().await; - } - } - - log::info!("⛏️ QPoW Mining task terminated"); - }); - - task_manager.spawn_handle().spawn("tx-logger", None, async move { - while let Some(tx_hash) = tx_stream.next().await { - if let Some(tx) = transaction_pool.ready_transaction(&tx_hash) { - log::trace!(target: "miner", "New transaction: Hash = {:?}", tx_hash); - let extrinsic = tx.data(); - log::trace!(target: "miner", "Payload: {:?}", extrinsic); - } else { - log::warn!("⛏️Transaction {:?} not found in pool", tx_hash); - } - } - }); - - log::info!(target: "miner", "⛏️ Pow miner spawned"); } // Start deterministic-depth finalization task diff --git a/pallets/balances/src/lib.rs b/pallets/balances/src/lib.rs index a0ee17d3..164515a6 100644 --- a/pallets/balances/src/lib.rs +++ b/pallets/balances/src/lib.rs @@ -337,6 +337,13 @@ pub mod pallet { Self::AccountId, Self::Balance, >; + + /// Account ID used as the "from" account when creating transfer proofs for minted tokens + /// (e.g., genesis balances, mining rewards). This should be a well-known address that + /// represents "minted from nothing". + #[pallet::constant] + #[pallet::no_default] + type MintingAccount: Get; } /// The in-code storage version. @@ -572,10 +579,13 @@ pub mod pallet { "duplicate balances in genesis." 
); + let mint_account = T::MintingAccount::get(); for &(ref who, free) in self.balances.iter() { frame_system::Pallet::::inc_providers(who); assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() }) .is_ok()); + // Create transfer proof for genesis balance (from minting account) + Pallet::::do_store_transfer_proof(&mint_account, who, free); } } } diff --git a/pallets/balances/src/tests/mod.rs b/pallets/balances/src/tests/mod.rs index cc469066..bb33433d 100644 --- a/pallets/balances/src/tests/mod.rs +++ b/pallets/balances/src/tests/mod.rs @@ -112,6 +112,7 @@ impl pallet_transaction_payment::Config for Test { parameter_types! { pub FooReason: TestId = TestId::Foo; + pub MintingAccount: AccountId = AccountId::new([1u8; 32]); } #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] @@ -126,6 +127,7 @@ impl Config for Test { type RuntimeFreezeReason = TestId; type FreezeIdentifier = TestId; type MaxFreezes = VariantCountOf; + type MintingAccount = MintingAccount; } #[derive(Clone)] diff --git a/pallets/balances/src/tests/transfer_counter_tests.rs b/pallets/balances/src/tests/transfer_counter_tests.rs index 92cb360e..57fb1e7b 100644 --- a/pallets/balances/src/tests/transfer_counter_tests.rs +++ b/pallets/balances/src/tests/transfer_counter_tests.rs @@ -34,14 +34,19 @@ fn charlie() -> AccountId { account_id(3) } +/// When monied(true), genesis creates 5 accounts with balances. +/// However, account_id(1) == MintingAccount ([1u8; 32]), so that's a self-transfer +/// which doesn't create a proof. Thus we get 4 transfer proofs. 
+const GENESIS_TRANSFER_COUNT: u64 = 4; + #[test] -fn transfer_counter_starts_at_zero() { +fn transfer_counter_starts_at_genesis_count() { ExtBuilder::default() .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Transfer counter should start at 0 - assert_eq!(Balances::transfer_count(), 0); + // Transfer counter should start at GENESIS_TRANSFER_COUNT (one per endowed account) + assert_eq!(Balances::transfer_count(), GENESIS_TRANSFER_COUNT); }); } @@ -51,20 +56,19 @@ fn transfer_allow_death_increments_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Perform a transfer assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), bob(), 5)); - // Counter should increment to 1 - assert_eq!(Balances::transfer_count(), 1); + // Counter should increment by 1 + assert_eq!(Balances::transfer_count(), initial_count + 1); // Perform another transfer assert_ok!(Balances::transfer_allow_death(Some(bob()).into(), charlie(), 3)); - // Counter should increment to 2 - assert_eq!(Balances::transfer_count(), 2); + // Counter should increment by 2 total + assert_eq!(Balances::transfer_count(), initial_count + 2); }); } @@ -74,14 +78,13 @@ fn transfer_keep_alive_increments_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Perform a transfer_keep_alive assert_ok!(Balances::transfer_keep_alive(Some(alice()).into(), bob(), 5)); - // Counter should increment to 1 - assert_eq!(Balances::transfer_count(), 1); + // Counter should increment by 1 + assert_eq!(Balances::transfer_count(), initial_count + 1); }); } @@ -91,14 +94,13 @@ fn force_transfer_increments_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial 
counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Perform a force transfer assert_ok!(Balances::force_transfer(RuntimeOrigin::root(), alice(), bob(), 5)); - // Counter should increment to 1 - assert_eq!(Balances::transfer_count(), 1); + // Counter should increment by 1 + assert_eq!(Balances::transfer_count(), initial_count + 1); }); } @@ -108,14 +110,13 @@ fn transfer_all_increments_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Perform a transfer_all assert_ok!(Balances::transfer_all(Some(alice()).into(), bob(), false)); - // Counter should increment to 1 - assert_eq!(Balances::transfer_count(), 1); + // Counter should increment by 1 + assert_eq!(Balances::transfer_count(), initial_count + 1); }); } @@ -125,14 +126,13 @@ fn self_transfer_does_not_increment_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Self transfer should not increment counter assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), alice(), 5)); - // Counter should remain 0 since it's a self-transfer - assert_eq!(Balances::transfer_count(), 0); + // Counter should remain unchanged since it's a self-transfer + assert_eq!(Balances::transfer_count(), initial_count); }); } @@ -142,11 +142,14 @@ fn transfer_proof_storage_is_created() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { + let current_count = Balances::transfer_count(); + // Perform a transfer assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), bob(), 5)); - // Check that transfer proof was stored with correct key - let key = (0u64, alice(), bob(), 5); + // Check that transfer proof was stored with correct key 
(using current count as the + // index) + let key = (current_count, alice(), bob(), 5u128); assert!(TransferProof::::contains_key(&key)); }); } @@ -157,28 +160,30 @@ fn multiple_transfers_create_sequential_proofs() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { + let initial_count = Balances::transfer_count(); + // First transfer assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), bob(), 5)); - assert_eq!(Balances::transfer_count(), 1); + assert_eq!(Balances::transfer_count(), initial_count + 1); // Check first proof exists - let key1 = (0u64, alice(), bob(), 5u128); + let key1 = (initial_count, alice(), bob(), 5u128); assert!(TransferProof::::contains_key(&key1)); // Second transfer assert_ok!(Balances::transfer_allow_death(Some(bob()).into(), charlie(), 3)); - assert_eq!(Balances::transfer_count(), 2); + assert_eq!(Balances::transfer_count(), initial_count + 2); // Check second proof exists - let key2 = (1u64, bob(), charlie(), 3u128); + let key2 = (initial_count + 1, bob(), charlie(), 3u128); assert!(TransferProof::::contains_key(&key2)); // Third transfer with different amount assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), charlie(), 1)); - assert_eq!(Balances::transfer_count(), 3); + assert_eq!(Balances::transfer_count(), initial_count + 3); // Check third proof exists - let key3 = (2u64, alice(), charlie(), 1u128); + let key3 = (initial_count + 2, alice(), charlie(), 1u128); assert!(TransferProof::::contains_key(&key3)); }); } @@ -189,8 +194,7 @@ fn failed_transfers_do_not_increment_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Attempt transfer with insufficient funds assert_noop!( @@ -198,8 +202,8 @@ fn failed_transfers_do_not_increment_counter() { Arithmetic(Underflow) ); - // Counter should remain 0 since transfer failed - 
assert_eq!(Balances::transfer_count(), 0); + // Counter should remain unchanged since transfer failed + assert_eq!(Balances::transfer_count(), initial_count); }); } @@ -268,19 +272,21 @@ fn transfer_counter_persists_across_blocks() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { + let initial_count = Balances::transfer_count(); + // Perform transfer in block 1 assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), bob(), 5)); - assert_eq!(Balances::transfer_count(), 1); + assert_eq!(Balances::transfer_count(), initial_count + 1); // Move to block 2 System::set_block_number(2); // Counter should persist - assert_eq!(Balances::transfer_count(), 1); + assert_eq!(Balances::transfer_count(), initial_count + 1); // Perform another transfer in block 2 assert_ok!(Balances::transfer_allow_death(Some(bob()).into(), charlie(), 3)); - assert_eq!(Balances::transfer_count(), 2); + assert_eq!(Balances::transfer_count(), initial_count + 2); }); } @@ -290,17 +296,16 @@ fn zero_value_transfers_increment_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // Perform a zero-value transfer assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), bob(), 0)); // Counter should increment even for zero-value transfers - assert_eq!(Balances::transfer_count(), 1); + assert_eq!(Balances::transfer_count(), initial_count + 1); // Transfer proof should be created - let key = (0u64, alice(), bob(), 0u128); + let key = (initial_count, alice(), bob(), 0u128); assert!(TransferProof::::contains_key(&key)); }); } @@ -311,26 +316,25 @@ fn different_transfer_types_all_increment_counter() { .existential_deposit(1) .monied(true) .build_and_execute_with(|| { - // Initial counter should be 0 - assert_eq!(Balances::transfer_count(), 0); + let initial_count = Balances::transfer_count(); // transfer_allow_death 
assert_ok!(Balances::transfer_allow_death(Some(alice()).into(), bob(), 1)); - assert_eq!(Balances::transfer_count(), 1); + assert_eq!(Balances::transfer_count(), initial_count + 1); // transfer_keep_alive assert_ok!(Balances::transfer_keep_alive(Some(alice()).into(), charlie(), 1)); - assert_eq!(Balances::transfer_count(), 2); + assert_eq!(Balances::transfer_count(), initial_count + 2); // force_transfer assert_ok!(Balances::force_transfer(RuntimeOrigin::root(), bob(), charlie(), 1)); - assert_eq!(Balances::transfer_count(), 3); + assert_eq!(Balances::transfer_count(), initial_count + 3); // transfer_all (transfer remaining balance) let remaining = Balances::free_balance(alice()); if remaining > 1 { assert_ok!(Balances::transfer_all(Some(alice()).into(), bob(), false)); - assert_eq!(Balances::transfer_count(), 4); + assert_eq!(Balances::transfer_count(), initial_count + 4); } }); } diff --git a/pallets/merkle-airdrop/README.md b/pallets/merkle-airdrop/README.md deleted file mode 100644 index 2e8495fb..00000000 --- a/pallets/merkle-airdrop/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Merkle Airdrop Pallet - -A Substrate pallet for distributing tokens via Merkle proofs with optional vesting of the airdropped tokens. 
- -## Testing & Usage - -For testing and interacting with this pallet, please refer to the CLI tool and example in the [resonance-api-client](https://github.com/Quantus-Network/resonance-api-client/blob/master/examples/async/examples/merkle-airdrop-README.md) repository: -- `examples/ac-examples-async/examples/merkle_airdrop_cli.rs` -- `examples/ac-examples-async/examples/merkle_airdrop_cli-README.md` for the documentation - -These tool demonstrates how to: -- Generate Merkle trees and proofs -- Create and fund airdrops -- Claim tokens using proofs diff --git a/pallets/merkle-airdrop/src/benchmarking.rs b/pallets/merkle-airdrop/src/benchmarking.rs deleted file mode 100644 index d965bd8d..00000000 --- a/pallets/merkle-airdrop/src/benchmarking.rs +++ /dev/null @@ -1,196 +0,0 @@ -//! Benchmarking setup for pallet-merkle-airdrop - -extern crate alloc; - -use super::*; -use crate::Pallet as MerkleAirdrop; -use frame_benchmarking::v2::*; -use frame_support::BoundedVec; -use frame_system::RawOrigin; -use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Get, Saturating}; - -// Helper function to mirror pallet's Merkle proof verification logic -fn calculate_expected_root_for_benchmark( - initial_leaf_hash: MerkleHash, - proof_elements: &[MerkleHash], -) -> MerkleHash { - let mut computed_hash = initial_leaf_hash; - for proof_element in proof_elements.iter() { - // The comparison logic must match how MerkleHash is ordered in your pallet - if computed_hash.as_ref() < proof_element.as_ref() { - // This replicates Self::calculate_parent_hash_blake2(&computed_hash, proof_element) - let mut combined_data = computed_hash.as_ref().to_vec(); - combined_data.extend_from_slice(proof_element.as_ref()); - computed_hash = blake2_256(&combined_data); - } else { - // This replicates Self::calculate_parent_hash_blake2(proof_element, &computed_hash) - let mut combined_data = proof_element.as_ref().to_vec(); - combined_data.extend_from_slice(computed_hash.as_ref()); - computed_hash = 
blake2_256(&combined_data); - } - } - computed_hash -} - -#[benchmarks( - where - T: Send + Sync, - T: Config + pallet_vesting::Config>, -)] -mod benchmarks { - use super::*; - - #[benchmark] - fn create_airdrop() { - let caller: T::AccountId = whitelisted_caller(); - let merkle_root = [0u8; 32]; - let vesting_period = None; - let vesting_schedule = None; - - #[extrinsic_call] - create_airdrop(RawOrigin::Signed(caller), merkle_root, vesting_period, vesting_schedule); - } - - #[benchmark] - fn fund_airdrop() { - let caller: T::AccountId = whitelisted_caller(); - let merkle_root = [0u8; 32]; - - let airdrop_id = MerkleAirdrop::::next_airdrop_id(); - AirdropInfo::::insert( - airdrop_id, - AirdropMetadata { - merkle_root, - balance: 0u32.into(), - creator: caller.clone(), - vesting_period: None, - vesting_delay: None, - }, - ); - - NextAirdropId::::put(airdrop_id + 1); - - let amount: BalanceOf = ::MinVestedTransfer::get(); - - // Get ED and ensure caller has sufficient balance - let ed = CurrencyOf::::minimum_balance(); - - let caller_balance = ed.saturating_mul(10u32.into()).saturating_add(amount); - CurrencyOf::::make_free_balance_be(&caller, caller_balance); - - CurrencyOf::::make_free_balance_be(&MerkleAirdrop::::account_id(), ed); - - #[extrinsic_call] - fund_airdrop(RawOrigin::Signed(caller), airdrop_id, amount); - } - - #[benchmark] - fn claim(p: Linear<0, { T::MaxProofs::get() }>) { - let caller: T::AccountId = whitelisted_caller(); - let recipient: T::AccountId = account("recipient", 0, 0); - - let amount: BalanceOf = ::MinVestedTransfer::get(); - - // 1. Calculate the initial leaf hash - let leaf_hash = MerkleAirdrop::::calculate_leaf_hash_blake2(&recipient, amount); - - // 2. 
Generate `p` dummy proof elements that will be passed to the extrinsic - let proof_elements_for_extrinsic: alloc::vec::Vec = (0..p) - .map(|i| { - let mut dummy_data = [0u8; 32]; - dummy_data[0] = i as u8; // Make them slightly different for each proof element - blake2_256(&dummy_data) // Hash it to make it a valid MerkleHash type - }) - .collect(); - - let merkle_root_to_store = - calculate_expected_root_for_benchmark(leaf_hash, &proof_elements_for_extrinsic); - - let airdrop_id = MerkleAirdrop::::next_airdrop_id(); - - AirdropInfo::::insert( - airdrop_id, - AirdropMetadata { - merkle_root: merkle_root_to_store, - balance: amount.saturating_mul(2u32.into()), // Ensure enough balance for the claim - creator: caller.clone(), - vesting_period: None, // Simplest case: no vesting period - vesting_delay: None, // Simplest case: no vesting delay - }, - ); - - let large_balance = - amount.saturating_mul(T::MaxProofs::get().into()).saturating_add(amount); - - // Creator might not be strictly needed for `claim` from `None` origin, but good practice - CurrencyOf::::make_free_balance_be(&caller, large_balance); - // Recipient starts with minimal balance or nothing, will receive the airdrop - CurrencyOf::::make_free_balance_be(&recipient, amount); - // Pallet's account needs funds to make the transfer - CurrencyOf::::make_free_balance_be( - &MerkleAirdrop::::account_id(), - large_balance, // Pallet account needs enough to cover the claim - ); - - AirdropInfo::::mutate(airdrop_id, |maybe_info| { - if let Some(info) = maybe_info { - info.balance = large_balance; - } - }); - - // Prepare the Merkle proof argument for the extrinsic call - let merkle_proof_arg = - BoundedVec::::try_from(proof_elements_for_extrinsic) - .expect("Proof elements vector should fit into BoundedVec"); - - // Ensure recipient hasn't claimed yet (benchmark state should be clean) - assert!(!Claimed::::contains_key(airdrop_id, &recipient)); - - #[extrinsic_call] - claim(RawOrigin::None, airdrop_id, 
recipient.clone(), amount, merkle_proof_arg); - - // Verify successful claim - assert!(Claimed::::contains_key(airdrop_id, &recipient)); - } - - #[benchmark] - fn delete_airdrop() { - let caller: T::AccountId = whitelisted_caller(); - let merkle_root = [0u8; 32]; - - // Create an airdrop first - let airdrop_id = MerkleAirdrop::::next_airdrop_id(); - - AirdropInfo::::insert( - airdrop_id, - AirdropMetadata { - merkle_root, - balance: 0u32.into(), - creator: caller.clone(), - vesting_period: None, - vesting_delay: None, - }, - ); - - NextAirdropId::::put(airdrop_id + 1); - - let ed = CurrencyOf::::minimum_balance(); - let tiny_amount: BalanceOf = 1u32.into(); - let large_balance = ed.saturating_mul(1_000_000u32.into()); - - CurrencyOf::::make_free_balance_be(&caller, large_balance); - CurrencyOf::::make_free_balance_be(&MerkleAirdrop::::account_id(), large_balance); - - AirdropInfo::::mutate(airdrop_id, |info| { - if let Some(info) = info { - info.balance = tiny_amount; - } - }); - - #[extrinsic_call] - delete_airdrop(RawOrigin::Signed(caller), airdrop_id); - } - - impl_benchmark_test_suite!(MerkleAirdrop, crate::mock::new_test_ext(), crate::mock::Test); -} diff --git a/pallets/merkle-airdrop/src/lib.rs b/pallets/merkle-airdrop/src/lib.rs deleted file mode 100644 index b3b2651d..00000000 --- a/pallets/merkle-airdrop/src/lib.rs +++ /dev/null @@ -1,584 +0,0 @@ -//! # Merkle Airdrop Pallet -//! -//! A pallet for distributing tokens via Merkle proofs, allowing efficient token airdrops -//! where recipients can claim their tokens by providing cryptographic proofs of eligibility. -//! -//! ## Overview -//! -//! This pallet provides functionality for: -//! - Creating airdrops with a Merkle root representing all valid claims, and optional vesting -//! parameters -//! - Funding airdrops with tokens to be distributed -//! - Allowing users to claim tokens by providing Merkle proofs -//! - Allowing creators to delete airdrops and reclaim any unclaimed tokens -//! -//! 
The use of Merkle trees allows for gas-efficient verification of eligibility without -//! storing the complete list of recipients on-chain. -//! -//! ## Interface -//! -//! ### Dispatchable Functions -//! -//! * `create_airdrop` - Create a new airdrop with a Merkle root and vesting parameters -//! * `fund_airdrop` - Fund an existing airdrop with tokens -//! * `claim` - Claim tokens from an airdrop by providing a Merkle proof -//! * `delete_airdrop` - Delete an airdrop and reclaim any remaining tokens (creator only) - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; -use frame_system::pallet_prelude::BlockNumberFor; -pub use pallet::*; - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod tests; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; -pub mod weights; -use scale_info::TypeInfo; -use sp_core::RuntimeDebug; -pub use weights::*; - -use frame_support::traits::{Currency, VestedTransfer}; - -/// NOTE: Vesting traits still use deprecated `Currency` trait. -type CurrencyOf = - <::Vesting as VestedTransfer<::AccountId>>::Currency; - -/// NOTE: Vesting traits still use deprecated `Currency` trait. -type BalanceOf = as Currency<::AccountId>>::Balance; - -/// Type alias for airdrop info for this pallet -type AirdropMetadataFor = - AirdropMetadata, BalanceOf, ::AccountId>; - -/// Type for storing a Merkle root hash -pub type MerkleRoot = [u8; 32]; - -/// Type for Merkle hash values -pub type MerkleHash = [u8; 32]; - -/// Airdrop ID type -pub type AirdropId = u32; - -#[derive( - Encode, - Decode, - PartialEq, - Eq, - Clone, - TypeInfo, - RuntimeDebug, - MaxEncodedLen, - DecodeWithMemTracking, -)] -pub struct AirdropMetadata { - /// Merkle root of the airdrop - pub merkle_root: MerkleHash, - /// Creator of the airdrop - pub creator: AccountId, - /// Current airdrop balance - pub balance: Balance, - /// Vesting period for the airdrop. `None` for immediate release. 
- pub vesting_period: Option, - /// Vesting start delay. `None` for immediate start - pub vesting_delay: Option, -} - -#[frame_support::pallet] -pub mod pallet { - use crate::{ - AirdropId, AirdropMetadata, AirdropMetadataFor, BalanceOf, CurrencyOf, MerkleHash, - MerkleRoot, - }; - - use super::weights::WeightInfo; - use frame_support::{ - pallet_prelude::*, - traits::{Currency, Get, VestedTransfer, VestingSchedule}, - }; - use frame_system::pallet_prelude::{BlockNumberFor, *}; - use sp_io::hashing::blake2_256; - use sp_runtime::{ - traits::{AccountIdConversion, BlockNumberProvider, Convert, Saturating}, - transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionSource, TransactionValidity, - ValidTransaction, - }, - }; - extern crate alloc; - use alloc::vec; - - #[pallet::pallet] - pub struct Pallet(_); - - /// Configuration trait for the Merkle airdrop pallet. - #[pallet::config] - pub trait Config: frame_system::Config { - /// The vesting mechanism. - type Vesting: VestedTransfer> - + VestingSchedule>; - - /// Convert the block number into a balance. - type BlockNumberToBalance: Convert, BalanceOf>; - - /// The maximum number of proof elements allowed in a Merkle proof. - #[pallet::constant] - type MaxProofs: Get; - - /// The pallet id, used for deriving its sovereign account ID. - #[pallet::constant] - type PalletId: Get; - - /// Priority for unsigned claim transactions. - #[pallet::constant] - type UnsignedClaimPriority: Get; - - /// Weight information for the extrinsics in this pallet. - type WeightInfo: WeightInfo; - - /// Block number provider. 
- type BlockNumberProvider: BlockNumberProvider>; - } - - /// Stores general info about an airdrop - #[pallet::storage] - #[pallet::getter(fn airdrop_info)] - pub type AirdropInfo = StorageMap< - _, - Blake2_128Concat, - AirdropId, - AirdropMetadata, BalanceOf, T::AccountId>, - >; - - /// Storage for claimed status - #[pallet::storage] - #[pallet::getter(fn is_claimed)] - #[allow(clippy::unused_unit)] - pub type Claimed = StorageDoubleMap< - _, - Blake2_128Concat, - AirdropId, - Blake2_128Concat, - T::AccountId, - (), - ValueQuery, - >; - - /// Counter for airdrop IDs - #[pallet::storage] - #[pallet::getter(fn next_airdrop_id)] - pub type NextAirdropId = StorageValue<_, AirdropId, ValueQuery>; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// A new airdrop has been created. - /// - /// Parameters: [airdrop_id, merkle_root] - AirdropCreated { - /// The ID of the created airdrop - airdrop_id: AirdropId, - /// Airdrop metadata - airdrop_metadata: AirdropMetadataFor, - }, - /// An airdrop has been funded with tokens. - /// - /// Parameters: [airdrop_id, amount] - AirdropFunded { - /// The ID of the funded airdrop - airdrop_id: AirdropId, - /// The amount of tokens added to the airdrop - amount: BalanceOf, - }, - /// A user has claimed tokens from an airdrop. - /// - /// Parameters: [airdrop_id, account, amount] - Claimed { - /// The ID of the airdrop claimed from - airdrop_id: AirdropId, - /// The account that claimed the tokens - account: T::AccountId, - /// The amount of tokens claimed - amount: BalanceOf, - }, - /// An airdrop has been deleted. - /// - /// Parameters: [airdrop_id] - AirdropDeleted { - /// The ID of the deleted airdrop - airdrop_id: AirdropId, - }, - } - - #[pallet::error] - #[repr(u8)] - pub enum Error { - /// The specified airdrop does not exist. - AirdropNotFound, - /// The airdrop does not have sufficient balance for this operation. 
- InsufficientAirdropBalance, - /// The user has already claimed from this airdrop. - AlreadyClaimed, - /// The provided Merkle proof is invalid. - InvalidProof, - /// Only the creator of an airdrop can delete it. - NotAirdropCreator, - } - - impl Error { - /// Convert the error to its underlying code - pub fn to_code(&self) -> u8 { - match self { - Error::::AirdropNotFound => 1, - Error::::InsufficientAirdropBalance => 2, - Error::::AlreadyClaimed => 3, - Error::::InvalidProof => 4, - Error::::NotAirdropCreator => 5, - _ => 0, - } - } - } - - impl Pallet { - /// Returns the account ID of the pallet. - /// - /// This account is used to hold the funds for all airdrops. - pub fn account_id() -> T::AccountId { - T::PalletId::get().into_account_truncating() - } - - /// Verifies a Merkle proof against a Merkle root using Blake2 hash. - /// - /// This function checks if an account is eligible to claim a specific amount from an - /// airdrop by verifying a Merkle proof against the stored Merkle root. - /// - /// # Parameters - /// - /// * `account` - The account ID claiming tokens - /// * `amount` - The amount of tokens being claimed - /// * `merkle_root` - The Merkle root to verify against - /// * `merkle_proof` - The proof path from the leaf to the root - /// - /// # Returns - /// - /// `true` if the proof is valid, `false` otherwise - pub fn verify_merkle_proof( - account: &T::AccountId, - amount: BalanceOf, - merkle_root: &MerkleRoot, - merkle_proof: &[MerkleHash], - ) -> bool { - let leaf = Self::calculate_leaf_hash_blake2(account, amount); - - // Verify the proof by walking up the tree - let mut computed_hash = leaf; - for proof_element in merkle_proof.iter() { - computed_hash = if computed_hash < *proof_element { - Self::calculate_parent_hash_blake2(&computed_hash, proof_element) - } else { - Self::calculate_parent_hash_blake2(proof_element, &computed_hash) - }; - } - computed_hash == *merkle_root - } - - /// Calculates the leaf hash for a Merkle tree using Blake2. 
- /// - /// This function creates a leaf node hash from an account and amount using the - /// Blake2 hash function, which is optimized for zero-knowledge proofs. - /// - /// # Parameters - /// - /// * `account` - The account ID to include in the leaf - /// * `amount` - The token amount to include in the leaf - /// - /// # Returns - /// - /// A 32-byte array containing the Blake2 hash of the account and amount - pub fn calculate_leaf_hash_blake2( - account: &T::AccountId, - amount: BalanceOf, - ) -> MerkleHash { - let bytes = (account, amount).encode(); - blake2_256(&bytes) - } - - /// Calculates the parent hash in a Merkle tree using Blake2. - /// - /// This function combines two child hashes to create their parent hash in the Merkle tree. - /// The children are ordered lexicographically before hashing to ensure consistency. - /// - /// # Parameters - /// - /// * `left` - The first child hash - /// * `right` - The second child hash - /// - /// # Returns - /// - /// A 32-byte array containing the Blake2 hash of the combined children - pub fn calculate_parent_hash_blake2(left: &MerkleHash, right: &MerkleHash) -> MerkleHash { - // Ensure consistent ordering of inputs (important for verification) - let combined = if left < right { - [left.as_slice(), right.as_slice()].concat() - } else { - [right.as_slice(), left.as_slice()].concat() - }; - - blake2_256(&combined) - } - } - - #[pallet::call] - impl Pallet { - /// Create a new airdrop with a Merkle root. - /// - /// The Merkle root is a cryptographic hash that represents all valid claims - /// for this airdrop. Users will later provide Merkle proofs to verify their - /// eligibility to claim tokens. 
- /// - /// # Parameters - /// - /// * `origin` - The origin of the call (must be signed) - /// * `merkle_root` - The Merkle root hash representing all valid claims - /// * `vesting_period` - Optional vesting period for the airdrop - /// * `vesting_delay` - Optional delay before vesting starts - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::create_airdrop())] - pub fn create_airdrop( - origin: OriginFor, - merkle_root: MerkleRoot, - vesting_period: Option>, - vesting_delay: Option>, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - let airdrop_id = Self::next_airdrop_id(); - - let airdrop_metadata = AirdropMetadata { - merkle_root, - creator: who.clone(), - balance: Zero::zero(), - vesting_period, - vesting_delay, - }; - - AirdropInfo::::insert(airdrop_id, &airdrop_metadata); - NextAirdropId::::put(airdrop_id.saturating_add(1)); - - Self::deposit_event(Event::AirdropCreated { airdrop_id, airdrop_metadata }); - - Ok(()) - } - - /// Fund an existing airdrop with tokens. - /// - /// This function transfers tokens from the caller to the airdrop's account, - /// making them available for users to claim. 
- /// - /// # Parameters - /// - /// * `origin` - The origin of the call (must be signed) - /// * `airdrop_id` - The ID of the airdrop to fund - /// * `amount` - The amount of tokens to add to the airdrop - /// - /// # Errors - /// - /// * `AirdropNotFound` - If the specified airdrop does not exist - #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::fund_airdrop())] - pub fn fund_airdrop( - origin: OriginFor, - airdrop_id: AirdropId, - amount: BalanceOf, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - ensure!(AirdropInfo::::contains_key(airdrop_id), Error::::AirdropNotFound); - - CurrencyOf::::transfer( - &who, - &Self::account_id(), - amount, - frame_support::traits::ExistenceRequirement::KeepAlive, - )?; - - AirdropInfo::::mutate(airdrop_id, |maybe_metadata| { - if let Some(metadata) = maybe_metadata { - metadata.balance = metadata.balance.saturating_add(amount); - } - }); - - Self::deposit_event(Event::AirdropFunded { airdrop_id, amount }); - - Ok(()) - } - - /// Claim tokens from an airdrop by providing a Merkle proof. - /// - /// Users can claim their tokens by providing a proof of their eligibility. - /// The proof is verified against the airdrop's Merkle root. - /// Anyone can trigger a claim for any eligible recipient. 
- /// - /// # Parameters - /// - /// * `origin` - The origin of the call - /// * `airdrop_id` - The ID of the airdrop to claim from - /// * `amount` - The amount of tokens to claim - /// * `merkle_proof` - The Merkle proof verifying eligibility - /// - /// # Errors - /// - /// * `AirdropNotFound` - If the specified airdrop does not exist - /// * `AlreadyClaimed` - If the recipient has already claimed from this airdrop - /// * `InvalidProof` - If the provided Merkle proof is invalid - /// * `InsufficientAirdropBalance` - If the airdrop doesn't have enough tokens - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::claim(merkle_proof.len() as u32))] - pub fn claim( - origin: OriginFor, - airdrop_id: AirdropId, - recipient: T::AccountId, - amount: BalanceOf, - merkle_proof: BoundedVec, - ) -> DispatchResult { - ensure_none(origin)?; - - ensure!( - !Claimed::::contains_key(airdrop_id, &recipient), - Error::::AlreadyClaimed - ); - - let airdrop_metadata = - AirdropInfo::::get(airdrop_id).ok_or(Error::::AirdropNotFound)?; - - ensure!( - Self::verify_merkle_proof( - &recipient, - amount, - &airdrop_metadata.merkle_root, - &merkle_proof - ), - Error::::InvalidProof - ); - - ensure!(airdrop_metadata.balance >= amount, Error::::InsufficientAirdropBalance); - - // Mark as claimed before performing the transfer - Claimed::::insert(airdrop_id, &recipient, ()); - - AirdropInfo::::mutate(airdrop_id, |maybe_metadata| { - if let Some(metadata) = maybe_metadata { - metadata.balance = metadata.balance.saturating_sub(amount); - } - }); - - let per_block = if let Some(vesting_period) = airdrop_metadata.vesting_period { - amount - .checked_div(&T::BlockNumberToBalance::convert(vesting_period)) - .ok_or(Error::::InsufficientAirdropBalance)? 
- } else { - amount - }; - - let current_block = T::BlockNumberProvider::current_block_number(); - let vesting_start = - current_block.saturating_add(airdrop_metadata.vesting_delay.unwrap_or_default()); - - T::Vesting::vested_transfer( - &Self::account_id(), - &recipient, - amount, - per_block, - vesting_start, - )?; - - Self::deposit_event(Event::Claimed { airdrop_id, account: recipient, amount }); - - Ok(()) - } - - /// Delete an airdrop and reclaim any remaining funds. - /// - /// This function allows the creator of an airdrop to delete it and reclaim - /// any remaining tokens that haven't been claimed. - /// - /// # Parameters - /// - /// * `origin` - The origin of the call (must be the airdrop creator) - /// * `airdrop_id` - The ID of the airdrop to delete - /// - /// # Errors - /// - /// * `AirdropNotFound` - If the specified airdrop does not exist - /// * `NotAirdropCreator` - If the caller is not the creator of the airdrop - #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::delete_airdrop())] - pub fn delete_airdrop(origin: OriginFor, airdrop_id: AirdropId) -> DispatchResult { - let who = ensure_signed(origin)?; - - let airdrop_metadata = - AirdropInfo::::take(airdrop_id).ok_or(Error::::AirdropNotFound)?; - - ensure!(airdrop_metadata.creator == who, Error::::NotAirdropCreator); - - CurrencyOf::::transfer( - &Self::account_id(), - &airdrop_metadata.creator, - airdrop_metadata.balance, - frame_support::traits::ExistenceRequirement::KeepAlive, - )?; - - Self::deposit_event(Event::AirdropDeleted { airdrop_id }); - - Ok(()) - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::claim { airdrop_id, recipient, amount, merkle_proof } = call { - // 1. 
Check if airdrop exists - let airdrop_metadata = AirdropInfo::::get(airdrop_id).ok_or_else(|| { - let error = Error::::AirdropNotFound; - InvalidTransaction::Custom(error.to_code()) - })?; - - // 2. Check if already claimed - if Claimed::::contains_key(airdrop_id, recipient) { - let error = Error::::AlreadyClaimed; - return InvalidTransaction::Custom(error.to_code()).into(); - } - - // 3. Verify Merkle Proof - if !Self::verify_merkle_proof( - recipient, - *amount, - &airdrop_metadata.merkle_root, - merkle_proof, - ) { - let error = Error::::InvalidProof; - return InvalidTransaction::Custom(error.to_code()).into(); - } - - Ok(ValidTransaction { - priority: T::UnsignedClaimPriority::get(), - requires: vec![], - provides: vec![(airdrop_id, recipient, amount).encode()], - longevity: TransactionLongevity::MAX, - propagate: true, - }) - } else { - log::error!(target: "merkle-airdrop", "ValidateUnsigned: Received non-claim transaction or unexpected call structure"); - InvalidTransaction::Call.into() - } - } - } -} diff --git a/pallets/merkle-airdrop/src/mock.rs b/pallets/merkle-airdrop/src/mock.rs deleted file mode 100644 index 0a5c865c..00000000 --- a/pallets/merkle-airdrop/src/mock.rs +++ /dev/null @@ -1,129 +0,0 @@ -use crate as pallet_merkle_airdrop; -use frame_support::{ - parameter_types, - traits::{ConstU32, Everything, WithdrawReasons}, - PalletId, -}; -use frame_system::{self as system}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - BuildStorage, -}; - -type Block = frame_system::mocking::MockBlock; - -// Configure a mock runtime to test the pallet. -frame_support::construct_runtime!( - pub enum Test { - System: frame_system, - Vesting: pallet_vesting, - Balances: pallet_balances, - MerkleAirdrop: pallet_merkle_airdrop, - } -); - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const SS58Prefix: u8 = 189; -} - -impl system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type Nonce = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Block = Block; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = SS58Prefix; - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; - type RuntimeTask = (); - type ExtensionsWeightInfo = (); - type SingleBlockMigrations = (); - type MultiBlockMigrator = (); - type PreInherents = (); - type PostInherents = (); - type PostTransactions = (); - type RuntimeEvent = RuntimeEvent; -} - -parameter_types! { - pub const ExistentialDeposit: u64 = 1; - pub const MaxLocks: u32 = 50; -} - -impl pallet_balances::Config for Test { - type Balance = u64; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = (); - type FreezeIdentifier = (); - type MaxFreezes = (); - type RuntimeFreezeReason = (); - type DoneSlashHandler = (); -} - -parameter_types! 
{ - pub const MinVestedTransfer: u64 = 1; - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); -} - -impl pallet_vesting::Config for Test { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type WeightInfo = (); - type BlockNumberProvider = System; - type MinVestedTransfer = MinVestedTransfer; - type BlockNumberToBalance = ConvertInto; - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; - - const MAX_VESTING_SCHEDULES: u32 = 3; -} - -parameter_types! { - pub const MaxProofs: u32 = 100; - pub const MerkleAirdropPalletId: PalletId = PalletId(*b"airdrop!"); - pub const UnsignedClaimPriority: u64 = 100; -} - -impl pallet_merkle_airdrop::Config for Test { - type Vesting = Vesting; - type MaxProofs = MaxProofs; - type PalletId = MerkleAirdropPalletId; - type UnsignedClaimPriority = UnsignedClaimPriority; - type WeightInfo = (); - type BlockNumberProvider = System; - type BlockNumberToBalance = ConvertInto; -} - -// Build genesis storage according to the mock runtime. 
-pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10_000_000), (MerkleAirdrop::account_id(), 1)], - } - .assimilate_storage(&mut t) - .unwrap(); - - t.into() -} diff --git a/pallets/merkle-airdrop/src/tests.rs b/pallets/merkle-airdrop/src/tests.rs deleted file mode 100644 index a142b6c5..00000000 --- a/pallets/merkle-airdrop/src/tests.rs +++ /dev/null @@ -1,591 +0,0 @@ -#![allow(clippy::unit_cmp)] - -use crate::{mock::*, Error, Event}; -use codec::Encode; -use frame_support::{ - assert_noop, assert_ok, - traits::{InspectLockableCurrency, LockIdentifier}, - BoundedVec, -}; -use sp_core::blake2_256; -use sp_runtime::TokenError; - -fn bounded_proof(proof: Vec<[u8; 32]>) -> BoundedVec<[u8; 32], MaxProofs> { - proof.try_into().expect("Proof exceeds maximum size") -} - -// Helper function to calculate a leaf hash for testing -fn calculate_leaf_hash(account: &u64, amount: u64) -> [u8; 32] { - let account_bytes = account.encode(); - let amount_bytes = amount.encode(); - let leaf_data = [&account_bytes[..], &amount_bytes[..]].concat(); - - blake2_256(&leaf_data) -} - -// Helper function to calculate a parent hash for testing -fn calculate_parent_hash(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] { - let combined = if left < right { - [&left[..], &right[..]].concat() - } else { - [&right[..], &left[..]].concat() - }; - - blake2_256(&combined) -} - -const VESTING_ID: LockIdentifier = *b"vesting "; - -#[test] -fn create_airdrop_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let merkle_root = [0u8; 32]; - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(100), - Some(10) - )); - - let airdrop_metadata = crate::AirdropMetadata { - merkle_root, - creator: 1, - balance: 0, - vesting_period: Some(100), - vesting_delay: Some(10), - }; - - System::assert_last_event( - 
Event::AirdropCreated { airdrop_id: 0, airdrop_metadata: airdrop_metadata.clone() } - .into(), - ); - - assert_eq!(MerkleAirdrop::airdrop_info(0), Some(airdrop_metadata)); - }); -} - -#[test] -fn fund_airdrop_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let merkle_root = [0u8; 32]; - let amount = 100; - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(10), - Some(10) - )); - - assert_eq!(MerkleAirdrop::airdrop_info(0).unwrap().balance, 0); - - // fund airdrop with insufficient balance should fail - assert_noop!( - MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(123456), 0, amount * 10000), - TokenError::FundsUnavailable, - ); - - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, amount)); - - System::assert_last_event(Event::AirdropFunded { airdrop_id: 0, amount }.into()); - - // Check that the airdrop balance was updated - assert_eq!(MerkleAirdrop::airdrop_info(0).unwrap().balance, amount); - - // Check that the balance was transferred - assert_eq!(Balances::free_balance(1), 9999900); // 10000000 - 100 - assert_eq!(Balances::free_balance(MerkleAirdrop::account_id()), 101); - - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, amount)); - - assert_eq!(MerkleAirdrop::airdrop_info(0).unwrap().balance, amount * 2); - assert_eq!(Balances::free_balance(1), 9999800); // 9999900 - 100 - assert_eq!(Balances::free_balance(MerkleAirdrop::account_id()), 201); // locked for vesting - }); -} - -#[test] -fn claim_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let account1: u64 = 2; // Account that will claim - let amount1: u64 = 500; - let account2: u64 = 3; - let amount2: u64 = 300; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - 
merkle_root, - Some(100), - Some(2) - )); - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, 1000)); - - // Create proof for account1d - let merkle_proof = bounded_proof(vec![leaf2]); - - assert_ok!(MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 2, 500, merkle_proof.clone())); - - System::assert_last_event(Event::Claimed { airdrop_id: 0, account: 2, amount: 500 }.into()); - - assert_eq!(MerkleAirdrop::is_claimed(0, 2), ()); - assert_eq!(Balances::balance_locked(VESTING_ID, &2), 500); // Unlocked - - assert_eq!(Balances::free_balance(2), 500); - assert_eq!(Balances::free_balance(MerkleAirdrop::account_id()), 501); // 1 (initial) + 1000 - // (funded) - 500 (claimed) - }); -} - -#[test] -fn create_airdrop_requires_signed_origin() { - new_test_ext().execute_with(|| { - let merkle_root = [0u8; 32]; - - assert_noop!( - MerkleAirdrop::create_airdrop(RuntimeOrigin::none(), merkle_root, None, None), - frame_support::error::BadOrigin - ); - }); -} - -#[test] -fn fund_airdrop_fails_for_nonexistent_airdrop() { - new_test_ext().execute_with(|| { - assert_noop!( - MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 999, 1000), - Error::::AirdropNotFound - ); - }); -} - -#[test] -fn claim_fails_for_nonexistent_airdrop() { - new_test_ext().execute_with(|| { - let merkle_proof = bounded_proof(vec![[0u8; 32]]); - - assert_noop!( - MerkleAirdrop::claim(RuntimeOrigin::none(), 999, 1, 500, merkle_proof), - Error::::AirdropNotFound - ); - }); -} - -#[test] -fn claim_already_claimed() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let account1: u64 = 2; // Account that will claim - let amount1: u64 = 500; - let account2: u64 = 3; - let amount2: u64 = 300; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(100), - Some(10) - )); - 
assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, 1000)); - - let merkle_proof = bounded_proof(vec![leaf2]); - - assert_ok!(MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 2, 500, merkle_proof.clone())); - - // Try to claim again - assert_noop!( - MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 2, 500, merkle_proof.clone()), - Error::::AlreadyClaimed - ); - }); -} - -#[test] -fn verify_merkle_proof_works() { - new_test_ext().execute_with(|| { - // Create test accounts and amounts - let account1: u64 = 1; - let amount1: u64 = 500; - let account2: u64 = 2; - let amount2: u64 = 300; - - // Calculate leaf hashes - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - - // Calculate the Merkle root (hash of the two leaves) - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - // Create proofs - let proof_for_account1 = vec![leaf2]; - let proof_for_account2 = vec![leaf1]; - - // Test the verify_merkle_proof function directly - assert!( - MerkleAirdrop::verify_merkle_proof( - &account1, - amount1, - &merkle_root, - &proof_for_account1 - ), - "Proof for account1 should be valid" - ); - - assert!( - MerkleAirdrop::verify_merkle_proof( - &account2, - amount2, - &merkle_root, - &proof_for_account2 - ), - "Proof for account2 should be valid" - ); - - assert!( - !MerkleAirdrop::verify_merkle_proof( - &account1, - 400, // Wrong amount - &merkle_root, - &proof_for_account1 - ), - "Proof with wrong amount should be invalid" - ); - - let wrong_proof = vec![[1u8; 32]]; - assert!( - !MerkleAirdrop::verify_merkle_proof(&account1, amount1, &merkle_root, &wrong_proof), - "Wrong proof should be invalid" - ); - - assert!( - !MerkleAirdrop::verify_merkle_proof( - &3, // Wrong account - amount1, - &merkle_root, - &proof_for_account1 - ), - "Proof with wrong account should be invalid" - ); - }); -} - -#[test] -fn claim_invalid_proof_fails() { - new_test_ext().execute_with(|| { - let account1: u64 = 2; - let 
amount1: u64 = 500; - let account2: u64 = 3; - let amount2: u64 = 300; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(100), - Some(10) - )); - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, 1000)); - - let invalid_proof = bounded_proof(vec![[1u8; 32]]); // Different from the actual leaf2 - - assert_noop!( - MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 2, 500, invalid_proof), - Error::::InvalidProof - ); - }); -} - -#[test] -fn claim_insufficient_airdrop_balance_fails() { - new_test_ext().execute_with(|| { - // Create a valid merkle tree - let account1: u64 = 2; - let amount1: u64 = 500; - let account2: u64 = 3; - let amount2: u64 = 300; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(1000), - Some(100) - )); - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, 400)); // Fund less than claim amount - - let merkle_proof = bounded_proof(vec![leaf2]); - - // Attempt to claim more than available - assert_noop!( - MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 2, 500, merkle_proof), - Error::::InsufficientAirdropBalance - ); - }); -} - -#[test] -fn claim_nonexistent_airdrop_fails() { - new_test_ext().execute_with(|| { - // Attempt to claim from a nonexistent airdrop - assert_noop!( - MerkleAirdrop::claim( - RuntimeOrigin::none(), - 999, - 2, - 500, - bounded_proof(vec![[0u8; 32]]) - ), - Error::::AirdropNotFound - ); - }); -} - -#[test] -fn claim_updates_balances_correctly() { - new_test_ext().execute_with(|| { - // Create a valid merkle tree - let account1: u64 = 2; - let amount1: u64 = 500; - let 
account2: u64 = 3; - let amount2: u64 = 300; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(100), - Some(10) - )); - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, 1000)); - - let initial_account_balance = Balances::free_balance(2); - let initial_pallet_balance = Balances::free_balance(MerkleAirdrop::account_id()); - - assert_ok!(MerkleAirdrop::claim( - RuntimeOrigin::none(), - 0, - 2, - 500, - bounded_proof(vec![leaf2]) - )); - - assert_eq!(Balances::free_balance(2), initial_account_balance + 500); - assert_eq!( - Balances::free_balance(MerkleAirdrop::account_id()), - initial_pallet_balance - 500 - ); - - assert_eq!(MerkleAirdrop::airdrop_info(0).unwrap().balance, 500); - assert_eq!(MerkleAirdrop::is_claimed(0, 2), ()); - }); -} - -#[test] -fn multiple_users_can_claim() { - new_test_ext().execute_with(|| { - let account1: u64 = 2; - let amount1: u64 = 5000; - let account2: u64 = 3; - let amount2: u64 = 3000; - let account3: u64 = 4; - let amount3: u64 = 2000; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let leaf3 = calculate_leaf_hash(&account3, amount3); - let parent1 = calculate_parent_hash(&leaf1, &leaf2); - let merkle_root = calculate_parent_hash(&parent1, &leaf3); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(1), - merkle_root, - Some(1000), - Some(10) - )); - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(1), 0, 10001)); - - // User 1 claims - let proof1 = bounded_proof(vec![leaf2, leaf3]); - assert_ok!(MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 2, 5000, proof1)); - assert_eq!(Balances::free_balance(2), 5000); // free balance but it's locked for vesting - assert_eq!(Balances::balance_locked(VESTING_ID, &2), 
5000); - - // User 2 claims - let proof2 = bounded_proof(vec![leaf1, leaf3]); - assert_ok!(MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 3, 3000, proof2)); - assert_eq!(Balances::free_balance(3), 3000); - assert_eq!(Balances::balance_locked(VESTING_ID, &3), 3000); - - // User 3 claims - let proof3 = bounded_proof(vec![parent1]); - assert_ok!(MerkleAirdrop::claim(RuntimeOrigin::none(), 0, 4, 2000, proof3)); - assert_eq!(Balances::free_balance(4), 2000); - assert_eq!(Balances::balance_locked(VESTING_ID, &4), 2000); - - assert_eq!(MerkleAirdrop::airdrop_info(0).unwrap().balance, 1); - - assert_eq!(MerkleAirdrop::is_claimed(0, 2), ()); - assert_eq!(MerkleAirdrop::is_claimed(0, 3), ()); - assert_eq!(MerkleAirdrop::is_claimed(0, 4), ()); - }); -} - -#[test] -fn delete_airdrop_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let merkle_root = [0u8; 32]; - let creator = 1; - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(creator), - merkle_root, - Some(100), - Some(10) - )); - - let airdrop_info = MerkleAirdrop::airdrop_info(0).unwrap(); - - assert_eq!(airdrop_info.creator, creator); - - // Delete the airdrop (balance is zero) - assert_ok!(MerkleAirdrop::delete_airdrop(RuntimeOrigin::signed(creator), 0)); - - System::assert_last_event(Event::AirdropDeleted { airdrop_id: 0 }.into()); - - // Check that the airdrop no longer exists - assert!(MerkleAirdrop::airdrop_info(0).is_none()); - }); -} - -#[test] -fn delete_airdrop_with_balance_refunds_creator() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let merkle_root = [0u8; 32]; - let creator = 1; - let initial_creator_balance = Balances::free_balance(creator); - let fund_amount = 100; - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(creator), - merkle_root, - Some(100), - Some(10) - )); - - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(creator), 0, fund_amount)); - - // Creator's balance should be reduced by 
fund_amount - assert_eq!(Balances::free_balance(creator), initial_creator_balance - fund_amount); - - assert_ok!(MerkleAirdrop::delete_airdrop(RuntimeOrigin::signed(creator), 0)); - - // Check that the funds were returned to the creator - assert_eq!(Balances::free_balance(creator), initial_creator_balance); - - System::assert_last_event(Event::AirdropDeleted { airdrop_id: 0 }.into()); - }); -} - -#[test] -fn delete_airdrop_non_creator_fails() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let merkle_root = [0u8; 32]; - let creator = 1; - let non_creator = 2; - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(creator), - merkle_root, - Some(100), - Some(10) - )); - - assert_noop!( - MerkleAirdrop::delete_airdrop(RuntimeOrigin::signed(non_creator), 0), - Error::::NotAirdropCreator - ); - }); -} - -#[test] -fn delete_airdrop_nonexistent_fails() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - assert_noop!( - MerkleAirdrop::delete_airdrop(RuntimeOrigin::signed(1), 999), - Error::::AirdropNotFound - ); - }); -} - -#[test] -fn delete_airdrop_after_claims_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - let creator: u64 = 1; - let initial_creator_balance = Balances::free_balance(creator); - let account1: u64 = 2; - let amount1: u64 = 500; - let account2: u64 = 3; - let amount2: u64 = 300; - let total_fund = 1000; - - let leaf1 = calculate_leaf_hash(&account1, amount1); - let leaf2 = calculate_leaf_hash(&account2, amount2); - let merkle_root = calculate_parent_hash(&leaf1, &leaf2); - - assert_ok!(MerkleAirdrop::create_airdrop( - RuntimeOrigin::signed(creator), - merkle_root, - Some(100), - Some(10) - )); - assert_ok!(MerkleAirdrop::fund_airdrop(RuntimeOrigin::signed(creator), 0, total_fund)); - - // Let only one account claim (partial claiming) - let proof1 = bounded_proof(vec![leaf2]); - assert_ok!(MerkleAirdrop::claim(RuntimeOrigin::none(), 0, account1, amount1, proof1)); - 
- // Check that some balance remains - assert_eq!(MerkleAirdrop::airdrop_info(0).unwrap().balance, total_fund - amount1); - - // Now the creator deletes the airdrop with remaining balance - assert_ok!(MerkleAirdrop::delete_airdrop(RuntimeOrigin::signed(creator), 0)); - - // Check creator was refunded the unclaimed amount - assert_eq!( - Balances::free_balance(creator), - initial_creator_balance - total_fund + (total_fund - amount1) - ); - }); -} diff --git a/pallets/merkle-airdrop/src/weights.rs b/pallets/merkle-airdrop/src/weights.rs deleted file mode 100644 index c0213e38..00000000 --- a/pallets/merkle-airdrop/src/weights.rs +++ /dev/null @@ -1,193 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - - -//! Autogenerated weights for `pallet_merkle_airdrop` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 47.2.0 -//! DATE: 2025-06-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `MacBook-Pro-4.local`, CPU: `` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` - -// Executed Command: -// frame-omni-bencher -// v1 -// benchmark -// pallet -// --runtime -// ./target/release/wbuild/quantus-runtime/quantus_runtime.wasm -// --pallet -// pallet-merkle-airdrop -// --extrinsic -// * -// --template -// ./.maintain/frame-weight-template.hbs -// --output -// ./pallets/merkle-airdrop/src/weights.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_merkle_airdrop`. -pub trait WeightInfo { - fn create_airdrop() -> Weight; - fn fund_airdrop() -> Weight; - fn claim(p: u32, ) -> Weight; - fn delete_airdrop() -> Weight; -} - -/// Weights for `pallet_merkle_airdrop` using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MerkleAirdrop::NextAirdropId` (r:1 w:1) - /// Proof: `MerkleAirdrop::NextAirdropId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MerkleAirdrop::AirdropInfo` (r:0 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - fn create_airdrop() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `1489` - // Minimum execution time: 7_000_000 picoseconds. 
- Weight::from_parts(8_000_000, 1489) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `MerkleAirdrop::AirdropInfo` (r:1 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn fund_airdrop() -> Weight { - // Proof Size summary in bytes: - // Measured: `262` - // Estimated: `3593` - // Minimum execution time: 40_000_000 picoseconds. - Weight::from_parts(42_000_000, 3593) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: `MerkleAirdrop::Claimed` (r:1 w:1) - /// Proof: `MerkleAirdrop::Claimed` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// Storage: `MerkleAirdrop::AirdropInfo` (r:1 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - /// Storage: `Vesting::Vesting` (r:1 w:1) - /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 100]`. - fn claim(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `441` - // Estimated: `6196` - // Minimum execution time: 73_000_000 picoseconds. 
- Weight::from_parts(74_879_630, 6196) - // Standard Error: 1_851 - .saturating_add(Weight::from_parts(368_666, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) - } - /// Storage: `MerkleAirdrop::AirdropInfo` (r:1 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn delete_airdrop() -> Weight { - // Proof Size summary in bytes: - // Measured: `262` - // Estimated: `3593` - // Minimum execution time: 39_000_000 picoseconds. - Weight::from_parts(39_000_000, 3593) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: `MerkleAirdrop::NextAirdropId` (r:1 w:1) - /// Proof: `MerkleAirdrop::NextAirdropId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MerkleAirdrop::AirdropInfo` (r:0 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - fn create_airdrop() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `1489` - // Minimum execution time: 7_000_000 picoseconds. 
- Weight::from_parts(8_000_000, 1489) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `MerkleAirdrop::AirdropInfo` (r:1 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn fund_airdrop() -> Weight { - // Proof Size summary in bytes: - // Measured: `262` - // Estimated: `3593` - // Minimum execution time: 40_000_000 picoseconds. - Weight::from_parts(42_000_000, 3593) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: `MerkleAirdrop::Claimed` (r:1 w:1) - /// Proof: `MerkleAirdrop::Claimed` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// Storage: `MerkleAirdrop::AirdropInfo` (r:1 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - /// Storage: `Vesting::Vesting` (r:1 w:1) - /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 100]`. - fn claim(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `441` - // Estimated: `6196` - // Minimum execution time: 73_000_000 picoseconds. 
- Weight::from_parts(74_879_630, 6196) - // Standard Error: 1_851 - .saturating_add(Weight::from_parts(368_666, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) - } - /// Storage: `MerkleAirdrop::AirdropInfo` (r:1 w:1) - /// Proof: `MerkleAirdrop::AirdropInfo` (`max_values`: None, `max_size`: Some(110), added: 2585, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn delete_airdrop() -> Weight { - // Proof Size summary in bytes: - // Measured: `262` - // Estimated: `3593` - // Minimum execution time: 39_000_000 picoseconds. - Weight::from_parts(39_000_000, 3593) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } -} diff --git a/pallets/mining-rewards/src/lib.rs b/pallets/mining-rewards/src/lib.rs index d60557c1..4f89231e 100644 --- a/pallets/mining-rewards/src/lib.rs +++ b/pallets/mining-rewards/src/lib.rs @@ -123,7 +123,7 @@ pub mod pallet { let total_reward = remaining_supply .checked_div(&emission_divisor) - .unwrap_or_else(|| BalanceOf::::zero()); + .unwrap_or_else(BalanceOf::::zero); // Split the reward between treasury and miner let treasury_portion = T::TreasuryPortion::get(); diff --git a/pallets/mining-rewards/src/mock.rs b/pallets/mining-rewards/src/mock.rs index 2883832d..6a620fb9 100644 --- a/pallets/mining-rewards/src/mock.rs +++ b/pallets/mining-rewards/src/mock.rs @@ -82,6 +82,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type MaxFreezes = ConstU32<0>; type DoneSlashHandler = (); + type MintingAccount = MintingAccount; } parameter_types! 
{ diff --git a/pallets/merkle-airdrop/Cargo.toml b/pallets/multisig/Cargo.toml similarity index 59% rename from pallets/merkle-airdrop/Cargo.toml rename to pallets/multisig/Cargo.toml index 3c88c4bb..0d768485 100644 --- a/pallets/merkle-airdrop/Cargo.toml +++ b/pallets/multisig/Cargo.toml @@ -1,58 +1,55 @@ [package] authors.workspace = true -description = "A pallet for distributing tokens via Merkle proofs" +description = "Multisig pallet for Quantus" edition.workspace = true homepage.workspace = true license = "MIT-0" -name = "pallet-merkle-airdrop" -publish = false +name = "pallet-multisig" repository.workspace = true -version = "0.1.0" +version = "1.0.0" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -binary-merkle-tree.workspace = true -codec = { workspace = true, default-features = false, features = ["derive"] } +codec = { features = ["derive", "max-encoded-len"], workspace = true } frame-benchmarking = { optional = true, workspace = true } frame-support.workspace = true frame-system.workspace = true log.workspace = true -pallet-vesting = { workspace = true, optional = true } -scale-info = { workspace = true, default-features = false, features = ["derive"] } -sha2.workspace = true +pallet-balances.workspace = true +scale-info = { features = ["derive"], workspace = true } +sp-arithmetic.workspace = true sp-core.workspace = true sp-io.workspace = true sp-runtime.workspace = true [dev-dependencies] -pallet-balances.features = ["std"] -pallet-balances.workspace = true -pallet-vesting.workspace = true +frame-support = { workspace = true, features = ["experimental"], default-features = true } +pallet-balances = { workspace = true, features = ["std"] } +pallet-timestamp.workspace = true sp-core.workspace = true sp-io.workspace = true -sp-runtime.workspace = true [features] default = ["std"] runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", + "frame-benchmarking", "frame-support/runtime-benchmarks", 
"frame-system/runtime-benchmarks", - "pallet-vesting", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", ] std = [ - "binary-merkle-tree/std", "codec/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", "pallet-balances/std", - "pallet-vesting?/std", + "pallet-timestamp/std", "scale-info/std", - "sha2/std", + "sp-arithmetic/std", "sp-core/std", "sp-io/std", "sp-runtime/std", @@ -60,4 +57,5 @@ std = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "sp-runtime/try-runtime", ] diff --git a/pallets/multisig/README.md b/pallets/multisig/README.md new file mode 100644 index 00000000..a7853087 --- /dev/null +++ b/pallets/multisig/README.md @@ -0,0 +1,512 @@ +# Multisig Pallet + +A multisignature wallet pallet for the Quantus blockchain with an economic security model. + +## Overview + +This pallet provides functionality for creating and managing multisig accounts that require multiple approvals before executing transactions. It implements a dual fee+deposit system for spam prevention and storage cleanup mechanisms with grace periods. + +## Quick Start + +Basic workflow for using a multisig: + +```rust +// 1. Create a 2-of-3 multisig (Alice creates, Bob/Charlie/Dave are signers) +Multisig::create_multisig(Origin::signed(alice), vec![bob, charlie, dave], 2); +let multisig_addr = Multisig::derive_multisig_address(&[bob, charlie, dave], 0); + +// 2. Bob proposes a transaction +let call = RuntimeCall::Balances(pallet_balances::Call::transfer { dest: eve, value: 100 }); +Multisig::propose(Origin::signed(bob), multisig_addr, call.encode(), expiry_block); + +// 3. Charlie approves - transaction executes automatically (2/2 threshold reached) +Multisig::approve(Origin::signed(charlie), multisig_addr, proposal_id); +// ✅ Transaction executed! No separate call needed. +``` + +**Key Point:** Once the threshold is reached, the transaction is **automatically executed**. 
+There is no separate `execute()` call exposed to users. + +## Core Functionality + +### 1. Create Multisig +Creates a new multisig account with deterministic address generation. + +**Required Parameters:** +- `signers: Vec` - List of authorized signers (REQUIRED, 1 to MaxSigners) +- `threshold: u32` - Number of approvals needed (REQUIRED, 1 ≤ threshold ≤ signers.len()) + +**Validation:** +- No duplicate signers +- Threshold must be > 0 +- Threshold cannot exceed number of signers +- Signers count must be ≤ MaxSigners + +**Important:** Signers are automatically sorted before storing and address generation. Order doesn't matter: +- `[alice, bob, charlie]` → sorted to `[alice, bob, charlie]` → `address_1` +- `[charlie, bob, alice]` → sorted to `[alice, bob, charlie]` → `address_1` (same!) +- To create multiple multisigs with same signers, the nonce provides uniqueness + +**Economic Costs:** +- **MultisigFee**: Non-refundable fee (spam prevention) → burned +- **MultisigDeposit**: Refundable deposit (storage rent) → returned when multisig dissolved + +### 2. Propose Transaction +Creates a new proposal for multisig execution. 
+ +**Required Parameters:** +- `multisig_address: AccountId` - Target multisig account (REQUIRED) +- `call: Vec` - Encoded RuntimeCall to execute (REQUIRED, max MaxCallSize bytes) +- `expiry: BlockNumber` - Deadline for collecting approvals (REQUIRED) + +**Validation:** +- Caller must be a signer +- Call size must be ≤ MaxCallSize +- Multisig cannot have MaxTotalProposalsInStorage or more total proposals in storage +- Caller cannot exceed their per-signer proposal limit (`MaxTotalProposalsInStorage / signers_count`) +- Expiry must be in the future (expiry > current_block) +- Expiry must not exceed MaxExpiryDuration blocks from now (expiry ≤ current_block + MaxExpiryDuration) + +**Auto-Cleanup Before Creation:** +Before creating a new proposal, the system **automatically removes all expired Active proposals** for this multisig: +- Expired proposals are identified (current_block > expiry) +- Deposits are returned to original proposers +- Storage is cleaned up +- Counters are decremented +- Events are emitted for each removed proposal + +This ensures storage is kept clean and users get their deposits back without manual intervention. + +**Threshold=1 Auto-Execution:** +If the multisig has `threshold=1`, the proposal **executes immediately** after creation: +- Proposer's approval counts as the first (and only required) approval +- Call is dispatched automatically +- Proposal is removed from storage immediately +- Deposit is returned to proposer immediately +- No separate `approve()` call needed + +**Economic Costs:** +- **ProposalFee**: Non-refundable fee (spam prevention, scaled by signer count) → burned +- **ProposalDeposit**: Refundable deposit (storage rent) → returned when proposal removed + +**Important:** Fee is ALWAYS paid, even if proposal expires or is cancelled. Only deposit is refundable. + +### 3. Approve Transaction +Adds caller's approval to an existing proposal. 
**If this approval brings the total approvals +to or above the threshold, the transaction will be automatically executed and immediately removed from storage.** + +**Required Parameters:** +- `multisig_address: AccountId` - Target multisig (REQUIRED) +- `proposal_id: u32` - ID (nonce) of the proposal to approve (REQUIRED) + +**Validation:** +- Caller must be a signer +- Proposal must exist +- Proposal must not be expired (current_block ≤ expiry) +- Caller must not have already approved + +**Auto-Execution:** +When approval count reaches the threshold: +- Encoded call is executed as multisig_address origin +- Proposal **immediately removed** from storage +- ProposalDeposit **immediately returned** to proposer +- TransactionExecuted event emitted with execution result + +**Economic Costs:** None (deposit immediately returned on execution) + +### 4. Cancel Transaction +Cancels a proposal and immediately removes it from storage (proposer only). + +**Required Parameters:** +- `multisig_address: AccountId` - Target multisig (REQUIRED) +- `proposal_id: u32` - ID (nonce) of the proposal to cancel (REQUIRED) + +**Validation:** +- Caller must be the proposer +- Proposal must exist and be Active + +**Economic Effects:** +- Proposal **immediately removed** from storage +- ProposalDeposit **immediately returned** to proposer +- Counters decremented + +**Economic Costs:** None (deposit immediately returned) + +**Note:** ProposalFee is NOT refunded - it was burned at proposal creation. + +### 5. Remove Expired +Manually removes expired proposals from storage. Only signers can call this. + +**Important:** This is rarely needed because expired proposals are automatically cleaned up on any multisig activity (`propose()`, `approve()`, `cancel()`). 
+ +**Required Parameters:** +- `multisig_address: AccountId` - Target multisig (REQUIRED) +- `proposal_id: u32` - ID (nonce) of the expired proposal (REQUIRED) + +**Validation:** +- Caller must be a signer of the multisig +- Proposal must exist and be Active +- Must be expired (current_block > expiry) + +**Note:** Executed/Cancelled proposals are automatically removed immediately, so this only applies to Active+Expired proposals. + +**Economic Effects:** +- ProposalDeposit returned to **original proposer** (not caller) +- Proposal removed from storage +- Counters decremented + +**Economic Costs:** None (deposit always returned to proposer) + +**Auto-Cleanup:** ALL expired proposals are automatically removed on any multisig activity (`propose()`, `approve()`, `cancel()`), making this function often unnecessary. + +### 6. Claim Deposits +Batch cleanup operation to recover all expired proposal deposits. + +**Important:** This is rarely needed because expired proposals are automatically cleaned up on any multisig activity (`propose()`, `approve()`, `cancel()`). + +**Required Parameters:** +- `multisig_address: AccountId` - Target multisig (REQUIRED) + +**Validation:** +- Only cleans proposals where caller is proposer +- Only removes Active+Expired proposals (Executed/Cancelled already auto-removed) +- Must be expired (current_block > expiry) + +**Economic Effects:** +- Returns all eligible proposal deposits to caller +- Removes all expired proposals from storage +- Counters decremented + +**Economic Costs:** None (only returns deposits) + +**Auto-Cleanup:** ALL expired proposals are automatically removed on any multisig activity (`propose()`, `approve()`, `cancel()`), making this function often unnecessary. + +### 7. Dissolve Multisig +Permanently removes a multisig and returns the creation deposit to the original creator. 
+ +**Required Parameters:** +- `multisig_address: AccountId` - Target multisig (REQUIRED) + +**Pre-conditions:** +- NO proposals can exist (any status) +- Multisig balance MUST be zero +- Caller must be creator OR any signer + +**Post-conditions:** +- MultisigDeposit returned to **original creator** (not caller) +- Multisig removed from storage +- Cannot be used after dissolution + +**Economic Costs:** None (returns MultisigDeposit) + +**Important:** MultisigFee is NEVER returned - only the MultisigDeposit. + +## Use Cases + +**Payroll Multisig (transfers only):** +```rust +// Only allow keep_alive transfers to prevent account deletion +matches!(call, RuntimeCall::Balances(Call::transfer_keep_alive { .. })) +``` + +**Treasury Multisig (governance + transfers):** +```rust +matches!(call, + RuntimeCall::Balances(Call::transfer_keep_alive { .. }) | + RuntimeCall::Scheduler(Call::schedule { .. }) | // Time-locked ops + RuntimeCall::Democracy(Call::veto { .. }) // Emergency stops +) +``` + +## Economic Model + +### Fees (Non-refundable, burned) +**Purpose:** Spam prevention and deflationary pressure + +- **MultisigFee**: + - Charged on multisig creation + - Burned immediately (reduces total supply) + - **Never returned** (even if multisig dissolved) + - Creates economic barrier to prevent spam multisig creation + +- **ProposalFee**: + - Charged on proposal creation + - **Dynamically scaled** by signer count: `BaseFee × (1 + SignerCount × StepFactor)` + - Burned immediately (reduces total supply) + - **Never returned** (even if proposal expires or is cancelled) + - Makes spam expensive, scales cost with multisig complexity + +**Why burned (not sent to treasury)?** +- Creates deflationary pressure on token supply +- Simpler implementation (no treasury dependency) +- Spam attacks reduce circulating supply +- Lower transaction costs (withdraw vs transfer) + +### Deposits (Refundable, locked as storage rent) +**Purpose:** Compensate for on-chain storage, incentivize cleanup 
+ +- **MultisigDeposit**: + - Reserved on multisig creation + - Returned when multisig dissolved (via `dissolve_multisig`) + - Locked until no proposals exist and balance is zero + - Opportunity cost incentivizes cleanup + +- **ProposalDeposit**: + - Reserved on proposal creation + - **Auto-Returned Immediately:** + - When proposal executed (threshold reached) + - When proposal cancelled (proposer cancels) + - **Auto-Cleanup:** ALL expired proposals are automatically removed on ANY multisig activity + - Triggered by: `propose()`, `approve()`, `cancel()` + - Deposits returned to original proposers + - No manual cleanup needed for active multisigs + - **Manual Cleanup:** Only needed for inactive multisigs via `remove_expired()` or `claim_deposits()` + +### Storage Limits & Configuration +**Purpose:** Prevent unbounded storage growth and resource exhaustion + +- **MaxSigners**: Maximum signers per multisig + - Trade-off: Higher → more flexible governance, more computation per approval + +- **MaxTotalProposalsInStorage**: Maximum total proposals (Active + Executed + Cancelled) + - Trade-off: Higher → more flexible, more storage risk + - Forces periodic cleanup to continue operating + - **Auto-cleanup**: Expired proposals are automatically removed when new proposals are created + - **Per-Signer Limit**: Each signer gets `MaxTotalProposalsInStorage / signers_count` quota + - Prevents single signer from monopolizing storage (filibuster protection) + - Fair allocation ensures all signers can participate + - Example: 20 total, 5 signers → 4 proposals max per signer + +- **MaxCallSize**: Maximum encoded call size in bytes + - Trade-off: Larger → more flexibility, more storage per proposal + - Should accommodate common operations (transfers, staking, governance) + +- **MaxExpiryDuration**: Maximum blocks in the future for proposal expiry + - Trade-off: Shorter → faster turnover, may not suit slow decision-making + - Prevents infinite-duration deposit locks + - Should exceed 
typical multisig decision timeframes + +**Configuration values are runtime-specific.** See runtime config for production values. + +## Storage + +### Multisigs: Map<AccountId, MultisigData> +Stores multisig account data: +```rust +MultisigData { + signers: BoundedVec<AccountId, MaxSigners>, // List of authorized signers + threshold: u32, // Required approvals + nonce: u64, // Unique identifier used in address generation + deposit: Balance, // Reserved deposit (refundable) + creator: AccountId, // Who created it (receives deposit back) + last_activity: BlockNumber, // Last action timestamp (for grace period) + active_proposals: u32, // Count of open proposals (monitoring/analytics) + proposals_per_signer: BoundedBTreeMap<AccountId, u32, MaxSigners>, // Per-signer proposal count (filibuster protection) +} +``` + +### Proposals: DoubleMap<AccountId, u32, ProposalData> +Stores proposal data indexed by (multisig_address, proposal_id): +```rust +ProposalData { + proposer: AccountId, // Who proposed (receives deposit back) + call: BoundedVec<u8, MaxCallSize>, // Encoded RuntimeCall to execute + expiry: BlockNumber, // Deadline for approvals + approvals: BoundedVec<AccountId, MaxSigners>, // List of signers who approved + deposit: Balance, // Reserved deposit (refundable) + status: ProposalStatus, // Active only (Executed/Cancelled are removed immediately) +} +``` + +**Important:** Only **Active** proposals are stored. Executed and Cancelled proposals are **immediately removed** from storage and their deposits are returned. Historical data is available through events (see Historical Data section below). + +### GlobalNonce: u64 +Internal counter for generating unique multisig addresses. Not exposed via API.
+ +## Events + +- `MultisigCreated { creator, multisig_address, signers, threshold, nonce }` +- `ProposalCreated { multisig_address, proposer, proposal_id }` +- `ProposalApproved { multisig_address, approver, proposal_id, approvals_count }` +- `ProposalExecuted { multisig_address, proposal_id, proposer, call, approvers, result }` +- `ProposalCancelled { multisig_address, proposer, proposal_id }` +- `ProposalRemoved { multisig_address, proposal_id, proposer, removed_by }` +- `DepositsClaimed { multisig_address, claimer, total_returned, proposals_removed, multisig_removed }` +- `MultisigDissolved { multisig_address, caller, deposit_returned }` + +## Errors + +- `NotEnoughSigners` - Less than 1 signer provided +- `ThresholdZero` - Threshold cannot be 0 +- `ThresholdTooHigh` - Threshold exceeds number of signers +- `TooManySigners` - Exceeds MaxSigners limit +- `DuplicateSigner` - Duplicate address in signers list +- `MultisigAlreadyExists` - Multisig with this address already exists +- `MultisigNotFound` - Multisig does not exist +- `NotASigner` - Caller is not authorized signer +- `ProposalNotFound` - Proposal does not exist +- `NotProposer` - Caller is not the proposer (for cancel) +- `AlreadyApproved` - Signer already approved this proposal +- `NotEnoughApprovals` - Threshold not met (internal error, should not occur) +- `ExpiryInPast` - Proposal expiry is not in the future (for propose) +- `ExpiryTooFar` - Proposal expiry exceeds MaxExpiryDuration (for propose) +- `ProposalExpired` - Proposal deadline passed (for approve) +- `CallTooLarge` - Encoded call exceeds MaxCallSize +- `InvalidCall` - Call decoding failed during execution +- `InsufficientBalance` - Not enough funds for fee/deposit +- `TooManyProposalsInStorage` - Multisig has MaxTotalProposalsInStorage total proposals (cleanup required to create new) +- `TooManyProposalsPerSigner` - Caller has reached their per-signer proposal limit (`MaxTotalProposalsInStorage / signers_count`) +- `ProposalNotExpired` - 
Proposal not yet expired (for remove_expired) +- `ProposalNotActive` - Proposal is not active (already executed or cancelled) + +## Important Behavior + +### Simple Proposal IDs (Not Hashes) +Proposals are identified by a simple **nonce (u32)** instead of a hash: +- **More efficient:** 4 bytes instead of 32 bytes (Blake2_256 hash) +- **Simpler:** No need to hash `(call, nonce)`, just use nonce directly +- **Better UX:** Sequential IDs (0, 1, 2...) easier to read than random hashes +- **Easier queries:** Can iterate proposals by ID without needing call data + +**Example:** +```rust +propose(...) // → proposal_id: 0 +propose(...) // → proposal_id: 1 +propose(...) // → proposal_id: 2 + +// Approve by ID (not hash) +approve(multisig, 1) // Approve proposal #1 +``` + +### Signer Order Doesn't Matter +Signers are **automatically sorted** before address generation and storage: +- Input order is irrelevant - signers are always sorted deterministically +- Address is derived from `Hash(PalletId + sorted_signers + nonce)` +- Same signers in any order = same multisig address (with same nonce) +- To create multiple multisigs with same participants, use different creation transactions (nonce auto-increments) + +**Example:** +```rust +// These create the SAME multisig address (same signers, same nonce): +create_multisig([alice, bob, charlie], 2) // → multisig_addr_1 (nonce=0) +create_multisig([charlie, bob, alice], 2) // → multisig_addr_1 (SAME! nonce would be 1 but already exists) + +// To create another multisig with same signers: +create_multisig([alice, bob, charlie], 2) // → multisig_addr_2 (nonce=1, different address) +``` + +## Historical Data and Event Indexing + +The pallet does **not** maintain on-chain storage of executed proposal history. Instead, all historical data is available through **blockchain events**, which are designed to be efficiently indexed by off-chain indexers like **SubSquid**. 
+ +### ProposalExecuted Event + +When a proposal is successfully executed, the pallet emits a comprehensive `ProposalExecuted` event containing all relevant data: + +```rust +Event::ProposalExecuted { + multisig_address: T::AccountId, // The multisig that executed + proposal_id: u32, // ID (nonce) of the proposal + proposer: T::AccountId, // Who originally proposed it + call: Vec<u8>, // The encoded call that was executed + approvers: Vec<T::AccountId>, // All accounts that approved + result: DispatchResult, // Whether execution succeeded or failed +} +``` + +### Indexing with SubSquid + +This event structure is optimized for indexing by SubSquid and similar indexers: +- **Complete data**: All information needed to reconstruct the full proposal history +- **Queryable**: Indexers can efficiently query by multisig address, proposer, approvers, etc. +- **Execution result**: Both successful and failed executions are recorded +- **No storage bloat**: Events don't consume on-chain storage long-term + +**All events** for complete history: +- `MultisigCreated` - When a multisig is created +- `ProposalCreated` - When a proposal is submitted +- `ProposalApproved` - Each time someone approves (includes current approval count) +- `ProposalExecuted` - When a proposal is executed (includes full execution details) +- `ProposalCancelled` - When a proposal is cancelled by proposer +- `ProposalRemoved` - When a proposal is removed from storage (deposits returned) +- `DepositsClaimed` - Batch removal of multiple proposals + +### Benefits of Event-Based History + +- ✅ **No storage costs**: Events don't occupy chain storage after archival +- ✅ **Complete history**: All actions are recorded permanently in events +- ✅ **Efficient querying**: Off-chain indexers provide fast, flexible queries +- ✅ **No DoS risk**: No on-chain iteration over unbounded storage +- ✅ **Standard practice**: Follows Substrate best practices for historical data + +## Security Considerations + +### Spam Prevention +- Fees
(non-refundable, burned) prevent proposal spam +- Deposits (refundable) prevent storage bloat +- MaxTotalProposalsInStorage caps total storage per multisig +- Per-signer limits prevent single signer from monopolizing storage (filibuster protection) +- Auto-cleanup of expired proposals reduces storage pressure + +### Storage Cleanup +- Grace period allows proposers priority cleanup +- After grace: public cleanup incentivized +- Batch cleanup via claim_deposits for efficiency + +### Economic Attacks +- **Multisig Spam:** Costs MultisigFee (burned, reduces supply) + - No refund even if never used + - Economic barrier to creation spam +- **Proposal Spam:** Costs ProposalFee (burned, reduces supply) + ProposalDeposit (locked) + - Fee never returned (even if expired/cancelled) + - Deposit locked until cleanup + - Cost scales with multisig size (dynamic pricing) +- **Filibuster Attack (Single Signer Monopolization):** + - **Attack:** One signer tries to fill entire proposal queue + - **Defense:** Per-signer limit caps each at `MaxTotalProposalsInStorage / signers_count` + - **Effect:** Other signers retain their fair quota + - **Cost:** Attacker still pays fees for their proposals (burned) +- **Result:** Spam attempts reduce circulating supply +- **No global limits:** Only per-multisig limits (decentralized resistance) + +### Call Execution +- Calls execute with multisig_address as origin +- Multisig can call ANY pallet (including recursive multisig calls) +- Call validation happens at execution time +- Failed calls emit event with error but don't revert proposal removal + +## Configuration Example + + +```rust +impl pallet_multisig::Config for Runtime { + type RuntimeCall = RuntimeCall; + type Currency = Balances; + + // Storage limits (prevent unbounded growth) + type MaxSigners = ConstU32<100>; // Max complexity + type MaxTotalProposalsInStorage = ConstU32<200>; // Total storage cap (auto-cleanup on propose) + type MaxCallSize = ConstU32<10240>; // Per-proposal storage 
limit + type MaxExpiryDuration = ConstU32<100_800>; // Max proposal lifetime (~2 weeks @ 12s) + + // Economic parameters (example values - adjust per runtime) + type MultisigFee = ConstU128<{ 100 * MILLI_UNIT }>; // Creation barrier + type MultisigDeposit = ConstU128<{ 500 * MILLI_UNIT }>; // Storage rent + type ProposalFee = ConstU128<{ 1000 * MILLI_UNIT }>; // Base proposal cost + type ProposalDeposit = ConstU128<{ 1000 * MILLI_UNIT }>; // Cleanup incentive + type SignerStepFactor = Permill::from_percent(1); // Dynamic pricing (1% per signer) + + type PalletId = ConstPalletId(*b"py/mltsg"); + type WeightInfo = pallet_multisig::weights::SubstrateWeight; +} +``` + +**Parameter Selection Considerations:** +- **High-value chains:** Lower fees, higher deposits, tighter limits +- **Low-value chains:** Higher fees (maintain spam protection), lower deposits +- **Enterprise use:** Higher MaxSigners, longer MaxExpiryDuration +- **Public use:** Moderate limits, shorter expiry for faster turnover + +## License + +MIT-0 diff --git a/pallets/multisig/src/benchmarking.rs b/pallets/multisig/src/benchmarking.rs new file mode 100644 index 00000000..03db467f --- /dev/null +++ b/pallets/multisig/src/benchmarking.rs @@ -0,0 +1,569 @@ +//! 
Benchmarking setup for pallet-multisig + +use super::*; +use crate::Pallet as Multisig; +use alloc::vec; +use frame_benchmarking::{account as benchmark_account, v2::*, BenchmarkError}; +use frame_support::traits::{fungible::Mutate, ReservableCurrency}; +use frame_system::RawOrigin; + +const SEED: u32 = 0; + +// Helper to fund an account +type BalanceOf2 = ::Balance; + +fn fund_account(account: &T::AccountId, amount: BalanceOf2) +where + T: Config + pallet_balances::Config, +{ + let _ = as Mutate>::mint_into( + account, + amount * as frame_support::traits::Currency>::minimum_balance(), + ); +} + +#[benchmarks( + where + T: Config + pallet_balances::Config, + BalanceOf2: From, +)] +mod benchmarks { + use super::*; + use codec::Encode; + + #[benchmark] + fn create_multisig() -> Result<(), BenchmarkError> { + let caller: T::AccountId = whitelisted_caller(); + + // Fund the caller with enough balance for deposit + fund_account::(&caller, BalanceOf2::::from(10000u128)); + + // Create signers (including caller) + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + let signers = vec![caller.clone(), signer1, signer2]; + let threshold = 2u32; + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), signers.clone(), threshold); + + // Verify the multisig was created + // Note: signers are sorted internally, so we must sort for address derivation + let mut sorted_signers = signers.clone(); + sorted_signers.sort(); + let multisig_address = Multisig::::derive_multisig_address(&sorted_signers, 0); + assert!(Multisigs::::contains_key(multisig_address)); + + Ok(()) + } + + #[benchmark] + fn propose( + c: Linear<0, { T::MaxCallSize::get().saturating_sub(100) }>, + e: Linear<0, { T::MaxTotalProposalsInStorage::get() }>, // expired proposals to cleanup + ) -> Result<(), BenchmarkError> { + // Setup: Create a multisig first + let caller: T::AccountId = whitelisted_caller(); + 
fund_account::(&caller, BalanceOf2::::from(100000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + fund_account::(&signer1, BalanceOf2::::from(100000u128)); + fund_account::(&signer2, BalanceOf2::::from(100000u128)); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone()]; + let threshold = 2u32; + signers.sort(); + + // Create multisig directly in storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let multisig_data = MultisigDataOf:: { + signers: bounded_signers, + threshold, + nonce: 0, + proposal_nonce: e, // We'll insert e expired proposals + creator: caller.clone(), + deposit: T::MultisigDeposit::get(), + last_activity: frame_system::Pallet::::block_number(), + active_proposals: e, + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, multisig_data); + + // Insert e expired proposals (worst case for auto-cleanup) + let expired_block = 10u32.into(); + for i in 0..e { + let system_call = frame_system::Call::::remark { remark: vec![i as u8; 10] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let bounded_call: BoundedCallOf = encoded_call.try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry: expired_block, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + Proposals::::insert(&multisig_address, i, proposal_data); + } + + // Move past expiry so proposals are expired + frame_system::Pallet::::set_block_number(100u32.into()); + + // Create a new proposal (will auto-cleanup all e expired proposals) + let system_call = frame_system::Call::::remark { remark: 
vec![99u8; c as usize] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let expiry = frame_system::Pallet::::block_number() + 1000u32.into(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), multisig_address.clone(), encoded_call, expiry); + + // Verify new proposal was created and expired ones were cleaned + let multisig = Multisigs::::get(&multisig_address).unwrap(); + assert_eq!(multisig.active_proposals, 1); // Only new proposal remains + + Ok(()) + } + + #[benchmark] + fn approve( + c: Linear<0, { T::MaxCallSize::get().saturating_sub(100) }>, + e: Linear<0, { T::MaxTotalProposalsInStorage::get() }>, // expired proposals to cleanup + ) -> Result<(), BenchmarkError> { + // Setup: Create multisig and proposal directly in storage + // Threshold is 3, so adding one more approval won't trigger execution + let caller: T::AccountId = whitelisted_caller(); + fund_account::(&caller, BalanceOf2::::from(100000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + let signer3: T::AccountId = benchmark_account("signer3", 2, SEED); + fund_account::(&signer1, BalanceOf2::::from(100000u128)); + fund_account::(&signer2, BalanceOf2::::from(100000u128)); + fund_account::(&signer3, BalanceOf2::::from(100000u128)); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone(), signer3.clone()]; + let threshold = 3u32; // Need 3 approvals + + // Sort signers to match create_multisig behavior + signers.sort(); + + // Directly insert multisig into storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let multisig_data = MultisigDataOf:: { + signers: bounded_signers, + threshold, + nonce: 0, + proposal_nonce: e + 1, // We'll insert e expired proposals + 1 active + creator: caller.clone(), + deposit: 
T::MultisigDeposit::get(), + last_activity: frame_system::Pallet::::block_number(), + active_proposals: e + 1, + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, multisig_data); + + // Insert e expired proposals (worst case for auto-cleanup) + let expired_block = 10u32.into(); + for i in 0..e { + let system_call = frame_system::Call::::remark { remark: vec![i as u8; 10] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let bounded_call: BoundedCallOf = encoded_call.try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry: expired_block, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + Proposals::::insert(&multisig_address, i, proposal_data); + } + + // Move past expiry so proposals are expired + frame_system::Pallet::::set_block_number(100u32.into()); + + // Directly insert active proposal into storage with 1 approval + // Create a remark call where the remark itself is c bytes + let system_call = frame_system::Call::::remark { remark: vec![1u8; c as usize] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let expiry = frame_system::Pallet::::block_number() + 1000u32.into(); + let bounded_call: BoundedCallOf = encoded_call.clone().try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + + let proposal_id = e; // Active proposal after expired ones + Proposals::::insert(&multisig_address, proposal_id, proposal_data); + + #[extrinsic_call] + _(RawOrigin::Signed(signer1.clone()), multisig_address.clone(), 
proposal_id); + + // Verify approval was added (now 2/3, not executed yet) + let proposal = Proposals::::get(&multisig_address, proposal_id).unwrap(); + assert!(proposal.approvals.contains(&signer1)); + assert_eq!(proposal.approvals.len(), 2); + + Ok(()) + } + + #[benchmark] + fn approve_and_execute( + c: Linear<0, { T::MaxCallSize::get().saturating_sub(100) }>, + ) -> Result<(), BenchmarkError> { + // Benchmarks approve() when it triggers auto-execution (threshold reached) + let caller: T::AccountId = whitelisted_caller(); + fund_account::(&caller, BalanceOf2::::from(10000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + fund_account::(&signer1, BalanceOf2::::from(10000u128)); + fund_account::(&signer2, BalanceOf2::::from(10000u128)); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone()]; + let threshold = 2u32; + + // Sort signers to match create_multisig behavior + signers.sort(); + + // Directly insert multisig into storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let multisig_data = MultisigDataOf:: { + signers: bounded_signers, + threshold, + nonce: 0, + proposal_nonce: 1, // We'll insert proposal with id 0 + creator: caller.clone(), + deposit: T::MultisigDeposit::get(), + last_activity: frame_system::Pallet::::block_number(), + active_proposals: 1, + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, multisig_data); + + // Directly insert proposal with 1 approval (caller already approved) + // signer2 will approve and trigger execution + // Create a remark call where the remark itself is c bytes + let system_call = frame_system::Call::::remark { remark: vec![1u8; c as usize] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let expiry = 
frame_system::Pallet::::block_number() + 1000u32.into(); + let bounded_call: BoundedCallOf = encoded_call.clone().try_into().unwrap(); + // Only 1 approval so far + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + + let proposal_id = 0u32; + Proposals::::insert(&multisig_address, proposal_id, proposal_data); + + // signer2 approves, reaching threshold (2/2), triggering auto-execution + #[extrinsic_call] + approve(RawOrigin::Signed(signer2.clone()), multisig_address.clone(), proposal_id); + + // Verify proposal was removed from storage (auto-deleted after execution) + assert!(!Proposals::::contains_key(&multisig_address, proposal_id)); + + Ok(()) + } + + #[benchmark] + fn cancel( + c: Linear<0, { T::MaxCallSize::get().saturating_sub(100) }>, + e: Linear<0, { T::MaxTotalProposalsInStorage::get() }>, // expired proposals to cleanup + ) -> Result<(), BenchmarkError> { + // Setup: Create multisig and proposal directly in storage + let caller: T::AccountId = whitelisted_caller(); + fund_account::(&caller, BalanceOf2::::from(100000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + fund_account::(&signer1, BalanceOf2::::from(100000u128)); + fund_account::(&signer2, BalanceOf2::::from(100000u128)); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone()]; + let threshold = 2u32; + + // Sort signers to match create_multisig behavior + signers.sort(); + + // Directly insert multisig into storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let multisig_data = MultisigDataOf:: { + signers: bounded_signers, + 
threshold, + nonce: 0, + proposal_nonce: e + 1, // We'll insert e expired proposals + 1 active + creator: caller.clone(), + deposit: T::MultisigDeposit::get(), + last_activity: frame_system::Pallet::::block_number(), + active_proposals: e + 1, + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, multisig_data); + + // Insert e expired proposals (worst case for auto-cleanup) + let expired_block = 10u32.into(); + for i in 0..e { + let system_call = frame_system::Call::::remark { remark: vec![i as u8; 10] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let bounded_call: BoundedCallOf = encoded_call.try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry: expired_block, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + Proposals::::insert(&multisig_address, i, proposal_data); + } + + // Move past expiry so proposals are expired + frame_system::Pallet::::set_block_number(100u32.into()); + + // Directly insert active proposal into storage + // Create a remark call where the remark itself is c bytes + let system_call = frame_system::Call::::remark { remark: vec![1u8; c as usize] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let expiry = frame_system::Pallet::::block_number() + 1000u32.into(); + let bounded_call: BoundedCallOf = encoded_call.clone().try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + + let proposal_id = e; // Active proposal after expired ones + Proposals::::insert(&multisig_address, 
proposal_id, proposal_data); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), multisig_address.clone(), proposal_id); + + // Verify proposal was removed from storage (auto-deleted after cancellation) + assert!(!Proposals::::contains_key(&multisig_address, proposal_id)); + + Ok(()) + } + + #[benchmark] + fn remove_expired() -> Result<(), BenchmarkError> { + // Setup: Create multisig and expired proposal directly in storage + let caller: T::AccountId = whitelisted_caller(); + fund_account::(&caller, BalanceOf2::::from(10000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + fund_account::(&signer1, BalanceOf2::::from(10000u128)); + fund_account::(&signer2, BalanceOf2::::from(10000u128)); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone()]; + let threshold = 2u32; + + // Sort signers to match create_multisig behavior + signers.sort(); + + // Directly insert multisig into storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let multisig_data = MultisigDataOf:: { + signers: bounded_signers, + threshold, + nonce: 0, + proposal_nonce: 1, // We'll insert proposal with id 0 + creator: caller.clone(), + deposit: T::MultisigDeposit::get(), + last_activity: 1u32.into(), + active_proposals: 1, + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, multisig_data); + + // Create proposal with expired timestamp + let system_call = frame_system::Call::::remark { remark: vec![1u8; 32] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let expiry = 10u32.into(); // Already expired + let bounded_call: BoundedCallOf = encoded_call.clone().try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = 
ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + + let proposal_id = 0u32; + Proposals::::insert(&multisig_address, proposal_id, proposal_data); + + // Move past expiry + frame_system::Pallet::::set_block_number(100u32.into()); + + // Call as signer (caller is one of signers) + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), multisig_address.clone(), proposal_id); + + // Verify proposal was removed + assert!(!Proposals::::contains_key(&multisig_address, proposal_id)); + + Ok(()) + } + + #[benchmark] + fn claim_deposits( + p: Linear<1, { T::MaxTotalProposalsInStorage::get() }>, /* number of expired proposals + * to cleanup */ + ) -> Result<(), BenchmarkError> { + // Setup: Create multisig with multiple expired proposals directly in storage + let caller: T::AccountId = whitelisted_caller(); + fund_account::(&caller, BalanceOf2::::from(100000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + fund_account::(&signer1, BalanceOf2::::from(10000u128)); + fund_account::(&signer2, BalanceOf2::::from(10000u128)); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone()]; + let threshold = 2u32; + + // Sort signers to match create_multisig behavior + signers.sort(); + + // Directly insert multisig into storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let multisig_data = MultisigDataOf:: { + signers: bounded_signers, + threshold, + nonce: 0, + proposal_nonce: p, // We'll insert p proposals with ids 0..p-1 + creator: caller.clone(), + deposit: T::MultisigDeposit::get(), + last_activity: 1u32.into(), + active_proposals: p, + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, 
multisig_data); + + // Create multiple expired proposals directly in storage + let expiry = 10u32.into(); // Already expired + + for i in 0..p { + let system_call = frame_system::Call::::remark { remark: vec![i as u8; 32] }; + let call = ::RuntimeCall::from(system_call); + let encoded_call = call.encode(); + let bounded_call: BoundedCallOf = encoded_call.clone().try_into().unwrap(); + let bounded_approvals: BoundedApprovalsOf = vec![caller.clone()].try_into().unwrap(); + + let proposal_data = ProposalDataOf:: { + proposer: caller.clone(), + call: bounded_call, + expiry, + approvals: bounded_approvals, + deposit: 10u32.into(), + status: ProposalStatus::Active, + }; + + Proposals::::insert(&multisig_address, i, proposal_data); + } + + // Move past expiry + frame_system::Pallet::::set_block_number(100u32.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), multisig_address.clone()); + + // Verify all expired proposals were cleaned up + assert_eq!(Proposals::::iter_key_prefix(&multisig_address).count(), 0); + + Ok(()) + } + + #[benchmark] + fn dissolve_multisig() -> Result<(), BenchmarkError> { + // Setup: Create a clean multisig (no proposals, zero balance) + let caller: T::AccountId = whitelisted_caller(); + fund_account::(&caller, BalanceOf2::::from(10000u128)); + + let signer1: T::AccountId = benchmark_account("signer1", 0, SEED); + let signer2: T::AccountId = benchmark_account("signer2", 1, SEED); + + let mut signers = vec![caller.clone(), signer1.clone(), signer2.clone()]; + let threshold = 2u32; + + // Sort signers to match create_multisig behavior + signers.sort(); + + // Directly insert multisig into storage + let multisig_address = Multisig::::derive_multisig_address(&signers, 0); + let bounded_signers: BoundedSignersOf = signers.clone().try_into().unwrap(); + let deposit = T::MultisigDeposit::get(); + + // Reserve deposit from caller + T::Currency::reserve(&caller, deposit)?; + + let multisig_data = MultisigDataOf:: { + signers: 
bounded_signers, + threshold, + nonce: 0, + proposal_nonce: 0, + creator: caller.clone(), + deposit, + last_activity: frame_system::Pallet::::block_number(), + active_proposals: 0, // No proposals + proposals_per_signer: BoundedBTreeMap::new(), + }; + Multisigs::::insert(&multisig_address, multisig_data); + + // Ensure multisig address has zero balance (required for dissolution) + // Don't fund it at all + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), multisig_address.clone()); + + // Verify multisig was removed + assert!(!Multisigs::::contains_key(&multisig_address)); + + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/pallets/multisig/src/lib.rs b/pallets/multisig/src/lib.rs new file mode 100644 index 00000000..3befe0fc --- /dev/null +++ b/pallets/multisig/src/lib.rs @@ -0,0 +1,1150 @@ +//! # Quantus Multisig Pallet +//! +//! This pallet provides multisignature functionality for managing shared accounts +//! that require multiple approvals before executing transactions. +//! +//! ## Features +//! +//! - Create multisig addresses with configurable thresholds +//! - Propose transactions for multisig approval +//! - Approve proposed transactions +//! - Execute transactions once threshold is reached +//! +//! ## Data Structures +//! +//! - **Multisig**: Contains signers, threshold, and global nonce +//! 
- **Proposal**: Contains transaction data, proposer, expiry, and approvals + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; +use alloc::vec::Vec; +pub use pallet::*; +pub use weights::*; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +pub mod weights; + +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{traits::Get, BoundedBTreeMap, BoundedVec}; +use scale_info::TypeInfo; +use sp_runtime::RuntimeDebug; + +/// Multisig account data +#[derive(Encode, Decode, MaxEncodedLen, Clone, TypeInfo, RuntimeDebug, PartialEq, Eq)] +pub struct MultisigData +{ + /// List of signers who can approve transactions + pub signers: BoundedSigners, + /// Number of approvals required to execute a transaction + pub threshold: u32, + /// Global unique identifier for this multisig (for address derivation) + pub nonce: u64, + /// Proposal counter for unique proposal hashes + pub proposal_nonce: u32, + /// Account that created this multisig + pub creator: AccountId, + /// Deposit reserved by the creator + pub deposit: Balance, + /// Last block when this multisig was used + pub last_activity: BlockNumber, + /// Number of currently active (non-executed/non-cancelled) proposals + pub active_proposals: u32, + /// Counter of proposals in storage per signer (for filibuster protection) + pub proposals_per_signer: BoundedProposalsPerSigner, +} + +impl< + BlockNumber: Default, + AccountId: Default, + BoundedSigners: Default, + Balance: Default, + BoundedProposalsPerSigner: Default, + > Default + for MultisigData +{ + fn default() -> Self { + Self { + signers: Default::default(), + threshold: 1, + nonce: 0, + proposal_nonce: 0, + creator: Default::default(), + deposit: Default::default(), + last_activity: Default::default(), + active_proposals: 0, + proposals_per_signer: Default::default(), + } + } +} + +/// Proposal status +#[derive(Encode, Decode, MaxEncodedLen, Clone, TypeInfo, RuntimeDebug, 
PartialEq, Eq)] +pub enum ProposalStatus { + /// Proposal is active and awaiting approvals + Active, + /// Proposal was executed successfully + Executed, + /// Proposal was cancelled by proposer + Cancelled, +} + +/// Proposal data +#[derive(Encode, Decode, MaxEncodedLen, Clone, TypeInfo, RuntimeDebug, PartialEq, Eq)] +pub struct ProposalData { + /// Account that proposed this transaction + pub proposer: AccountId, + /// The encoded call to be executed + pub call: BoundedCall, + /// Expiry block number + pub expiry: BlockNumber, + /// List of accounts that have approved this proposal + pub approvals: BoundedApprovals, + /// Deposit held for this proposal (returned only when proposal is removed) + pub deposit: Balance, + /// Current status of the proposal + pub status: ProposalStatus, +} + +/// Balance type +type BalanceOf = <::Currency as frame_support::traits::Currency< + ::AccountId, +>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use codec::Encode; + use frame_support::{ + dispatch::{ + DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, Pays, PostDispatchInfo, + }, + pallet_prelude::*, + traits::{Currency, ReservableCurrency}, + PalletId, + }; + use frame_system::pallet_prelude::*; + use sp_arithmetic::traits::Saturating; + use sp_runtime::{ + traits::{Dispatchable, Hash, TrailingZeroInput}, + Permill, + }; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config>> { + /// The overarching call type + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + codec::Decode; + + /// Currency type for handling deposits + type Currency: Currency + ReservableCurrency; + + /// Maximum number of signers allowed in a multisig + #[pallet::constant] + type MaxSigners: Get; + + /// Maximum total number of proposals in storage per multisig (Active + Executed + + /// Cancelled) This prevents unbounded storage growth and incentivizes cleanup + #[pallet::constant] + 
type MaxTotalProposalsInStorage: Get; + + /// Maximum size of an encoded call + #[pallet::constant] + type MaxCallSize: Get; + + /// Fee charged for creating a multisig (non-refundable, burned) + #[pallet::constant] + type MultisigFee: Get>; + + /// Deposit reserved for creating a multisig (returned when dissolved). + /// Keeps the state clean by incentivizing removal of unused multisigs. + #[pallet::constant] + type MultisigDeposit: Get>; + + /// Deposit required per proposal (returned on execute or cancel) + #[pallet::constant] + type ProposalDeposit: Get>; + + /// Fee charged for creating a proposal (non-refundable, paid always) + #[pallet::constant] + type ProposalFee: Get>; + + /// Percentage increase in ProposalFee for each signer in the multisig. + /// + /// Formula: `FinalFee = ProposalFee + (ProposalFee * SignerCount * SignerStepFactor)` + /// Example: If Fee=100, Signers=5, Factor=1%, then Extra = 100 * 5 * 0.01 = 5. Total = 105. + #[pallet::constant] + type SignerStepFactor: Get; + + /// Pallet ID for generating multisig addresses + #[pallet::constant] + type PalletId: Get; + + /// Maximum duration (in blocks) that a proposal can be set to expire in the future. + /// This prevents proposals from being created with extremely far expiry dates + /// that would lock deposits and bloat storage for extended periods. + /// + /// Example: If set to 100_000 blocks (~2 weeks at 12s blocks), + /// a proposal created at block 1000 cannot have expiry > 101_000. 
+ #[pallet::constant] + type MaxExpiryDuration: Get>; + + /// Weight information for extrinsics + type WeightInfo: WeightInfo; + } + + /// Type alias for bounded signers vector + pub type BoundedSignersOf = + BoundedVec<::AccountId, ::MaxSigners>; + + /// Type alias for bounded approvals vector + pub type BoundedApprovalsOf = + BoundedVec<::AccountId, ::MaxSigners>; + + /// Type alias for bounded call data + pub type BoundedCallOf = BoundedVec::MaxCallSize>; + + /// Type alias for bounded proposals per signer map + pub type BoundedProposalsPerSignerOf = + BoundedBTreeMap<::AccountId, u32, ::MaxSigners>; + + /// Type alias for MultisigData with proper bounds + pub type MultisigDataOf = MultisigData< + BlockNumberFor, + ::AccountId, + BoundedSignersOf, + BalanceOf, + BoundedProposalsPerSignerOf, + >; + + /// Type alias for ProposalData with proper bounds + pub type ProposalDataOf = ProposalData< + ::AccountId, + BalanceOf, + BlockNumberFor, + BoundedCallOf, + BoundedApprovalsOf, + >; + + /// Global nonce for generating unique multisig addresses + #[pallet::storage] + pub type GlobalNonce = StorageValue<_, u64, ValueQuery>; + + /// Multisigs stored by their generated address + #[pallet::storage] + #[pallet::getter(fn multisigs)] + pub type Multisigs = + StorageMap<_, Blake2_128Concat, T::AccountId, MultisigDataOf, OptionQuery>; + + /// Proposals indexed by (multisig_address, proposal_nonce) + #[pallet::storage] + #[pallet::getter(fn proposals)] + pub type Proposals = StorageDoubleMap< + _, + Blake2_128Concat, + T::AccountId, + Twox64Concat, + u32, + ProposalDataOf, + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A new multisig account was created + /// [creator, multisig_address, signers, threshold, nonce] + MultisigCreated { + creator: T::AccountId, + multisig_address: T::AccountId, + signers: Vec, + threshold: u32, + nonce: u64, + }, + /// A proposal has been created + ProposalCreated { 
multisig_address: T::AccountId, proposer: T::AccountId, proposal_id: u32 }, + /// A proposal has been approved by a signer + ProposalApproved { + multisig_address: T::AccountId, + approver: T::AccountId, + proposal_id: u32, + approvals_count: u32, + }, + /// A proposal has been executed + /// Contains all data needed for indexing by SubSquid + ProposalExecuted { + multisig_address: T::AccountId, + proposal_id: u32, + proposer: T::AccountId, + call: Vec, + approvers: Vec, + result: DispatchResult, + }, + /// A proposal has been cancelled by the proposer + ProposalCancelled { + multisig_address: T::AccountId, + proposer: T::AccountId, + proposal_id: u32, + }, + /// Expired proposal was removed from storage + ProposalRemoved { + multisig_address: T::AccountId, + proposal_id: u32, + proposer: T::AccountId, + removed_by: T::AccountId, + }, + /// Batch deposits claimed + DepositsClaimed { + multisig_address: T::AccountId, + claimer: T::AccountId, + total_returned: BalanceOf, + proposals_removed: u32, + multisig_removed: bool, + }, + /// A multisig account was dissolved and deposit returned + MultisigDissolved { + multisig_address: T::AccountId, + caller: T::AccountId, + deposit_returned: BalanceOf, + }, + } + + #[pallet::error] + pub enum Error { + /// Not enough signers provided + NotEnoughSigners, + /// Threshold must be greater than zero + ThresholdZero, + /// Threshold exceeds number of signers + ThresholdTooHigh, + /// Too many signers + TooManySigners, + /// Duplicate signer in list + DuplicateSigner, + /// Multisig already exists + MultisigAlreadyExists, + /// Multisig not found + MultisigNotFound, + /// Caller is not a signer of this multisig + NotASigner, + /// Proposal not found + ProposalNotFound, + /// Caller is not the proposer + NotProposer, + /// Already approved by this signer + AlreadyApproved, + /// Not enough approvals to execute + NotEnoughApprovals, + /// Proposal expiry is in the past + ExpiryInPast, + /// Proposal expiry is too far in the future 
(exceeds MaxExpiryDuration) + ExpiryTooFar, + /// Proposal has expired + ProposalExpired, + /// Call data too large + CallTooLarge, + /// Failed to decode call data + InvalidCall, + /// Too many total proposals in storage for this multisig (cleanup required) + TooManyProposalsInStorage, + /// This signer has too many proposals in storage (filibuster protection) + TooManyProposalsPerSigner, + /// Insufficient balance for deposit + InsufficientBalance, + /// Proposal has active deposit + ProposalHasDeposit, + /// Proposal has not expired yet + ProposalNotExpired, + /// Proposal is not active (already executed or cancelled) + ProposalNotActive, + /// Cannot dissolve multisig with existing proposals (clear them first) + ProposalsExist, + /// Multisig account must have zero balance before dissolution + MultisigAccountNotZero, + } + + #[pallet::call] + impl Pallet { + /// Create a new multisig account + /// + /// Parameters: + /// - `signers`: List of accounts that can sign for this multisig + /// - `threshold`: Number of approvals required to execute transactions + /// + /// The multisig address is derived from a hash of all signers + global nonce. + /// The creator must pay a non-refundable fee (burned). 
+ #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::create_multisig())] + pub fn create_multisig( + origin: OriginFor, + signers: Vec, + threshold: u32, + ) -> DispatchResult { + let creator = ensure_signed(origin)?; + + // Validate inputs + ensure!(threshold > 0, Error::::ThresholdZero); + ensure!(!signers.is_empty(), Error::::NotEnoughSigners); + ensure!(threshold <= signers.len() as u32, Error::::ThresholdTooHigh); + ensure!(signers.len() <= T::MaxSigners::get() as usize, Error::::TooManySigners); + + // Sort signers for deterministic address generation + // (order shouldn't matter - nonce provides uniqueness) + let mut sorted_signers = signers.clone(); + sorted_signers.sort(); + + // Check for duplicate signers + for i in 1..sorted_signers.len() { + ensure!(sorted_signers[i] != sorted_signers[i - 1], Error::::DuplicateSigner); + } + + // Get and increment global nonce + let nonce = GlobalNonce::::get(); + GlobalNonce::::put(nonce.saturating_add(1)); + + // Generate multisig address from hash of (sorted_signers, nonce) + let multisig_address = Self::derive_multisig_address(&sorted_signers, nonce); + + // Ensure multisig doesn't already exist + ensure!( + !Multisigs::::contains_key(&multisig_address), + Error::::MultisigAlreadyExists + ); + + // Charge non-refundable fee (burned) + let fee = T::MultisigFee::get(); + let _ = T::Currency::withdraw( + &creator, + fee, + frame_support::traits::WithdrawReasons::FEE, + frame_support::traits::ExistenceRequirement::KeepAlive, + ) + .map_err(|_| Error::::InsufficientBalance)?; + + // Reserve deposit from creator (will be returned on dissolve) + let deposit = T::MultisigDeposit::get(); + T::Currency::reserve(&creator, deposit).map_err(|_| Error::::InsufficientBalance)?; + + // Convert sorted signers to bounded vec + let bounded_signers: BoundedSignersOf = + sorted_signers.try_into().map_err(|_| Error::::TooManySigners)?; + + // Get current block for last_activity + let current_block = 
frame_system::Pallet::::block_number(); + + // Store multisig data + Multisigs::::insert( + &multisig_address, + MultisigDataOf:: { + signers: bounded_signers.clone(), + threshold, + nonce, + proposal_nonce: 0, + creator: creator.clone(), + deposit, + last_activity: current_block, + active_proposals: 0, + proposals_per_signer: Default::default(), + }, + ); + + // Emit event with sorted signers + Self::deposit_event(Event::MultisigCreated { + creator, + multisig_address, + signers: bounded_signers.to_vec(), + threshold, + nonce, + }); + + Ok(()) + } + + /// Propose a transaction to be executed by the multisig + /// + /// Parameters: + /// - `multisig_address`: The multisig account that will execute the call + /// - `call`: The encoded call to execute + /// - `expiry`: Block number when this proposal expires + /// + /// The proposer must be a signer and must pay: + /// - A deposit (refundable - returned immediately on execution/cancellation) + /// - A fee (non-refundable, burned immediately) + /// + /// **Auto-cleanup:** Before creating a new proposal, ALL expired proposals are + /// automatically removed and deposits returned to original proposers. This is the primary + /// cleanup mechanism. + /// + /// **For threshold=1:** If the multisig threshold is 1, the proposal executes immediately. 
+ #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::propose( + call.len() as u32, + T::MaxTotalProposalsInStorage::get() + ))] + pub fn propose( + origin: OriginFor, + multisig_address: T::AccountId, + call: Vec, + expiry: BlockNumberFor, + ) -> DispatchResult { + let proposer = ensure_signed(origin)?; + + // Check if proposer is a signer + let multisig_data = + Multisigs::::get(&multisig_address).ok_or(Error::::MultisigNotFound)?; + ensure!(multisig_data.signers.contains(&proposer), Error::::NotASigner); + + // Auto-cleanup expired proposals before creating new one + // This is the primary cleanup mechanism for active multisigs + Self::auto_cleanup_expired_proposals(&multisig_address, &proposer); + + // Reload multisig data after potential cleanup + let multisig_data = + Multisigs::::get(&multisig_address).ok_or(Error::::MultisigNotFound)?; + let current_block = frame_system::Pallet::::block_number(); + + // Get signers count (used for multiple checks below) + let signers_count = multisig_data.signers.len() as u32; + + // Check total proposals in storage limit (Active + Executed + Cancelled) + // This incentivizes cleanup and prevents unbounded storage growth + let total_proposals_in_storage = + Proposals::::iter_prefix(&multisig_address).count() as u32; + ensure!( + total_proposals_in_storage < T::MaxTotalProposalsInStorage::get(), + Error::::TooManyProposalsInStorage + ); + + // Check per-signer proposal limit (filibuster protection) + // Each signer can have at most (MaxTotal / NumSigners) proposals in storage + // This prevents a single signer from monopolizing the proposal queue + // Use saturating_div to handle edge cases (division by 0, etc.) 
and ensure at least 1 + let max_per_signer = T::MaxTotalProposalsInStorage::get() + .checked_div(signers_count) + .unwrap_or(1) // If division fails (shouldn't happen), allow at least 1 + .max(1); // Ensure minimum of 1 proposal per signer + let proposer_count = + multisig_data.proposals_per_signer.get(&proposer).copied().unwrap_or(0); + ensure!(proposer_count < max_per_signer, Error::::TooManyProposalsPerSigner); + + // Check call size + ensure!(call.len() as u32 <= T::MaxCallSize::get(), Error::::CallTooLarge); + + // Validate expiry is in the future + ensure!(expiry > current_block, Error::::ExpiryInPast); + + // Validate expiry is not too far in the future + let max_expiry = current_block.saturating_add(T::MaxExpiryDuration::get()); + ensure!(expiry <= max_expiry, Error::::ExpiryTooFar); + + // Calculate dynamic fee based on number of signers + // Fee = Base + (Base * SignerCount * StepFactor) + let base_fee = T::ProposalFee::get(); + let step_factor = T::SignerStepFactor::get(); + + // Calculate extra fee: (Base * Factor) * Count + // mul_floor returns the part of the fee corresponding to the percentage + let fee_increase_per_signer = step_factor.mul_floor(base_fee); + let total_increase = fee_increase_per_signer.saturating_mul(signers_count.into()); + let fee = base_fee.saturating_add(total_increase); + + // Charge non-refundable fee (burned) + let _ = T::Currency::withdraw( + &proposer, + fee, + frame_support::traits::WithdrawReasons::FEE, + frame_support::traits::ExistenceRequirement::KeepAlive, + ) + .map_err(|_| Error::::InsufficientBalance)?; + + // Reserve deposit from proposer (will be returned) + let deposit = T::ProposalDeposit::get(); + T::Currency::reserve(&proposer, deposit) + .map_err(|_| Error::::InsufficientBalance)?; + + // Update multisig last_activity + Multisigs::::mutate(&multisig_address, |maybe_multisig| { + if let Some(multisig) = maybe_multisig { + multisig.last_activity = current_block; + } + }); + + // Convert to bounded vec + let 
bounded_call: BoundedCallOf = + call.try_into().map_err(|_| Error::::CallTooLarge)?; + + // Get and increment proposal nonce for unique ID + let proposal_id = Multisigs::::mutate(&multisig_address, |maybe_multisig| { + if let Some(multisig) = maybe_multisig { + let nonce = multisig.proposal_nonce; + multisig.proposal_nonce = multisig.proposal_nonce.saturating_add(1); + nonce + } else { + 0 // Should never happen due to earlier check + } + }); + + // Create proposal with proposer as first approval + let mut approvals = BoundedApprovalsOf::::default(); + let _ = approvals.try_push(proposer.clone()); + + let proposal = ProposalData { + proposer: proposer.clone(), + call: bounded_call, + expiry, + approvals, + deposit, + status: ProposalStatus::Active, + }; + + // Store proposal with nonce as key (simple and efficient) + Proposals::::insert(&multisig_address, proposal_id, proposal); + + // Increment active proposals counter and per-signer counter + Multisigs::::mutate(&multisig_address, |maybe_multisig| { + if let Some(multisig) = maybe_multisig { + multisig.active_proposals = multisig.active_proposals.saturating_add(1); + + // Update per-signer counter for filibuster protection + let current_count = + multisig.proposals_per_signer.get(&proposer).copied().unwrap_or(0); + let _ = multisig + .proposals_per_signer + .try_insert(proposer.clone(), current_count.saturating_add(1)); + } + }); + + // Emit event + Self::deposit_event(Event::ProposalCreated { + multisig_address: multisig_address.clone(), + proposer, + proposal_id, + }); + + // Check if threshold is reached immediately (threshold=1 case) + // Proposer is already counted as first approval + if 1 >= multisig_data.threshold { + // Threshold reached - execute immediately + // Need to get proposal again since we inserted it + let proposal = Proposals::::get(&multisig_address, proposal_id) + .ok_or(Error::::ProposalNotFound)?; + Self::do_execute(multisig_address, proposal_id, proposal)?; + } + + Ok(()) + } + + /// 
Approve a proposed transaction + /// + /// If this approval brings the total approvals to or above the threshold, + /// the transaction will be automatically executed. + /// + /// **Auto-cleanup:** Before processing the approval, ALL expired proposals are + /// automatically removed and deposits returned to original proposers. + /// + /// Parameters: + /// - `multisig_address`: The multisig account + /// - `proposal_id`: ID (nonce) of the proposal to approve + /// + /// Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual + #[pallet::call_index(2)] + #[pallet::weight(::WeightInfo::approve( + T::MaxCallSize::get(), + T::MaxTotalProposalsInStorage::get() + ))] + #[allow(clippy::useless_conversion)] + pub fn approve( + origin: OriginFor, + multisig_address: T::AccountId, + proposal_id: u32, + ) -> DispatchResultWithPostInfo { + let approver = ensure_signed(origin)?; + + // Check if approver is a signer + let multisig_data = Self::ensure_is_signer(&multisig_address, &approver)?; + + // Auto-cleanup expired proposals on any multisig activity + // Returns count of proposals in storage (which determines iteration cost) + let iterated_count = Self::auto_cleanup_expired_proposals(&multisig_address, &approver); + + // Get proposal + let mut proposal = Proposals::::get(&multisig_address, proposal_id) + .ok_or(Error::::ProposalNotFound)?; + + // Calculate actual weight based on real call size and actual storage size + // We charge for worst-case (e=Max), but refund based on actual storage size + let actual_call_size = proposal.call.len() as u32; + let actual_weight = + ::WeightInfo::approve(actual_call_size, iterated_count); + + // Check if not expired + let current_block = frame_system::Pallet::::block_number(); + ensure!(current_block <= proposal.expiry, Error::::ProposalExpired); + + // Check if already approved + ensure!(!proposal.approvals.contains(&approver), Error::::AlreadyApproved); + + // Add approval + proposal + .approvals + 
.try_push(approver.clone()) + .map_err(|_| Error::::TooManySigners)?; + + let approvals_count = proposal.approvals.len() as u32; + + // Emit approval event + Self::deposit_event(Event::ProposalApproved { + multisig_address: multisig_address.clone(), + approver, + proposal_id, + approvals_count, + }); + + // Check if threshold is reached - if so, execute immediately + if approvals_count >= multisig_data.threshold { + // Execute the transaction + Self::do_execute(multisig_address, proposal_id, proposal)?; + } else { + // Not ready yet, just save the proposal + Proposals::::insert(&multisig_address, proposal_id, proposal); + + // Update multisig last_activity + Multisigs::::mutate(&multisig_address, |maybe_multisig| { + if let Some(multisig) = maybe_multisig { + multisig.last_activity = frame_system::Pallet::::block_number(); + } + }); + } + + // Return actual weight (refund overpayment) + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) + } + + /// Cancel a proposed transaction (only by proposer) + /// + /// **Auto-cleanup:** Before processing the cancellation, ALL expired proposals are + /// automatically removed and deposits returned to original proposers. 
+ /// + /// Parameters: + /// - `multisig_address`: The multisig account + /// - `proposal_id`: ID (nonce) of the proposal to cancel + /// + /// Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual + #[pallet::call_index(3)] + #[pallet::weight(::WeightInfo::cancel( + T::MaxCallSize::get(), + T::MaxTotalProposalsInStorage::get() + ))] + #[allow(clippy::useless_conversion)] + pub fn cancel( + origin: OriginFor, + multisig_address: T::AccountId, + proposal_id: u32, + ) -> DispatchResultWithPostInfo { + let canceller = ensure_signed(origin)?; + + // Auto-cleanup expired proposals on any multisig activity + // Returns count of proposals in storage (which determines iteration cost) + let iterated_count = + Self::auto_cleanup_expired_proposals(&multisig_address, &canceller); + + // Get proposal + let proposal = Proposals::::get(&multisig_address, proposal_id) + .ok_or(Error::::ProposalNotFound)?; + + // Calculate actual weight based on real call size and actual storage size + // We charge for worst-case (e=Max), but refund based on actual storage size + let actual_call_size = proposal.call.len() as u32; + let actual_weight = ::WeightInfo::cancel(actual_call_size, iterated_count); + + // Check if caller is the proposer + ensure!(canceller == proposal.proposer, Error::::NotProposer); + + // Check if proposal is still active + ensure!(proposal.status == ProposalStatus::Active, Error::::ProposalNotActive); + + // Remove proposal from storage and return deposit immediately + Self::remove_proposal_and_return_deposit( + &multisig_address, + proposal_id, + &proposal.proposer, + proposal.deposit, + ); + + // Emit event + Self::deposit_event(Event::ProposalCancelled { + multisig_address, + proposer: canceller, + proposal_id, + }); + + // Return actual weight (refund overpayment) + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) + } + + /// Remove expired proposals and return deposits to proposers + /// + /// Can only 
be called by signers of the multisig. + /// Only removes Active proposals that have expired (past expiry block). + /// Executed and Cancelled proposals are automatically cleaned up immediately. + /// + /// The deposit is always returned to the original proposer, not the caller. + /// This allows any signer to help clean up storage even if proposer is inactive. + #[pallet::call_index(4)] + #[pallet::weight(::WeightInfo::remove_expired())] + pub fn remove_expired( + origin: OriginFor, + multisig_address: T::AccountId, + proposal_id: u32, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + + // Verify caller is a signer + let _multisig_data = Self::ensure_is_signer(&multisig_address, &caller)?; + + // Get proposal + let proposal = Proposals::::get(&multisig_address, proposal_id) + .ok_or(Error::::ProposalNotFound)?; + + // Only Active proposals can be manually removed (Executed/Cancelled already + // auto-removed) + ensure!(proposal.status == ProposalStatus::Active, Error::::ProposalNotActive); + + // Check if expired + let current_block = frame_system::Pallet::::block_number(); + ensure!(current_block > proposal.expiry, Error::::ProposalNotExpired); + + // Remove proposal from storage and return deposit + Self::remove_proposal_and_return_deposit( + &multisig_address, + proposal_id, + &proposal.proposer, + proposal.deposit, + ); + + // Emit event + Self::deposit_event(Event::ProposalRemoved { + multisig_address, + proposal_id, + proposer: proposal.proposer.clone(), + removed_by: caller, + }); + + Ok(()) + } + + /// Claim all deposits from expired proposals + /// + /// This is a batch operation that removes all expired proposals where: + /// - Caller is the proposer + /// - Proposal is Active and past expiry block + /// + /// Note: Executed and Cancelled proposals are automatically cleaned up immediately, + /// so only Active+Expired proposals need manual cleanup. + /// + /// Returns all proposal deposits to the proposer in a single transaction. 
+ #[pallet::call_index(5)] + #[pallet::weight(::WeightInfo::claim_deposits( + T::MaxTotalProposalsInStorage::get() + ))] + pub fn claim_deposits( + origin: OriginFor, + multisig_address: T::AccountId, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + + let current_block = frame_system::Pallet::::block_number(); + + let mut total_returned = BalanceOf::::zero(); + let mut removed_count = 0u32; + + // Iterate through all proposals for this multisig + // Only Active+Expired proposals exist (Executed/Cancelled are auto-removed) + let proposals_to_remove: Vec<(u32, ProposalDataOf)> = + Proposals::::iter_prefix(&multisig_address) + .filter(|(_, proposal)| { + // Only proposals where caller is proposer + if proposal.proposer != caller { + return false; + } + + // Only Active proposals can exist (Executed/Cancelled auto-removed) + // Must be expired to remove + proposal.status == ProposalStatus::Active && current_block > proposal.expiry + }) + .collect(); + + // Remove proposals and return deposits + for (id, proposal) in proposals_to_remove { + total_returned = total_returned.saturating_add(proposal.deposit); + removed_count = removed_count.saturating_add(1); + + // Remove from storage and return deposit + Self::remove_proposal_and_return_deposit( + &multisig_address, + id, + &proposal.proposer, + proposal.deposit, + ); + + // Emit event for each removed proposal + Self::deposit_event(Event::ProposalRemoved { + multisig_address: multisig_address.clone(), + proposal_id: id, + proposer: caller.clone(), + removed_by: caller.clone(), + }); + } + + // Emit summary event + Self::deposit_event(Event::DepositsClaimed { + multisig_address: multisig_address.clone(), + claimer: caller, + total_returned, + proposals_removed: removed_count, + multisig_removed: false, // Multisig is never auto-removed now + }); + + Ok(()) + } + + /// Dissolve (remove) a multisig and recover the creation deposit. 
+ /// + /// Requirements: + /// - No proposals exist (active, executed, or cancelled) - must be fully cleaned up. + /// - Multisig account balance must be zero. + /// - Can be called by the creator OR any signer. + /// + /// The deposit is ALWAYS returned to the original `creator` stored in `MultisigData`. + #[pallet::call_index(6)] + #[pallet::weight(::WeightInfo::dissolve_multisig())] + pub fn dissolve_multisig( + origin: OriginFor, + multisig_address: T::AccountId, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + + // 1. Get multisig data + let multisig_data = + Multisigs::::get(&multisig_address).ok_or(Error::::MultisigNotFound)?; + + // 2. Check permissions: Creator OR Any Signer + let is_signer = multisig_data.signers.contains(&caller); + let is_creator = multisig_data.creator == caller; + ensure!(is_signer || is_creator, Error::::NotASigner); + + // 3. Check if account is clean (no proposals at all) + // iter_prefix is efficient enough here as we just need to check if ANY exist + if Proposals::::iter_prefix(&multisig_address).next().is_some() { + return Err(Error::::ProposalsExist.into()); + } + + // 4. Check if account balance is zero + let balance = T::Currency::total_balance(&multisig_address); + ensure!(balance.is_zero(), Error::::MultisigAccountNotZero); + + // 5. Return deposit to creator + T::Currency::unreserve(&multisig_data.creator, multisig_data.deposit); + + // 6. Remove multisig from storage + Multisigs::::remove(&multisig_address); + + // 7. Emit event + Self::deposit_event(Event::MultisigDissolved { + multisig_address, + caller, + deposit_returned: multisig_data.deposit, + }); + + Ok(()) + } + } + + impl Pallet { + /// Derive a multisig address from signers and nonce + pub fn derive_multisig_address(signers: &[T::AccountId], nonce: u64) -> T::AccountId { + // Create a unique identifier from pallet id + signers + nonce. 
+ // + // IMPORTANT: + // - Do NOT `Decode` directly from a finite byte-slice and then "fallback" to a constant + // address on error: that can cause address collisions / DoS. + // - Using `TrailingZeroInput` makes decoding deterministic and infallible by providing + // an infinite stream (hash bytes padded with zeros). + let pallet_id = T::PalletId::get(); + let mut data = Vec::new(); + data.extend_from_slice(&pallet_id.0); + data.extend_from_slice(&signers.encode()); + data.extend_from_slice(&nonce.encode()); + + // Hash the data and map it deterministically into an AccountId. + let hash = T::Hashing::hash(&data); + T::AccountId::decode(&mut TrailingZeroInput::new(hash.as_ref())) + .expect("TrailingZeroInput provides sufficient bytes; qed") + } + + /// Check if an account is a signer for a given multisig + pub fn is_signer(multisig_address: &T::AccountId, account: &T::AccountId) -> bool { + if let Some(multisig_data) = Multisigs::::get(multisig_address) { + multisig_data.signers.contains(account) + } else { + false + } + } + + /// Ensure account is a signer, otherwise return error + /// Returns multisig data if successful + fn ensure_is_signer( + multisig_address: &T::AccountId, + account: &T::AccountId, + ) -> Result, DispatchError> { + let multisig_data = + Multisigs::::get(multisig_address).ok_or(Error::::MultisigNotFound)?; + ensure!(multisig_data.signers.contains(account), Error::::NotASigner); + Ok(multisig_data) + } + + /// Auto-cleanup expired proposals at the start of any multisig activity + /// This is the primary cleanup mechanism for active multisigs + /// Returns deposits to original proposers and emits cleanup events + fn auto_cleanup_expired_proposals( + multisig_address: &T::AccountId, + caller: &T::AccountId, + ) -> u32 { + let current_block = frame_system::Pallet::::block_number(); + let mut iterated_count = 0u32; + let mut expired_proposals: Vec<(u32, T::AccountId, BalanceOf)> = Vec::new(); + + // Iterate through all proposals to count them AND 
identify expired ones + for (id, proposal) in Proposals::::iter_prefix(multisig_address) { + iterated_count += 1; + if proposal.status == ProposalStatus::Active && current_block > proposal.expiry { + expired_proposals.push((id, proposal.proposer, proposal.deposit)); + } + } + + // Remove expired proposals and return deposits + for (id, expired_proposer, deposit) in expired_proposals.iter() { + Self::remove_proposal_and_return_deposit( + multisig_address, + *id, + expired_proposer, + *deposit, + ); + + // Emit event for each removed proposal + Self::deposit_event(Event::ProposalRemoved { + multisig_address: multisig_address.clone(), + proposal_id: *id, + proposer: expired_proposer.clone(), + removed_by: caller.clone(), + }); + } + + // Return total number of proposals iterated (not cleaned) + // This reflects the actual storage read cost + iterated_count + } + + /// Decrement proposal counters (active_proposals and per-signer counter) + /// Used when removing proposals from storage + fn decrement_proposal_counters(multisig_address: &T::AccountId, proposer: &T::AccountId) { + Multisigs::::mutate(multisig_address, |maybe_multisig| { + if let Some(multisig) = maybe_multisig { + multisig.active_proposals = multisig.active_proposals.saturating_sub(1); + + // Decrement per-signer counter + if let Some(count) = multisig.proposals_per_signer.get_mut(proposer) { + *count = count.saturating_sub(1); + if *count == 0 { + multisig.proposals_per_signer.remove(proposer); + } + } + } + }); + } + + /// Remove a proposal from storage and return deposit to proposer + /// Used for cleanup operations + fn remove_proposal_and_return_deposit( + multisig_address: &T::AccountId, + proposal_id: u32, + proposer: &T::AccountId, + deposit: BalanceOf, + ) { + // Remove from storage + Proposals::::remove(multisig_address, proposal_id); + + // Return deposit to proposer + T::Currency::unreserve(proposer, deposit); + + // Decrement counters + Self::decrement_proposal_counters(multisig_address, 
proposer); + } + + /// Internal function to execute a proposal + /// Called automatically from `approve()` when threshold is reached + /// + /// Removes the proposal immediately and returns deposit. + /// + /// This function is private and cannot be called from outside the pallet + /// + /// SECURITY: Uses Checks-Effects-Interactions pattern to prevent reentrancy attacks. + /// Storage is updated BEFORE dispatching the call. + fn do_execute( + multisig_address: T::AccountId, + proposal_id: u32, + proposal: ProposalDataOf, + ) -> DispatchResult { + // CHECKS: Decode the call (validation) + let call = ::RuntimeCall::decode(&mut &proposal.call[..]) + .map_err(|_| Error::::InvalidCall)?; + + // EFFECTS: Remove proposal from storage and return deposit BEFORE external interaction + // (reentrancy protection) + Self::remove_proposal_and_return_deposit( + &multisig_address, + proposal_id, + &proposal.proposer, + proposal.deposit, + ); + + // EFFECTS: Update multisig last_activity BEFORE external interaction + Multisigs::::mutate(&multisig_address, |maybe_multisig| { + if let Some(multisig) = maybe_multisig { + multisig.last_activity = frame_system::Pallet::::block_number(); + } + }); + + // INTERACTIONS: NOW execute the call as the multisig account + // Proposal already removed, so reentrancy cannot affect storage + let result = + call.dispatch(frame_system::RawOrigin::Signed(multisig_address.clone()).into()); + + // Emit event with all execution details for SubSquid indexing + Self::deposit_event(Event::ProposalExecuted { + multisig_address, + proposal_id, + proposer: proposal.proposer, + call: proposal.call.to_vec(), + approvers: proposal.approvals.to_vec(), + result: result.map(|_| ()).map_err(|e| e.error), + }); + + Ok(()) + } + } +} diff --git a/pallets/multisig/src/mock.rs b/pallets/multisig/src/mock.rs new file mode 100644 index 00000000..38f241d6 --- /dev/null +++ b/pallets/multisig/src/mock.rs @@ -0,0 +1,143 @@ +//! 
Mock runtime for testing pallet-multisig + +use crate as pallet_multisig; +use frame_support::{ + parameter_types, + traits::{ConstU32, Everything}, + PalletId, +}; +use sp_core::{crypto::AccountId32, H256}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, Permill, +}; + +type Block = frame_system::mocking::MockBlock; +type Balance = u128; + +// Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Balances: pallet_balances, + Multisig: pallet_multisig, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Test { + type RuntimeEvent = RuntimeEvent; + type BaseCallFilter = Everything; + type Block = Block; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId32; + type Lookup = IdentityLookup; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; + type RuntimeTask = (); + type SingleBlockMigrations = (); + type MultiBlockMigrator = (); + type PreInherents = (); + type PostInherents = (); + type PostTransactions = (); + type ExtensionsWeightInfo = (); +} + +parameter_types! 
{ + pub const ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; + pub const MaxFreezes: u32 = 50; + pub const MintingAccount: AccountId32 = AccountId32::new([99u8; 32]); +} + +impl pallet_balances::Config for Test { + type WeightInfo = (); + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxFreezes = MaxFreezes; + type DoneSlashHandler = (); + type MintingAccount = MintingAccount; +} + +parameter_types! { + pub const MultisigPalletId: PalletId = PalletId(*b"py/mltsg"); + pub const MaxSignersParam: u32 = 10; + pub const MaxTotalProposalsInStorageParam: u32 = 20; + pub const MaxCallSizeParam: u32 = 1024; + pub const MultisigFeeParam: Balance = 1000; // Non-refundable fee + pub const MultisigDepositParam: Balance = 500; // Refundable deposit + pub const ProposalDepositParam: Balance = 100; + pub const ProposalFeeParam: Balance = 1000; // Non-refundable fee + pub const SignerStepFactorParam: Permill = Permill::from_parts(10_000); // 1% + pub const MaxExpiryDurationParam: u64 = 10000; // 10000 blocks for testing (enough for all test scenarios) +} + +impl pallet_multisig::Config for Test { + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type MaxSigners = MaxSignersParam; + type MaxTotalProposalsInStorage = MaxTotalProposalsInStorageParam; + type MaxCallSize = MaxCallSizeParam; + type MultisigFee = MultisigFeeParam; + type MultisigDeposit = MultisigDepositParam; + type ProposalDeposit = ProposalDepositParam; + type ProposalFee = ProposalFeeParam; + type SignerStepFactor = SignerStepFactorParam; + type MaxExpiryDuration = MaxExpiryDurationParam; + type PalletId = MultisigPalletId; + type WeightInfo = 
(); +} + +// Helper to create AccountId32 from u64 +pub fn account_id(id: u64) -> AccountId32 { + let mut data = [0u8; 32]; + data[0..8].copy_from_slice(&id.to_le_bytes()); + AccountId32::new(data) +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (account_id(1), 100000), // Alice + (account_id(2), 200000), // Bob + (account_id(3), 300000), // Charlie + (account_id(4), 400000), // Dave + (account_id(5), 500000), // Eve + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() +} diff --git a/pallets/multisig/src/tests.rs b/pallets/multisig/src/tests.rs new file mode 100644 index 00000000..277672ac --- /dev/null +++ b/pallets/multisig/src/tests.rs @@ -0,0 +1,1325 @@ +//! Unit tests for pallet-multisig + +use crate::{mock::*, Error, Event, GlobalNonce, Multisigs, ProposalStatus, Proposals}; +use codec::Encode; +use frame_support::{assert_noop, assert_ok, traits::fungible::Mutate}; +use sp_core::crypto::AccountId32; + +/// Helper function to get Alice's account ID +fn alice() -> AccountId32 { + account_id(1) +} + +/// Helper function to get Bob's account ID +fn bob() -> AccountId32 { + account_id(2) +} + +/// Helper function to get Charlie's account ID +fn charlie() -> AccountId32 { + account_id(3) +} + +/// Helper function to get Dave's account ID +fn dave() -> AccountId32 { + account_id(4) +} + +/// Helper function to create a simple encoded call +fn make_call(remark: Vec) -> Vec { + let call = RuntimeCall::System(frame_system::Call::remark { remark }); + call.encode() +} + +/// Helper function to get the ID of the last proposal created +/// Returns the current proposal_nonce - 1 (last used ID) +fn get_last_proposal_id(multisig_address: &AccountId32) -> u32 { + let multisig = Multisigs::::get(multisig_address).expect("Multisig should exist"); + 
multisig.proposal_nonce.saturating_sub(1) +} + +// ==================== MULTISIG CREATION TESTS ==================== + +#[test] +fn create_multisig_works() { + new_test_ext().execute_with(|| { + // Initialize block number for events + System::set_block_number(1); + + // Setup + let creator = alice(); + let signers = vec![bob(), charlie(), dave()]; + let threshold = 2; + + // Get initial balance + let initial_balance = Balances::free_balance(creator.clone()); + let fee = 1000; // MultisigFeeParam + let deposit = 500; // MultisigDepositParam + + // Create multisig + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + threshold, + )); + + // Check balances + // Deposit is reserved, fee is burned + assert_eq!(Balances::reserved_balance(creator.clone()), deposit); + assert_eq!(Balances::free_balance(creator.clone()), initial_balance - fee - deposit); + + // Check that multisig was created + let global_nonce = GlobalNonce::::get(); + assert_eq!(global_nonce, 1); + + // Get multisig address + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Check storage + let multisig_data = Multisigs::::get(&multisig_address).unwrap(); + assert_eq!(multisig_data.threshold, threshold); + assert_eq!(multisig_data.nonce, 0); + assert_eq!(multisig_data.signers.to_vec(), signers); + assert_eq!(multisig_data.active_proposals, 0); + assert_eq!(multisig_data.creator, creator.clone()); + assert_eq!(multisig_data.deposit, deposit); + + // Check that event was emitted + System::assert_last_event( + Event::MultisigCreated { creator, multisig_address, signers, threshold, nonce: 0 } + .into(), + ); + }); +} + +#[test] +fn create_multisig_fails_with_threshold_zero() { + new_test_ext().execute_with(|| { + let creator = alice(); + let signers = vec![bob(), charlie()]; + let threshold = 0; + + assert_noop!( + Multisig::create_multisig(RuntimeOrigin::signed(creator.clone()), signers, threshold,), + Error::::ThresholdZero + ); + 
}); +} + +#[test] +fn create_multisig_fails_with_empty_signers() { + new_test_ext().execute_with(|| { + let creator = alice(); + let signers = vec![]; + let threshold = 1; + + assert_noop!( + Multisig::create_multisig(RuntimeOrigin::signed(creator.clone()), signers, threshold,), + Error::::NotEnoughSigners + ); + }); +} + +#[test] +fn create_multisig_fails_with_threshold_too_high() { + new_test_ext().execute_with(|| { + let creator = alice(); + let signers = vec![bob(), charlie()]; + let threshold = 3; // More than number of signers + + assert_noop!( + Multisig::create_multisig(RuntimeOrigin::signed(creator.clone()), signers, threshold,), + Error::::ThresholdTooHigh + ); + }); +} + +#[test] +fn create_multisig_fails_with_duplicate_signers() { + new_test_ext().execute_with(|| { + let creator = alice(); + let signers = vec![bob(), bob(), charlie()]; // Bob twice + let threshold = 2; + + assert_noop!( + Multisig::create_multisig(RuntimeOrigin::signed(creator.clone()), signers, threshold,), + Error::::DuplicateSigner + ); + }); +} + +#[test] +fn create_multiple_multisigs_increments_nonce() { + new_test_ext().execute_with(|| { + let creator = alice(); + let signers1 = vec![bob(), charlie()]; + let signers2 = vec![bob(), dave()]; + + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers1.clone(), + 2 + )); + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers2.clone(), + 2 + )); + + // Check both multisigs exist + let multisig1 = Multisig::derive_multisig_address(&signers1, 0); + let multisig2 = Multisig::derive_multisig_address(&signers2, 1); + + assert!(Multisigs::::contains_key(multisig1)); + assert!(Multisigs::::contains_key(multisig2)); + }); +} + +// ==================== PROPOSAL CREATION TESTS ==================== + +#[test] +fn propose_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + 
assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Propose a transaction + let proposer = bob(); + let call = make_call(vec![1, 2, 3]); + let expiry = 1000; + + let initial_balance = Balances::free_balance(proposer.clone()); + let proposal_deposit = 100; // ProposalDepositParam (Changed in mock) + // Fee calculation: Base(1000) + (Base(1000) * 1% * 2 signers) = 1000 + 20 = 1020 + let proposal_fee = 1020; + + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(proposer.clone()), + multisig_address.clone(), + call.clone(), + expiry + )); + + // Check balances - deposit reserved, fee sent to treasury + assert_eq!(Balances::reserved_balance(proposer.clone()), proposal_deposit); + assert_eq!( + Balances::free_balance(proposer.clone()), + initial_balance - proposal_deposit - proposal_fee + ); + // Fee is burned (reduces total issuance) + + // Check event + let proposal_id = get_last_proposal_id(&multisig_address); + System::assert_last_event( + Event::ProposalCreated { multisig_address, proposer, proposal_id }.into(), + ); + }); +} + +#[test] +fn propose_fails_if_not_signer() { + new_test_ext().execute_with(|| { + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Try to propose as non-signer + let call = make_call(vec![1, 2, 3]); + assert_noop!( + Multisig::propose(RuntimeOrigin::signed(dave()), multisig_address.clone(), call, 1000), + Error::::NotASigner + ); + }); +} + +// ==================== APPROVAL TESTS ==================== + +#[test] +fn approve_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie(), dave()]; + 
assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 3 + )); // Need 3 approvals + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 1000 + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Charlie approves (now 2/3) + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id + )); + + // Check event + System::assert_last_event( + Event::ProposalApproved { + multisig_address: multisig_address.clone(), + approver: charlie(), + proposal_id, + approvals_count: 2, + } + .into(), + ); + + // Proposal should still exist (not executed yet) + assert!(crate::Proposals::::contains_key(&multisig_address, proposal_id)); + }); +} + +#[test] +fn approve_auto_executes_when_threshold_reached() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 1000 + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Charlie approves - threshold reached (2/2), auto-executes and removes + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id + )); + + // Check that proposal was executed and immediately removed from storage + assert!(crate::Proposals::::get(&multisig_address, proposal_id).is_none()); + + // Deposit should be returned immediately + assert_eq!(Balances::reserved_balance(bob()), 0); // No longer reserved + 
+ // Check event was emitted + System::assert_has_event( + Event::ProposalExecuted { + multisig_address, + proposal_id, + proposer: bob(), + call: call.clone(), + approvers: vec![bob(), charlie()], + result: Ok(()), + } + .into(), + ); + }); +} + +// ==================== CANCELLATION TESTS ==================== + +#[test] +fn cancel_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let proposer = bob(); + let call = make_call(vec![1, 2, 3]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(proposer.clone()), + multisig_address.clone(), + call.clone(), + 1000 + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Cancel the proposal - immediately removes and returns deposit + assert_ok!(Multisig::cancel( + RuntimeOrigin::signed(proposer.clone()), + multisig_address.clone(), + proposal_id + )); + + // Proposal should be immediately removed from storage + assert!(crate::Proposals::::get(&multisig_address, proposal_id).is_none()); + + // Deposit should be returned immediately + assert_eq!(Balances::reserved_balance(proposer.clone()), 0); + + // Check event + System::assert_last_event( + Event::ProposalCancelled { multisig_address, proposer, proposal_id }.into(), + ); + }); +} + +#[test] +fn cancel_fails_if_already_executed() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + 
multisig_address.clone(), + call.clone(), + 1000 + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Approve to execute (auto-executes and removes proposal) + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id + )); + + // Try to cancel executed proposal (already removed, so ProposalNotFound) + assert_noop!( + Multisig::cancel(RuntimeOrigin::signed(bob()), multisig_address.clone(), proposal_id), + Error::::ProposalNotFound + ); + }); +} + +// ==================== DEPOSIT RECOVERY TESTS ==================== + +#[test] +fn remove_expired_works_after_grace_period() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + let expiry = 100; + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + expiry + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Move past expiry + grace period (100 blocks) + System::set_block_number(expiry + 101); + + // Any signer can remove after grace period (charlie is a signer) + assert_ok!(Multisig::remove_expired( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id + )); + + // Proposal should be gone + assert!(!crate::Proposals::::contains_key(&multisig_address, proposal_id)); + + // Deposit should be returned to proposer + assert_eq!(Balances::reserved_balance(bob()), 0); + }); +} + +#[test] +fn executed_proposals_auto_removed() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + 
signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 1000 + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Execute - should auto-remove proposal and return deposit + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id + )); + + // Proposal should be immediately removed + assert!(crate::Proposals::::get(&multisig_address, proposal_id).is_none()); + + // Deposit should be immediately returned + assert_eq!(Balances::reserved_balance(bob()), 0); + + // Trying to remove again should fail (already removed) + assert_noop!( + Multisig::remove_expired( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id + ), + Error::::ProposalNotFound + ); + }); +} + +#[test] +fn remove_expired_fails_for_non_signer() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + let expiry = 1000; + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + expiry + )); + + let proposal_id = get_last_proposal_id(&multisig_address); + + // Move past expiry + System::set_block_number(expiry + 1); + + // Dave is not a signer, should fail + assert_noop!( + Multisig::remove_expired( + RuntimeOrigin::signed(dave()), + multisig_address.clone(), + proposal_id + ), + Error::::NotASigner + ); + + // But charlie (who is a signer) can do it + assert_ok!(Multisig::remove_expired( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + proposal_id 
+ )); + }); +} + +#[test] +fn claim_deposits_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Bob creates 3 proposals + for i in 0..3 { + let call = make_call(vec![i as u8; 32]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call, + 100 + )); + } + + // All reserved + assert_eq!(Balances::reserved_balance(bob()), 300); // 3 * 100 + + // Move past expiry + grace period + System::set_block_number(201); + + // Bob claims all deposits at once + assert_ok!(Multisig::claim_deposits( + RuntimeOrigin::signed(bob()), + multisig_address.clone() + )); + + // All deposits returned + assert_eq!(Balances::reserved_balance(bob()), 0); + + // Check event + System::assert_has_event( + Event::DepositsClaimed { + multisig_address, + claimer: bob(), + total_returned: 300, + proposals_removed: 3, + multisig_removed: false, + } + .into(), + ); + }); +} + +// ==================== HELPER FUNCTION TESTS ==================== + +#[test] +fn derive_multisig_address_is_deterministic() { + new_test_ext().execute_with(|| { + let signers = vec![bob(), charlie(), dave()]; + let nonce = 42; + + let address1 = Multisig::derive_multisig_address(&signers, nonce); + let address2 = Multisig::derive_multisig_address(&signers, nonce); + + assert_eq!(address1, address2); + }); +} + +#[test] +fn derive_multisig_address_different_for_different_nonce() { + new_test_ext().execute_with(|| { + let signers = vec![bob(), charlie(), dave()]; + + let address1 = Multisig::derive_multisig_address(&signers, 0); + let address2 = Multisig::derive_multisig_address(&signers, 1); + + assert_ne!(address1, address2); + }); +} + +#[test] +fn is_signer_works() { + new_test_ext().execute_with(|| { + 
 let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig(RuntimeOrigin::signed(alice()), signers.clone(), 2)); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + assert!(Multisig::is_signer(&multisig_address, &bob())); + assert!(Multisig::is_signer(&multisig_address, &charlie())); + assert!(!Multisig::is_signer(&multisig_address, &dave())); + }); +} + +#[test] +fn too_many_proposals_in_storage_fails() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // MaxTotal = 20, 2 signers = 10 each + // Executed/Cancelled proposals are auto-removed, so only Active count toward storage + // Create 10 active proposals from Bob + for i in 0..10 { + let call = make_call(vec![i as u8]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 1000 + )); + } + // Bob has 10 active = 10 total (at per-signer limit) + + // Create 10 active proposals from Charlie + for i in 10..20 { + let call = make_call(vec![i as u8]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + call.clone(), + 1000 + )); + } + // Charlie has 10 active = 10 total (at per-signer limit) + // Total: 20 active (AT LIMIT) + + // Try to add 21st - should fail on total limit + let call = make_call(vec![99]); + assert_noop!( + Multisig::propose(RuntimeOrigin::signed(bob()), multisig_address.clone(), call, 2000), + Error::<Test>::TooManyProposalsInStorage + ); + }); +} + +#[test] +fn only_active_proposals_remain_in_storage() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + 
RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Test that only Active proposals remain in storage (Executed/Cancelled auto-removed) + + // Bob creates 10, executes 5, cancels 1 - only 4 active remain + for i in 0..10 { + let call = make_call(vec![i as u8]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 1000 + )); + + if i < 5 { + let id = get_last_proposal_id(&multisig_address); + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + id + )); + } else if i == 5 { + let id = get_last_proposal_id(&multisig_address); + assert_ok!(Multisig::cancel( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + id + )); + } + } + // Bob now has 4 Active in storage (i=6,7,8,9), 5 executed + 1 cancelled were removed + + // Bob can create 6 more to reach his per-signer limit (10) + for i in 10..16 { + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![i]), + 2000 + )); + } + // Bob: 10 Active (at per-signer limit) + + // Bob cannot create 11th + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![99]), + 3000 + ), + Error::::TooManyProposalsPerSigner + ); + }); +} + +#[test] +fn auto_cleanup_allows_new_proposals() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Bob creates 10 proposals, all expire at block 100 (at per-signer limit) + for i in 0..10 { + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![i]), + 100 + )); + } + // 
Bob: 10 Active (at per-signer limit) + + // Bob cannot create more (at limit) + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![99]), + 200 + ), + Error::<Test>::TooManyProposalsPerSigner + ); + + // Move past expiry + System::set_block_number(101); + + // Now Bob can create new - propose() auto-cleans expired + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![99]), + 200 + )); + + // Verify old proposals were removed + let count = crate::Proposals::<Test>::iter_prefix(&multisig_address).count(); + assert_eq!(count, 1); // Only the new one remains + }); +} + +#[test] +fn propose_fails_with_expiry_in_past() { + new_test_ext().execute_with(|| { + System::set_block_number(100); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + + // Try to create proposal with expiry in the past (< current_block) + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 50 + ), + Error::<Test>::ExpiryInPast + ); + + // Try with expiry equal to current block (not > current_block) + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 100 + ), + Error::<Test>::ExpiryInPast + ); + + // Valid: expiry in the future + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call, + 101 + )); + }); +} + +#[test] +fn propose_fails_with_expiry_too_far() { + new_test_ext().execute_with(|| { + System::set_block_number(100); + + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let 
multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let call = make_call(vec![1, 2, 3]); + + // MaxExpiryDurationParam = 10000 blocks (from mock.rs) + // Current block = 100 + // Max allowed expiry = 100 + 10000 = 10100 + + // Try to create proposal with expiry too far in the future + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 10101 + ), + Error::<Test>::ExpiryTooFar + ); + + // Try with expiry way beyond the limit + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 20000 + ), + Error::<Test>::ExpiryTooFar + ); + + // Valid: expiry exactly at max allowed + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call.clone(), + 10100 + )); + + // Move to next block and try again + System::set_block_number(101); + // Now max allowed = 101 + 10000 = 10101 + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call, + 10101 + )); + }); +} + +#[test] +fn propose_charges_correct_fee_with_signer_factor() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + // 3 Signers: Bob, Charlie, Dave + let signers = vec![bob(), charlie(), dave()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + let proposer = bob(); + let call = make_call(vec![1, 2, 3]); + let initial_balance = Balances::free_balance(proposer.clone()); + + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(proposer.clone()), + multisig_address, + call, + 1000 + )); + + // ProposalFeeParam = 1000 + // SignerStepFactor = 1% + // Signers = 3 + // Calculation: 1000 + (1000 * 1% * 3) = 1000 + 30 = 1030 + let expected_fee = 1030; + let deposit = 100; // ProposalDepositParam + + assert_eq!( + 
Balances::free_balance(proposer.clone()), + initial_balance - deposit - expected_fee + ); + // Fee is burned (reduces total issuance) + }); +} + +#[test] +fn dissolve_multisig_works() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let creator = alice(); + let signers = vec![bob(), charlie()]; + let deposit = 500; + let fee = 1000; + let initial_balance = Balances::free_balance(creator.clone()); + + // Create + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + assert_eq!(Balances::reserved_balance(creator.clone()), deposit); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Try to dissolve immediately (success) + assert_ok!(Multisig::dissolve_multisig( + RuntimeOrigin::signed(creator.clone()), + multisig_address.clone() + )); + + // Check cleanup + assert!(!Multisigs::::contains_key(&multisig_address)); + assert_eq!(Balances::reserved_balance(creator.clone()), 0); + // Balance returned (minus burned fee) + assert_eq!(Balances::free_balance(creator.clone()), initial_balance - fee); + }); +} + +#[test] +fn dissolve_multisig_fails_with_proposals() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let creator = alice(); + let signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Create proposal + let call = make_call(vec![1]); + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + call, + 100 + )); + + // Try to dissolve + assert_noop!( + Multisig::dissolve_multisig( + RuntimeOrigin::signed(creator.clone()), + multisig_address.clone() + ), + Error::::ProposalsExist + ); + }); +} + +#[test] +fn per_signer_proposal_limit_enforced() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let creator = alice(); + let 
signers = vec![bob(), charlie()]; + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + 2 + )); + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // MaxTotalProposalsInStorage = 20 + // With 2 signers, each can have max 20/2 = 10 proposals + // Only Active proposals count (Executed/Cancelled auto-removed) + + // Bob creates 10 active proposals (at per-signer limit) + for i in 0..10 { + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![i]), + 1000 + )); + } + + // Bob at limit - tries to create 11th + assert_noop!( + Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![99]), + 2000 + ), + Error::::TooManyProposalsPerSigner + ); + + // But Charlie can still create (independent limit) + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + make_call(vec![100]), + 2000 + )); + }); +} + +#[test] +fn propose_with_threshold_one_executes_immediately() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![alice(), bob(), charlie()]; + let threshold = 1; // Only 1 approval needed + + // Create multisig with threshold=1 + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + threshold + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Fund multisig account for balance transfer + as Mutate<_>>::mint_into(&multisig_address, 50000).unwrap(); + + let initial_dave_balance = Balances::free_balance(dave()); + + // Alice proposes a transfer - should execute immediately since threshold=1 + let transfer_call = RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { + dest: dave(), + value: 1000, + }); + + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(alice()), + multisig_address.clone(), + 
transfer_call.encode(), + 100 + )); + + let proposal_id = 0; // First proposal + + // Verify the proposal was executed immediately (should NOT exist anymore) + assert!(Proposals::::get(&multisig_address, proposal_id).is_none()); + + // Verify the transfer actually happened + assert_eq!(Balances::free_balance(dave()), initial_dave_balance + 1000); + + // Verify ProposalExecuted event was emitted + System::assert_has_event( + Event::ProposalExecuted { + multisig_address: multisig_address.clone(), + proposal_id, + proposer: alice(), + call: transfer_call.encode(), + approvers: vec![alice()], + result: Ok(()), + } + .into(), + ); + + // Verify deposit was returned to Alice (execution removes proposal) + let alice_reserved = Balances::reserved_balance(alice()); + assert_eq!(alice_reserved, 500); // Only MultisigDeposit, no ProposalDeposit + + // Verify active_proposals counter was decremented back to 0 + let multisig_data = Multisigs::::get(&multisig_address).unwrap(); + assert_eq!(multisig_data.active_proposals, 0); + }); +} + +#[test] +fn propose_with_threshold_two_waits_for_approval() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![alice(), bob(), charlie()]; + let threshold = 2; // Need 2 approvals + + // Create multisig with threshold=2 + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + threshold + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Fund multisig account + as Mutate<_>>::mint_into(&multisig_address, 50000).unwrap(); + + let initial_dave_balance = Balances::free_balance(dave()); + + // Alice proposes a transfer - should NOT execute yet + let transfer_call = RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { + dest: dave(), + value: 1000, + }); + + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(alice()), + multisig_address.clone(), + transfer_call.encode(), + 100 + )); + + let 
proposal_id = 0; + + // Verify the proposal still exists (waiting for more approvals) + let proposal = Proposals::::get(&multisig_address, proposal_id).unwrap(); + assert_eq!(proposal.status, ProposalStatus::Active); + assert_eq!(proposal.approvals.len(), 1); // Only Alice so far + + // Verify the transfer did NOT happen yet + assert_eq!(Balances::free_balance(dave()), initial_dave_balance); + + // Bob approves - NOW it should execute (threshold=2 reached) + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + proposal_id + )); + + // Now proposal should be executed and removed + assert!(Proposals::::get(&multisig_address, proposal_id).is_none()); + + // Verify the transfer happened + assert_eq!(Balances::free_balance(dave()), initial_dave_balance + 1000); + }); +} + +#[test] +fn auto_cleanup_on_approve_and_cancel() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let creator = alice(); + let signers = vec![alice(), bob(), charlie()]; + let threshold = 3; // Need all 3 signers - prevents auto-execution during test + + // Create multisig + assert_ok!(Multisig::create_multisig( + RuntimeOrigin::signed(creator.clone()), + signers.clone(), + threshold + )); + + let multisig_address = Multisig::derive_multisig_address(&signers, 0); + + // Create two proposals + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(alice()), + multisig_address.clone(), + make_call(vec![1]), + 100 // expires at block 100 + )); + + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(bob()), + multisig_address.clone(), + make_call(vec![2]), + 200 // expires at block 200 + )); + + // Verify both proposals exist + assert!(Proposals::::get(&multisig_address, 0).is_some()); + assert!(Proposals::::get(&multisig_address, 1).is_some()); + + // Move time forward past first proposal expiry + System::set_block_number(101); + + // Charlie approves proposal #1 (should trigger auto-cleanup of proposal #0) + // Note: Bob is the proposer of 
#1, so Charlie must approve + assert_ok!(Multisig::approve( + RuntimeOrigin::signed(charlie()), + multisig_address.clone(), + 1 + )); + + // Verify proposal #0 was auto-cleaned + assert!(Proposals::::get(&multisig_address, 0).is_none()); + // Proposal #1 still exists (not expired, waiting for approval) + assert!(Proposals::::get(&multisig_address, 1).is_some()); + + // Create another proposal that will expire + assert_ok!(Multisig::propose( + RuntimeOrigin::signed(alice()), + multisig_address.clone(), + make_call(vec![3]), + 150 // expires at block 150 + )); + + // Move time forward past proposal #2 expiry + System::set_block_number(151); + + // Charlie cancels proposal #1 (should trigger auto-cleanup of proposal #2) + assert_ok!(Multisig::cancel(RuntimeOrigin::signed(bob()), multisig_address.clone(), 1)); + + // Verify proposal #2 was auto-cleaned + assert!(Proposals::::get(&multisig_address, 2).is_none()); + // Proposal #1 was cancelled + assert!(Proposals::::get(&multisig_address, 1).is_none()); + + // Verify active_proposals counter is correct (should be 0) + let multisig_data = Multisigs::::get(&multisig_address).unwrap(); + assert_eq!(multisig_data.active_proposals, 0); + }); +} diff --git a/pallets/multisig/src/weights.rs b/pallets/multisig/src/weights.rs new file mode 100644 index 00000000..65a25230 --- /dev/null +++ b/pallets/multisig/src/weights.rs @@ -0,0 +1,343 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 49.1.0 +//! DATE: 2026-01-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `coldbook.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/quantus-node +// benchmark +// pallet +// --chain=dev +// --pallet=pallet_multisig +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --output=./pallets/multisig/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_multisig`. +pub trait WeightInfo { + fn create_multisig() -> Weight; + fn propose(c: u32, e: u32, ) -> Weight; + fn approve(c: u32, e: u32, ) -> Weight; + fn approve_and_execute(c: u32, ) -> Weight; + fn cancel(c: u32, e: u32, ) -> Weight; + fn remove_expired() -> Weight; + fn claim_deposits(p: u32, ) -> Weight; + fn dissolve_multisig() -> Weight; +} + +/// Weights for `pallet_multisig` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `Multisig::GlobalNonce` (r:1 w:1) + /// Proof: `Multisig::GlobalNonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + fn create_multisig() -> Weight { + // Proof Size summary in bytes: + // Measured: `152` + // Estimated: `10389` + // Minimum execution time: 192_000_000 picoseconds. + Weight::from_parts(195_000_000, 10389) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:201 w:201) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + /// The range of component `e` is `[0, 200]`. + fn propose(_c: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `458 + e * (215 ±0)` + // Estimated: `17022 + e * (16032 ±0)` + // Minimum execution time: 40_000_000 picoseconds. 
+ Weight::from_parts(140_354_473, 17022) + // Standard Error: 30_916 + .saturating_add(Weight::from_parts(14_183_732, 0).saturating_mul(e.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(e.into()))) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(e.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:202 w:201) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + /// The range of component `e` is `[0, 200]`. + fn approve(_c: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `657 + c * (1 ±0) + e * (215 ±0)` + // Estimated: `33054 + e * (16032 ±0)` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(31_012_674, 33054) + // Standard Error: 25_877 + .saturating_add(Weight::from_parts(13_708_908, 0).saturating_mul(e.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(e.into()))) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(e.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:2 w:1) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. 
+ fn approve_and_execute(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `790 + c * (1 ±0)` + // Estimated: `33054` + // Minimum execution time: 29_000_000 picoseconds. + Weight::from_parts(29_907_548, 33054) + // Standard Error: 17 + .saturating_add(Weight::from_parts(782, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Multisig::Proposals` (r:202 w:201) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + /// The range of component `e` is `[0, 200]`. + fn cancel(c: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `625 + c * (1 ±0) + e * (215 ±0)` + // Estimated: `33054 + e * (16032 ±0)` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(22_414_315, 33054) + // Standard Error: 576 + .saturating_add(Weight::from_parts(1_526, 0).saturating_mul(c.into())) + // Standard Error: 29_178 + .saturating_add(Weight::from_parts(13_866_655, 0).saturating_mul(e.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(e.into()))) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(e.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:1 w:1) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + fn remove_expired() -> Weight { + // Proof Size summary in bytes: + // Measured: `764` + // Estimated: `17022` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(23_000_000, 17022) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Multisig::Proposals` (r:201 w:200) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 200]`. + fn claim_deposits(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `625 + p * (237 ±0)` + // Estimated: `17022 + p * (16032 ±0)` + // Minimum execution time: 23_000_000 picoseconds. 
+ Weight::from_parts(28_491_742, 17022) + // Standard Error: 16_103 + .saturating_add(Weight::from_parts(13_535_595, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(p.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:1 w:0) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn dissolve_multisig() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `17022` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(30_000_000, 17022) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `Multisig::GlobalNonce` (r:1 w:1) + /// Proof: `Multisig::GlobalNonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + fn create_multisig() -> Weight { + // Proof Size summary in bytes: + // Measured: `152` + // Estimated: `10389` + // Minimum execution time: 192_000_000 picoseconds. 
+ Weight::from_parts(195_000_000, 10389) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:201 w:201) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + /// The range of component `e` is `[0, 200]`. + fn propose(_c: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `458 + e * (215 ±0)` + // Estimated: `17022 + e * (16032 ±0)` + // Minimum execution time: 40_000_000 picoseconds. + Weight::from_parts(140_354_473, 17022) + // Standard Error: 30_916 + .saturating_add(Weight::from_parts(14_183_732, 0).saturating_mul(e.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(e.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(e.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:202 w:201) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + /// The range of component `e` is `[0, 200]`. + fn approve(_c: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `657 + c * (1 ±0) + e * (215 ±0)` + // Estimated: `33054 + e * (16032 ±0)` + // Minimum execution time: 23_000_000 picoseconds. 
+ Weight::from_parts(31_012_674, 33054) + // Standard Error: 25_877 + .saturating_add(Weight::from_parts(13_708_908, 0).saturating_mul(e.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(e.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(e.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:2 w:1) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + fn approve_and_execute(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `790 + c * (1 ±0)` + // Estimated: `33054` + // Minimum execution time: 29_000_000 picoseconds. + Weight::from_parts(29_907_548, 33054) + // Standard Error: 17 + .saturating_add(Weight::from_parts(782, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Multisig::Proposals` (r:202 w:201) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 10140]`. + /// The range of component `e` is `[0, 200]`. + fn cancel(c: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `625 + c * (1 ±0) + e * (215 ±0)` + // Estimated: `33054 + e * (16032 ±0)` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(22_414_315, 33054) + // Standard Error: 576 + .saturating_add(Weight::from_parts(1_526, 0).saturating_mul(c.into())) + // Standard Error: 29_178 + .saturating_add(Weight::from_parts(13_866_655, 0).saturating_mul(e.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(e.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(e.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:1 w:1) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + fn remove_expired() -> Weight { + // Proof Size summary in bytes: + // Measured: `764` + // Estimated: `17022` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(23_000_000, 17022) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Multisig::Proposals` (r:201 w:200) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 200]`. + fn claim_deposits(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `625 + p * (237 ±0)` + // Estimated: `17022 + p * (16032 ±0)` + // Minimum execution time: 23_000_000 picoseconds. 
+ Weight::from_parts(28_491_742, 17022) + // Standard Error: 16_103 + .saturating_add(Weight::from_parts(13_535_595, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 16032).saturating_mul(p.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(6924), added: 9399, mode: `MaxEncodedLen`) + /// Storage: `Multisig::Proposals` (r:1 w:0) + /// Proof: `Multisig::Proposals` (`max_values`: None, `max_size`: Some(13557), added: 16032, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn dissolve_multisig() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `17022` + // Minimum execution time: 20_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 17022) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/pallets/reversible-transfers/src/benchmarking.rs b/pallets/reversible-transfers/src/benchmarking.rs index 1d0b3a3c..ae69f5b0 100644 --- a/pallets/reversible-transfers/src/benchmarking.rs +++ b/pallets/reversible-transfers/src/benchmarking.rs @@ -172,14 +172,9 @@ mod benchmarks { #[benchmark] fn execute_transfer() -> Result<(), BenchmarkError> { let owner: T::AccountId = whitelisted_caller(); - fund_account::(&owner, BalanceOf::::from(10000u128)); // Fund owner + fund_account::(&owner, BalanceOf::::from(10000u128)); let recipient: T::AccountId = benchmark_account("recipient", 0, SEED); - // Fund recipient with minimum_balance * 100 to match assertion expectation - let initial_balance = as frame_support::traits::Currency< - T::AccountId, - >>::minimum_balance() * - 100_u128.into(); - fund_account::(&recipient, initial_balance); + fund_account::(&recipient, BalanceOf::::from(100u128)); let interceptor: T::AccountId = benchmark_account("interceptor", 1, SEED); let transfer_amount = 100u128; @@ -210,21 +205,25 @@ mod benchmarks { #[extrinsic_call] _(execute_origin, tx_id); - // Check state cleaned up assert_eq!(AccountPendingIndex::::get(&owner), 0); assert!(!PendingTransfers::::contains_key(tx_id)); - // Check side effect of inner call (balance transfer) - let initial_balance = as frame_support::traits::Currency< - T::AccountId, - >>::minimum_balance() * - 100_u128.into(); - let expected_balance = initial_balance.saturating_add(transfer_amount.into()); - assert_eq!( - as frame_support::traits::Currency>::free_balance( - &recipient - ), - expected_balance - ); + + Ok(()) + } + + #[benchmark] + fn recover_funds() -> Result<(), BenchmarkError> { + let account: T::AccountId = whitelisted_caller(); + let guardian: T::AccountId = benchmark_account("guardian", 0, SEED); + + fund_account::(&account, 
BalanceOf::::from(10000u128)); + fund_account::(&guardian, BalanceOf::::from(10000u128)); + + let delay = T::DefaultDelay::get(); + setup_high_security_account::(account.clone(), delay, guardian.clone()); + + #[extrinsic_call] + _(RawOrigin::Signed(guardian.clone()), account.clone()); Ok(()) } diff --git a/pallets/reversible-transfers/src/lib.rs b/pallets/reversible-transfers/src/lib.rs index 2867692c..de235288 100644 --- a/pallets/reversible-transfers/src/lib.rs +++ b/pallets/reversible-transfers/src/lib.rs @@ -8,8 +8,8 @@ //! ## Volume Fee for High-Security Accounts //! //! When high-security accounts reverse transactions, a configurable volume fee -//! (expressed as a Permill) is deducted from the transaction amount and sent -//! to the treasury. Regular accounts do not incur any fees when reversing transactions. +//! (expressed as a Permill) is deducted from the transaction amount and burned. +//! Regular accounts do not incur any fees when reversing transactions. #![cfg_attr(not(feature = "std"), no_std)] @@ -41,10 +41,6 @@ use sp_runtime::traits::StaticLookup; pub type BlockNumberOrTimestampOf = BlockNumberOrTimestamp, ::Moment>; -/// Type alias for the Recovery pallet's expected block number type -pub type RecoveryBlockNumberOf = - <::BlockNumberProvider as sp_runtime::traits::BlockNumberProvider>::BlockNumber; - /// High security account details #[derive(Encode, Decode, MaxEncodedLen, Clone, Default, TypeInfo, Debug, PartialEq, Eq)] pub struct HighSecurityAccountData { @@ -123,7 +119,6 @@ pub mod pallet { > + pallet_balances::Config::RuntimeHoldReason> + pallet_assets::Config::Balance> + pallet_assets_holder::Config::RuntimeHoldReason> - + pallet_recovery::Config { /// Scheduler for the runtime. We use the Named scheduler for cancellability. type Scheduler: ScheduleNamed< @@ -189,12 +184,9 @@ pub mod pallet { /// Volume fee taken from reversed transactions for high-security accounts only, /// expressed as a Permill (e.g., Permill::from_percent(1) = 1%). 
Regular accounts incur no - /// fees. + /// fees. The fee is burned (removed from total issuance). #[pallet::constant] type VolumeFee: Get; - - /// Treasury account ID where volume fees are sent. - type TreasuryAccountId: Get; } /// Maps accounts to their chosen reversibility delay period (in milliseconds). @@ -287,11 +279,11 @@ pub mod pallet { execute_at: DispatchTime, T::Moment>, }, /// A scheduled transaction has been successfully cancelled by the owner. - /// [who, tx_id] TransactionCancelled { who: T::AccountId, tx_id: T::Hash }, /// A scheduled transaction was executed by the scheduler. - /// [tx_id, dispatch_result] TransactionExecuted { tx_id: T::Hash, result: DispatchResultWithPostInfo }, + /// Funds were recovered from a high security account by its guardian. + FundsRecovered { account: T::AccountId, guardian: T::AccountId }, } #[pallet::error] @@ -353,7 +345,7 @@ pub mod pallet { delay: BlockNumberOrTimestampOf, interceptor: T::AccountId, ) -> DispatchResult { - let who = ensure_signed(origin.clone())?; + let who = ensure_signed(origin)?; ensure!(interceptor != who.clone(), Error::::InterceptorCannotBeSelf); ensure!( @@ -363,17 +355,6 @@ pub mod pallet { Self::validate_delay(&delay)?; - // Set up zero delay recovery for interceptor - // The interceptor then simply needs to claim the recovery in order to be able - // to make calls on behalf of the high security account. - let recovery_delay_blocks: RecoveryBlockNumberOf = Zero::zero(); - pallet_recovery::Pallet::::create_recovery( - origin, - alloc::vec![interceptor.clone()], - One::one(), - recovery_delay_blocks, - )?; - let high_security_account_data = HighSecurityAccountData { interceptor: interceptor.clone(), delay }; @@ -500,6 +481,37 @@ pub mod pallet { Self::do_schedule_transfer_inner(who.clone(), dest, who, amount, delay, Some(asset_id)) } + + /// Allows the guardian (interceptor) to recover all funds from a high security + /// account by transferring the entire balance to themselves. 
+ /// + /// This is an emergency function for when the high security account may be compromised. + #[pallet::call_index(7)] + #[pallet::weight(::WeightInfo::recover_funds())] + #[allow(clippy::useless_conversion)] + pub fn recover_funds( + origin: OriginFor, + account: T::AccountId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let high_security_account_data = HighSecurityAccounts::::get(&account) + .ok_or(Error::::AccountNotHighSecurity)?; + + ensure!(who == high_security_account_data.interceptor, Error::::InvalidReverser); + + let call: RuntimeCallOf = pallet_balances::Call::::transfer_all { + dest: T::Lookup::unlookup(who.clone()), + keep_alive: false, + } + .into(); + + let result = call.dispatch(frame_system::RawOrigin::Signed(account.clone()).into()); + + Self::deposit_event(Event::FundsRecovered { account, guardian: who }); + + result + } } #[pallet::hooks] @@ -833,11 +845,8 @@ pub mod pallet { // No fee for regular accounts (Zero::zero(), pending.amount) }; - let treasury_account = T::TreasuryAccountId::get(); - - // For assets, transfer held funds to treasury (fee) and interceptor (remaining) - // For native balances, transfer held funds to treasury (fee) and interceptor - // (remaining) + // For assets, burn held funds (fee) and transfer remaining to interceptor + // For native balances, burn held funds (fee) and transfer remaining to interceptor if let Ok((call, _)) = T::Preimages::peek::>(&pending.call) { if let Ok(pallet_assets::Call::transfer_keep_alive { id, .. 
}) = call.clone().try_into() @@ -845,15 +854,13 @@ pub mod pallet { let reason = Self::asset_hold_reason(); let asset_id = id.into(); - // Transfer fee to treasury if fee_amount > 0 - let _ = as AssetsHold>>::transfer_on_hold( + // Burn fee amount if fee_amount > 0 + let _ = as AssetsHold>>::burn_held( asset_id.clone(), &reason, &pending.from, - &treasury_account, fee_amount, Precision::Exact, - Restriction::Free, Fortitude::Polite, )?; @@ -872,14 +879,12 @@ pub mod pallet { if let Ok(pallet_balances::Call::transfer_keep_alive { .. }) = call.clone().try_into() { - // Transfer fee to treasury - pallet_balances::Pallet::::transfer_on_hold( + // Burn fee amount + pallet_balances::Pallet::::burn_held( &HoldReason::ScheduledTransfer.into(), &pending.from, - &treasury_account, fee_amount, Precision::Exact, - Restriction::Free, Fortitude::Polite, )?; diff --git a/pallets/reversible-transfers/src/tests/mock.rs b/pallets/reversible-transfers/src/tests/mock.rs index 39aa6052..e1d829b7 100644 --- a/pallets/reversible-transfers/src/tests/mock.rs +++ b/pallets/reversible-transfers/src/tests/mock.rs @@ -49,9 +49,6 @@ pub fn eve() -> AccountId { pub fn ferdie() -> AccountId { account_id(255) } -pub fn treasury() -> AccountId { - account_id(99) -} /// Helper function for interceptor account (avoiding + 100 calculations) pub fn interceptor_1() -> AccountId { @@ -139,6 +136,10 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } +parameter_types! { + pub MintingAccount: AccountId = AccountId::new([1u8; 32]); +} + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { type Balance = Balance; @@ -148,6 +149,7 @@ impl pallet_balances::Config for Test { type WeightInfo = (); type RuntimeHoldReason = RuntimeHoldReason; type MaxFreezes = MaxReversibleTransfers; + type MintingAccount = MintingAccount; } // In memory storage @@ -195,8 +197,6 @@ parameter_types! 
{ pub const MaxReversibleTransfers: u32 = 100; pub const MaxInterceptorAccounts: u32 = 10; pub const HighSecurityVolumeFee: Permill = Permill::from_percent(1); - /// Mock treasury account ID for tests - pub const TreasuryAccount: AccountId = AccountId::new([99u8; 32]); } impl pallet_reversible_transfers::Config for Test { @@ -215,7 +215,6 @@ impl pallet_reversible_transfers::Config for Test { type TimeProvider = MockTimestamp; type MaxInterceptorAccounts = MaxInterceptorAccounts; type VolumeFee = HighSecurityVolumeFee; - type TreasuryAccountId = TreasuryAccount; } parameter_types! { @@ -346,8 +345,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { (account_id(109), 100_000_000_000), (account_id(110), 100_000_000_000), (account_id(111), 100_000_000_000), - // Treasury account for fee collection tests (must meet existential deposit) - (account_id(99), 1), ], } .assimilate_storage(&mut t) diff --git a/pallets/reversible-transfers/src/tests/test_high_security_account.rs b/pallets/reversible-transfers/src/tests/test_high_security_account.rs index eb6f92d8..c9c8aa73 100644 --- a/pallets/reversible-transfers/src/tests/test_high_security_account.rs +++ b/pallets/reversible-transfers/src/tests/test_high_security_account.rs @@ -1,11 +1,66 @@ -use crate::tests::{ - mock::*, - test_reversible_transfers::{calculate_tx_id, transfer_call}, +use crate::{ + tests::{ + mock::*, + test_reversible_transfers::{calculate_tx_id, transfer_call}, + }, + Event, }; -use frame_support::assert_ok; +use frame_support::{assert_err, assert_ok}; +use pallet_balances::TotalIssuance; // NOTE: Many of the high security / reversibility behaviors are enforced via SignedExtension or -// external pallets (Recovery/Proxy). They are covered by integration tests in runtime. +// external pallets (Proxy). They are covered by integration tests in runtime. 
+ +#[test] +fn guardian_can_recover_all_funds_from_high_security_account() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + let hs_user = alice(); + let guardian = bob(); + + let initial_hs_balance = Balances::free_balance(&hs_user); + let initial_guardian_balance = Balances::free_balance(&guardian); + + assert_ok!(ReversibleTransfers::recover_funds( + RuntimeOrigin::signed(guardian.clone()), + hs_user.clone() + )); + + assert_eq!(Balances::free_balance(&hs_user), 0); + assert_eq!( + Balances::free_balance(&guardian), + initial_guardian_balance + initial_hs_balance + ); + + System::assert_has_event(Event::FundsRecovered { account: hs_user, guardian }.into()); + }); +} + +#[test] +fn recover_funds_fails_if_caller_is_not_guardian() { + new_test_ext().execute_with(|| { + let hs_user = alice(); + let not_guardian = charlie(); + + assert_err!( + ReversibleTransfers::recover_funds(RuntimeOrigin::signed(not_guardian), hs_user), + crate::Error::::InvalidReverser + ); + }); +} + +#[test] +fn recover_funds_fails_for_non_high_security_account() { + new_test_ext().execute_with(|| { + let regular_user = charlie(); + let attacker = dave(); + + assert_err!( + ReversibleTransfers::recover_funds(RuntimeOrigin::signed(attacker), regular_user), + crate::Error::::AccountNotHighSecurity + ); + }); +} #[test] fn guardian_can_cancel_reversible_transactions_for_hs_account() { @@ -13,12 +68,11 @@ fn guardian_can_cancel_reversible_transactions_for_hs_account() { let hs_user = alice(); // reversible from genesis with interceptor=2 let guardian = bob(); let dest = charlie(); - let treasury = treasury(); let amount = 10_000u128; // Use larger amount so volume fee is visible // Record initial balances let initial_guardian_balance = Balances::free_balance(&guardian); - let initial_treasury_balance = Balances::free_balance(&treasury); + let initial_total_issuance = TotalIssuance::::get(); // Compute tx_id BEFORE scheduling (matches pallet logic using current GlobalNonce) let 
call = transfer_call(dest.clone(), amount); @@ -36,7 +90,7 @@ fn guardian_can_cancel_reversible_transactions_for_hs_account() { assert!(ReversibleTransfers::pending_dispatches(tx_id).is_none()); // Verify volume fee was applied for high-security account - // Expected fee: 10,000 * 100 / 10,000 = 100 tokens + // Expected fee: 10,000 * 1% = 100 tokens let expected_fee = 100; let expected_remaining = amount - expected_fee; @@ -47,11 +101,11 @@ fn guardian_can_cancel_reversible_transactions_for_hs_account() { "Guardian should receive remaining amount after volume fee deduction" ); - // Check that treasury received the fee + // Check that fee was burned (total issuance decreased) assert_eq!( - Balances::free_balance(&treasury), - initial_treasury_balance + expected_fee, - "Treasury should receive volume fee from high-security account cancellation" + TotalIssuance::::get(), + initial_total_issuance - expected_fee, + "Volume fee should be burned from total issuance" ); }); } diff --git a/pallets/reversible-transfers/src/tests/test_reversible_transfers.rs b/pallets/reversible-transfers/src/tests/test_reversible_transfers.rs index 002ce3b3..21ca06b5 100644 --- a/pallets/reversible-transfers/src/tests/test_reversible_transfers.rs +++ b/pallets/reversible-transfers/src/tests/test_reversible_transfers.rs @@ -592,7 +592,6 @@ fn cancel_dispatch_works() { System::set_block_number(1); let user = alice(); // High-security account from genesis let interceptor = bob(); - let treasury = treasury(); let amount = 10_000; let call = transfer_call(interceptor.clone(), amount); let tx_id = calculate_tx_id::(user.clone(), &call); @@ -604,7 +603,7 @@ fn cancel_dispatch_works() { // Record initial balances let initial_interceptor_balance = Balances::free_balance(&interceptor); - let initial_treasury_balance = Balances::free_balance(&treasury); + let initial_total_issuance = pallet_balances::TotalIssuance::::get(); assert_eq!(Agenda::::get(execute_block).len(), 0); @@ -633,7 +632,7 @@ fn 
cancel_dispatch_works() { assert_eq!(Agenda::::get(execute_block).len(), 0); // Verify volume fee was applied for high-security account - // Expected fee: 10,000 * 100 / 10,000 = 100 tokens + // Expected fee: 10,000 * 1% = 100 tokens let expected_fee = 100; let expected_remaining = amount - expected_fee; @@ -645,10 +644,11 @@ fn cancel_dispatch_works() { "High-security account should have volume fee deducted" ); + // Check that fee was burned (total issuance decreased) assert_eq!( - Balances::free_balance(&treasury), - initial_treasury_balance + expected_fee, - "Treasury should receive volume fee from high-security account cancellation" + pallet_balances::TotalIssuance::::get(), + initial_total_issuance - expected_fee, + "Volume fee should be burned from total issuance" ); // Check event @@ -662,13 +662,12 @@ fn no_volume_fee_for_regular_reversible_accounts() { System::set_block_number(1); let user = charlie(); // Regular account (not high-security) let recipient = dave(); - let treasury = treasury(); let amount = 10_000; // Check initial balances let initial_user_balance = Balances::free_balance(&user); let initial_recipient_balance = Balances::free_balance(&recipient); - let initial_treasury_balance = Balances::free_balance(&treasury); + let initial_total_issuance = pallet_balances::TotalIssuance::::get(); let call = transfer_call(recipient.clone(), amount); let tx_id = calculate_tx_id::(user.clone(), &call); @@ -700,11 +699,11 @@ fn no_volume_fee_for_regular_reversible_accounts() { "Recipient should not receive funds when transaction is cancelled" ); - // Verify treasury balance unchanged + // Verify total issuance unchanged (no fee burned for regular accounts) assert_eq!( - Balances::free_balance(&treasury), - initial_treasury_balance, - "Treasury should not receive fee from regular account cancellation" + pallet_balances::TotalIssuance::::get(), + initial_total_issuance, + "Total issuance should not change for regular account cancellation" ); // Should still 
have TransactionCancelled event diff --git a/pallets/reversible-transfers/src/weights.rs b/pallets/reversible-transfers/src/weights.rs index 9fbe3a66..b5c0f5e2 100644 --- a/pallets/reversible-transfers/src/weights.rs +++ b/pallets/reversible-transfers/src/weights.rs @@ -18,27 +18,23 @@ //! Autogenerated weights for `pallet_reversible_transfers` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 47.2.0 -//! DATE: 2025-06-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 49.1.0 +//! DATE: 2026-01-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `MacBook-Pro-4.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` +//! HOSTNAME: `arunachala.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// frame-omni-bencher -// v1 +// ./target/release/quantus-node // benchmark // pallet -// --runtime -// ./target/release/wbuild/quantus-runtime/quantus_runtime.wasm -// --pallet -// pallet-reversible-transfers -// --extrinsic -// * -// --template -// ./.maintain/frame-weight-template.hbs -// --output -// ./pallets/reversible-transfers/src/weights.rs +// --chain=dev +// --pallet=pallet_reversible_transfers +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --output=pallets/reversible-transfers/src/weights.rs +// --template=.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,28 +51,35 @@ pub trait WeightInfo { fn schedule_transfer() -> Weight; fn cancel() -> Weight; fn execute_transfer() -> Weight; + fn recover_funds() -> Weight; } /// Weights for `pallet_reversible_transfers` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `ReversibleTransfers::ReversibleAccounts` (r:1 w:1) - /// Proof: `ReversibleTransfers::ReversibleAccounts` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:1) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::InterceptorIndex` (r:1 w:1) + /// Proof: `ReversibleTransfers::InterceptorIndex` (`max_values`: None, `max_size`: Some(1073), added: 3548, mode: `MaxEncodedLen`) fn set_high_security() -> Weight { // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3556` - // Minimum execution time: 9_000_000 picoseconds. - Weight::from_parts(9_000_000, 3556) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Measured: `192` + // Estimated: `4538` + // Minimum execution time: 78_000_000 picoseconds. 
+ Weight::from_parts(80_000_000, 4538) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `ReversibleTransfers::ReversibleAccounts` (r:1 w:0) - /// Proof: `ReversibleTransfers::ReversibleAccounts` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:0) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::GlobalNonce` (r:1 w:1) + /// Proof: `ReversibleTransfers::GlobalNonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `ReversibleTransfers::AccountPendingIndex` (r:1 w:1) /// Proof: `ReversibleTransfers::AccountPendingIndex` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `ReversibleTransfers::PendingTransfers` (r:1 w:1) - /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(231), added: 2706, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersBySender` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersBySender` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersByRecipient` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersByRecipient` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) /// Storage: `Timestamp::Now` (r:1 w:0) @@ -85,78 +88,113 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(10718), added: 13193, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: 
`Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfers` (r:0 w:1) + /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(291), added: 2766, mode: `MaxEncodedLen`) fn schedule_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `203` + // Measured: `637` // Estimated: `14183` - // Minimum execution time: 179_000_000 picoseconds. - Weight::from_parts(180_000_000, 14183) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Minimum execution time: 536_000_000 picoseconds. + Weight::from_parts(550_000_000, 14183) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `ReversibleTransfers::PendingTransfers` (r:1 w:1) - /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(231), added: 2706, mode: `MaxEncodedLen`) - /// Storage: `ReversibleTransfers::ReversibleAccounts` (r:1 w:0) - /// Proof: `ReversibleTransfers::ReversibleAccounts` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(291), added: 2766, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:0) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) /// Storage: `ReversibleTransfers::AccountPendingIndex` (r:1 w:1) /// Proof: `ReversibleTransfers::AccountPendingIndex` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersBySender` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersBySender` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: 
`ReversibleTransfers::PendingTransfersByRecipient` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersByRecipient` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(10718), added: 13193, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `809` + // Measured: `2224` // Estimated: `14183` - // Minimum execution time: 122_000_000 picoseconds. - Weight::from_parts(123_000_000, 14183) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Minimum execution time: 342_000_000 picoseconds. 
+ Weight::from_parts(349_000_000, 14183) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `ReversibleTransfers::PendingTransfers` (r:1 w:1) - /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(231), added: 2706, mode: `MaxEncodedLen`) + /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(291), added: 2766, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ReversibleTransfers::AccountPendingIndex` (r:1 w:1) /// Proof: `ReversibleTransfers::AccountPendingIndex` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersBySender` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersBySender` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersByRecipient` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersByRecipient` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::TransferCount` (r:1 w:1) + /// Proof: `Balances::TransferCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::TransferProof` (r:0 w:1) /// Proof: `Balances::TransferProof` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `MaxEncodedLen`) fn execute_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `510` - // 
Estimated: `3696` - // Minimum execution time: 86_000_000 picoseconds. - Weight::from_parts(88_000_000, 3696) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) + // Measured: `1360` + // Estimated: `3834` + // Minimum execution time: 276_000_000 picoseconds. + Weight::from_parts(290_000_000, 3834) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) + } + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:0) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::TransferCount` (r:1 w:1) + /// Proof: `Balances::TransferCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Balances::TransferProof` (r:0 w:1) + /// Proof: `Balances::TransferProof` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `MaxEncodedLen`) + fn recover_funds() -> Weight { + // Proof Size summary in bytes: + // Measured: `477` + // Estimated: `3593` + // Minimum execution time: 103_000_000 picoseconds. + Weight::from_parts(106_000_000, 3593) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } } // For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: `ReversibleTransfers::ReversibleAccounts` (r:1 w:1) - /// Proof: `ReversibleTransfers::ReversibleAccounts` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:1) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::InterceptorIndex` (r:1 w:1) + /// Proof: `ReversibleTransfers::InterceptorIndex` (`max_values`: None, `max_size`: Some(1073), added: 3548, mode: `MaxEncodedLen`) fn set_high_security() -> Weight { // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3556` - // Minimum execution time: 9_000_000 picoseconds. - Weight::from_parts(9_000_000, 3556) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Measured: `192` + // Estimated: `4538` + // Minimum execution time: 78_000_000 picoseconds. 
+ Weight::from_parts(80_000_000, 4538) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `ReversibleTransfers::ReversibleAccounts` (r:1 w:0) - /// Proof: `ReversibleTransfers::ReversibleAccounts` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:0) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::GlobalNonce` (r:1 w:1) + /// Proof: `ReversibleTransfers::GlobalNonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `ReversibleTransfers::AccountPendingIndex` (r:1 w:1) /// Proof: `ReversibleTransfers::AccountPendingIndex` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `ReversibleTransfers::PendingTransfers` (r:1 w:1) - /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(231), added: 2706, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersBySender` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersBySender` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersByRecipient` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersByRecipient` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) /// Storage: `Timestamp::Now` (r:1 w:0) @@ -165,55 +203,84 @@ impl WeightInfo for () { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(10718), added: 13193, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` 
(`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfers` (r:0 w:1) + /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(291), added: 2766, mode: `MaxEncodedLen`) fn schedule_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `203` + // Measured: `637` // Estimated: `14183` - // Minimum execution time: 179_000_000 picoseconds. - Weight::from_parts(180_000_000, 14183) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Minimum execution time: 536_000_000 picoseconds. + Weight::from_parts(550_000_000, 14183) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `ReversibleTransfers::PendingTransfers` (r:1 w:1) - /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(231), added: 2706, mode: `MaxEncodedLen`) - /// Storage: `ReversibleTransfers::ReversibleAccounts` (r:1 w:0) - /// Proof: `ReversibleTransfers::ReversibleAccounts` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(291), added: 2766, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:0) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) /// Storage: `ReversibleTransfers::AccountPendingIndex` (r:1 w:1) /// Proof: `ReversibleTransfers::AccountPendingIndex` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersBySender` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersBySender` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: 
`ReversibleTransfers::PendingTransfersByRecipient` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersByRecipient` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(10718), added: 13193, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `809` + // Measured: `2224` // Estimated: `14183` - // Minimum execution time: 122_000_000 picoseconds. - Weight::from_parts(123_000_000, 14183) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Minimum execution time: 342_000_000 picoseconds. 
+ Weight::from_parts(349_000_000, 14183) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `ReversibleTransfers::PendingTransfers` (r:1 w:1) - /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(231), added: 2706, mode: `MaxEncodedLen`) + /// Proof: `ReversibleTransfers::PendingTransfers` (`max_values`: None, `max_size`: Some(291), added: 2766, mode: `MaxEncodedLen`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `ReversibleTransfers::AccountPendingIndex` (r:1 w:1) /// Proof: `ReversibleTransfers::AccountPendingIndex` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersBySender` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersBySender` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `ReversibleTransfers::PendingTransfersByRecipient` (r:1 w:1) + /// Proof: `ReversibleTransfers::PendingTransfersByRecipient` (`max_values`: None, `max_size`: Some(369), added: 2844, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::TransferCount` (r:1 w:1) + /// Proof: `Balances::TransferCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) /// Storage: `Balances::TransferProof` (r:0 w:1) /// Proof: `Balances::TransferProof` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `MaxEncodedLen`) fn execute_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `510` - // 
Estimated: `3696` - // Minimum execution time: 86_000_000 picoseconds. - Weight::from_parts(88_000_000, 3696) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) + // Measured: `1360` + // Estimated: `3834` + // Minimum execution time: 276_000_000 picoseconds. + Weight::from_parts(290_000_000, 3834) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) + } + /// Storage: `ReversibleTransfers::HighSecurityAccounts` (r:1 w:0) + /// Proof: `ReversibleTransfers::HighSecurityAccounts` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::TransferCount` (r:1 w:1) + /// Proof: `Balances::TransferCount` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Balances::TransferProof` (r:0 w:1) + /// Proof: `Balances::TransferProof` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `MaxEncodedLen`) + fn recover_funds() -> Weight { + // Proof Size summary in bytes: + // Measured: `477` + // Estimated: `3593` + // Minimum execution time: 103_000_000 picoseconds. 
+ Weight::from_parts(106_000_000, 3593) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } } diff --git a/pallets/wormhole/src/mock.rs b/pallets/wormhole/src/mock.rs index 4c764f26..574ac336 100644 --- a/pallets/wormhole/src/mock.rs +++ b/pallets/wormhole/src/mock.rs @@ -89,6 +89,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type MaxFreezes = (); type DoneSlashHandler = (); + type MintingAccount = MintingAccount; } // --- PALLET WORMHOLE --- diff --git a/primitives/header/Cargo.toml b/primitives/header/Cargo.toml new file mode 100644 index 00000000..a164947d --- /dev/null +++ b/primitives/header/Cargo.toml @@ -0,0 +1,40 @@ +[package] +authors.workspace = true +description = "Fork of sp-runtime's Header type with a custom hash function that's felt aligned for our wormhole circuits" +edition.workspace = true +homepage.workspace = true +license = "Apache-2.0" +name = "qp-header" +publish = false +repository.workspace = true +version = "0.1.0" + +[dependencies] +codec = { features = ["derive"], workspace = true } +log.workspace = true +p3-field = { workspace = true } +p3-goldilocks = { workspace = true } +qp-poseidon = { workspace = true, features = ["serde"] } +qp-poseidon-core = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +serde = { workspace = true, features = ["derive"], optional = true } +sp-core = { features = ["serde"], workspace = true } +sp-runtime = { features = ["serde"], workspace = true } + +[dev-dependencies] +hex = { workspace = true } +serde_json = { workspace = true, default-features = false, features = [ + "alloc", + "std", +] } + + +[features] +default = ["serde", "std"] +std = [ + "codec/std", + "qp-poseidon/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", +] diff --git a/primitives/header/src/lib.rs b/primitives/header/src/lib.rs new file mode 100644 index 00000000..00dac653 --- /dev/null +++ 
b/primitives/header/src/lib.rs @@ -0,0 +1,349 @@ +//! Fork of sp-runtime's generic implementation of a block header. +//! We override the hashing function to ensure a felt aligned pre-image for the block hash. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Codec, Decode, DecodeWithMemTracking, Encode}; +use p3_field::integers::QuotientMap; +use p3_goldilocks::Goldilocks; +use qp_poseidon_core::{ + hash_variable_length, + serialization::{injective_bytes_to_felts, unsafe_digest_bytes_to_felts}, +}; +use scale_info::TypeInfo; +use sp_core::U256; +use sp_runtime::{ + generic::Digest, + traits::{AtLeast32BitUnsigned, BlockNumber, Hash as HashT, MaybeDisplay, Member}, + RuntimeDebug, +}; +extern crate alloc; + +use alloc::vec::Vec; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +/// Custom block header that hashes itself with Poseidon over Goldilocks field elements. +#[derive(Encode, Decode, PartialEq, Eq, Clone, RuntimeDebug, TypeInfo, DecodeWithMemTracking)] +#[scale_info(skip_type_params(Hash))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "serde", serde(deny_unknown_fields))] +pub struct Header +where + Number: Copy + Into + TryFrom, +{ + pub parent_hash: Hash::Output, + #[cfg_attr( + feature = "serde", + serde(serialize_with = "serialize_number", deserialize_with = "deserialize_number") + )] + pub number: Number, + pub state_root: Hash::Output, + pub extrinsics_root: Hash::Output, + pub digest: Digest, +} + +#[cfg(feature = "serde")] +pub fn serialize_number + TryFrom>( + val: &T, + s: S, +) -> Result +where + S: serde::Serializer, +{ + let u256: U256 = (*val).into(); + serde::Serialize::serialize(&u256, s) +} + +#[cfg(feature = "serde")] +pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>(d: D) -> Result +where + D: serde::Deserializer<'a>, +{ + let u256: U256 = serde::Deserialize::deserialize(d)?; + 
TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) +} + +impl sp_runtime::traits::Header for Header +where + Number: BlockNumber, + Hash: HashT, + Hash::Output: From<[u8; 32]>, +{ + type Number = Number; + type Hash = ::Output; + type Hashing = Hash; + + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Digest, + ) -> Self { + Self { number, extrinsics_root, state_root, parent_hash, digest } + } + fn number(&self) -> &Self::Number { + &self.number + } + + fn set_number(&mut self, num: Self::Number) { + self.number = num + } + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } + + fn digest(&self) -> &Digest { + &self.digest + } + + fn digest_mut(&mut self) -> &mut Digest { + #[cfg(feature = "std")] + log::debug!(target: "header", "Retrieving mutable reference to digest"); + &mut self.digest + } + // We override the default hashing function to use + // a felt aligned pre-image for poseidon hashing. + fn hash(&self) -> Self::Hash { + Header::hash(self) + } +} + +impl Header +where + Number: Member + + core::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + Into + + TryFrom, + Hash: HashT, + Hash::Output: From<[u8; 32]>, +{ + /// Convenience helper for computing the hash of the header without having + /// to import the trait. 
+ pub fn hash(&self) -> Hash::Output { + let max_encoded_felts = 4 * 3 + 1 + 28; // 3 hashout fields + 1 u32 + 28 felts for injective digest encoding + let mut felts = Vec::with_capacity(max_encoded_felts); + + // parent_hash : 32 bytes → 4 felts + felts.extend(unsafe_digest_bytes_to_felts::( + self.parent_hash.as_ref().try_into().expect("hash is 32 bytes"), + )); + + // block number as u64 (compact encoded, but we only need the value) + // constrain the block number to be with u32 range for simplicity + let number = self.number.into(); + felts.push(Goldilocks::from_int(number.as_u32() as u64)); + + // state_root : 32 bytes → 4 felts + felts.extend(unsafe_digest_bytes_to_felts::( + self.state_root.as_ref().try_into().expect("hash is 32 bytes"), + )); + + // extrinsics_root : 32 bytes → 4 felts + felts.extend(unsafe_digest_bytes_to_felts::( + self.extrinsics_root.as_ref().try_into().expect("hash is 32 bytes"), + )); + + // digest – injective encoding + felts.extend(injective_bytes_to_felts::(&self.digest.encode())); + + let poseidon_hash: [u8; 32] = hash_variable_length(felts); + poseidon_hash.into() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use qp_poseidon::PoseidonHasher; + use sp_core::H256; + use sp_runtime::{traits::BlakeTwo256, DigestItem}; + + #[test] + fn should_serialize_numbers() { + fn serialize(num: u128) -> String { + let mut v = vec![]; + { + let mut ser = serde_json::Serializer::new(std::io::Cursor::new(&mut v)); + serialize_number(&num, &mut ser).unwrap(); + } + String::from_utf8(v).unwrap() + } + + assert_eq!(serialize(0), "\"0x0\"".to_owned()); + assert_eq!(serialize(1), "\"0x1\"".to_owned()); + assert_eq!(serialize(u64::MAX as u128), "\"0xffffffffffffffff\"".to_owned()); + assert_eq!(serialize(u64::MAX as u128 + 1), "\"0x10000000000000000\"".to_owned()); + } + + #[test] + fn should_deserialize_number() { + fn deserialize(num: &str) -> u128 { + let mut der = serde_json::Deserializer::from_str(num); + deserialize_number(&mut 
der).unwrap() + } + + assert_eq!(deserialize("\"0x0\""), 0); + assert_eq!(deserialize("\"0x1\""), 1); + assert_eq!(deserialize("\"0xffffffffffffffff\""), u64::MAX as u128); + assert_eq!(deserialize("\"0x10000000000000000\""), u64::MAX as u128 + 1); + } + + #[test] + fn ensure_format_is_unchanged() { + let header = Header:: { + parent_hash: BlakeTwo256::hash(b"1"), + number: 2, + state_root: BlakeTwo256::hash(b"3"), + extrinsics_root: BlakeTwo256::hash(b"4"), + digest: Digest { logs: vec![sp_runtime::generic::DigestItem::Other(b"6".to_vec())] }, + }; + + let header_encoded = header.encode(); + assert_eq!( + header_encoded, + vec![ + 146, 205, 245, 120, 196, 112, 133, 165, 153, 34, 86, 240, 220, 249, 125, 11, 25, + 241, 241, 201, 222, 77, 95, 227, 12, 58, 206, 97, 145, 182, 229, 219, 2, 0, 0, 0, + 88, 19, 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, + 112, 93, 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, + 73, 151, 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 184, + 12, 73, 104, 49, 200, 204, 31, 143, 13, 4, 0, 4, 54 + ], + ); + assert_eq!(Header::::decode(&mut &header_encoded[..]).unwrap(), header); + + let header = Header:: { + parent_hash: BlakeTwo256::hash(b"1000"), + number: 2000, + state_root: BlakeTwo256::hash(b"3000"), + extrinsics_root: BlakeTwo256::hash(b"4000"), + digest: Digest { logs: vec![sp_runtime::generic::DigestItem::Other(b"5000".to_vec())] }, + }; + + let header_encoded = header.encode(); + assert_eq!( + header_encoded, + vec![ + 197, 243, 254, 225, 31, 117, 21, 218, 179, 213, 92, 6, 247, 164, 230, 25, 47, 166, + 140, 117, 142, 159, 195, 202, 67, 196, 238, 26, 44, 18, 33, 92, 208, 7, 0, 0, 219, + 225, 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, + 232, 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, + 98, 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, + 44, 111, 126, 54, 34, 
155, 220, 253, 124, 4, 0, 16, 53, 48, 48, 48 + ], + ); + assert_eq!(Header::::decode(&mut &header_encoded[..]).unwrap(), header); + } + + fn hash_header(x: &[u8]) -> [u8; 32] { + let mut y = x; + if let Ok(header) = Header::::decode(&mut y) { + // Only treat this as a header if we consumed the entire input. + if y.is_empty() { + let max_encoded_felts = 4 * 3 + 1 + 28; // 3 hashout fields + 1 u32 + 28 felts + let mut felts = Vec::with_capacity(max_encoded_felts); + + let parent_hash = header.parent_hash.as_bytes(); + let number = header.number; + let state_root = header.state_root.as_bytes(); + let extrinsics_root = header.extrinsics_root.as_bytes(); + let digest = header.digest.encode(); + + felts.extend(unsafe_digest_bytes_to_felts::( + parent_hash.try_into().expect("Parent hash expected to equal 32 bytes"), + )); + felts.push(Goldilocks::from_int(number as u64)); + felts.extend(unsafe_digest_bytes_to_felts::( + state_root.try_into().expect("State root expected to equal 32 bytes"), + )); + felts.extend(unsafe_digest_bytes_to_felts::( + extrinsics_root.try_into().expect("Extrinsics root expected to equal 32 bytes"), + )); + felts.extend(injective_bytes_to_felts::(&digest)); + + return hash_variable_length(felts); + } + } + // Fallback: canonical bytes hashing for non-header data + PoseidonHasher::hash_padded(x) + } + + #[test] + fn poseidon_header_hash_matches_old_path() { + use codec::Encode; + + // Example header from a real block on devnet + let parent_hash = "839b2d2ac0bf4aa71b18ad1ba5e2880b4ef06452cefacd255cfd76f6ad2c7966"; + let number = 4; + let state_root = "1688817041c572d6c971681465f401f06d0fdcfaed61d28c06d42dc2d07816d5"; + let extrinsics_root = "7c6cace2e91b6314e05410b91224c11f5dd4a4a2dbf0e39081fddbe4ac9ad252"; + let digest = Digest { + logs: vec![ + DigestItem::PreRuntime( + [112, 111, 119, 95], + [ + 233, 182, 183, 107, 158, 1, 115, 19, 219, 126, 253, 86, 30, 208, 176, 70, + 21, 45, 180, 229, 9, 62, 91, 4, 6, 53, 245, 52, 48, 38, 123, 225, + ] + 
.to_vec(), + ), + DigestItem::Seal( + [112, 111, 119, 95], + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 77, 142, + ] + .to_vec(), + ), + ], + }; + let header = Header:: { + parent_hash: H256::from_slice( + hex::decode(parent_hash).expect("valid hex parent hash").as_slice(), + ), + number, + state_root: H256::from_slice( + hex::decode(state_root).expect("valid hex state root").as_slice(), + ), + extrinsics_root: H256::from_slice( + hex::decode(extrinsics_root).expect("valid hex extrinsics root").as_slice(), + ), + digest, + }; + + let encoded = header.encode(); + + let old = hash_header(&encoded); // old path + let new: [u8; 32] = header.hash().into(); + println!("Old hash: 0x{}", hex::encode(old)); + + assert_eq!(old, new); + } +} diff --git a/qpow-math/Cargo.toml b/qpow-math/Cargo.toml index e241d510..2a6066bd 100644 --- a/qpow-math/Cargo.toml +++ b/qpow-math/Cargo.toml @@ -6,8 +6,6 @@ version = "0.1.0" [dependencies] hex = { workspace = true, features = ["alloc"] } log = { version = "0.4.22", default-features = false } -num-bigint = { version = "0.4", default-features = false } -num-traits = { version = "0.2", default-features = false } primitive-types = { version = "0.13.1", default-features = false } qp-poseidon-core = { workspace = true } diff --git a/qpow-math/src/lib.rs b/qpow-math/src/lib.rs index 50142ee9..af2a4f1f 100644 --- a/qpow-math/src/lib.rs +++ b/qpow-math/src/lib.rs @@ -49,49 +49,6 @@ pub fn get_nonce_hash( result } -/// Mine a contiguous range of nonces using simple incremental search. -/// Returns the first valid nonce and its hash if one is found. 
-/// This is called during local mining -pub fn mine_range( - block_hash: [u8; 32], - start_nonce: [u8; 64], - steps: u64, - difficulty: U512, -) -> Option<([u8; 64], U512)> { - if steps == 0 { - return None; - } - - if difficulty == U512::zero() { - log::error!( - "mine_range should not be called with 0 difficulty, but was for block_hash: {:?}", - block_hash - ); - return None; - } - - let mut nonce_u = U512::from_big_endian(&start_nonce); - let max_target = U512::MAX; - let target = max_target / difficulty; - - for _ in 0..steps { - let nonce_bytes = nonce_u.to_big_endian(); - let hash_result = get_nonce_hash(block_hash, nonce_bytes); - - if hash_result < target { - log::debug!(target: "math", "💎 Local miner found nonce {:x} with hash {:x} and target {:x} and block_hash {:?}", - nonce_u.low_u32() as u16, hash_result.low_u32() as u16, - target.low_u32() as u16, hex::encode(block_hash)); - return Some((nonce_bytes, hash_result)); - } - - // Advance to next nonce - nonce_u = nonce_u.saturating_add(U512::from(1u64)); - } - - None -} - #[cfg(test)] mod tests { use super::*; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 2c1ad93c..fe292af7 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -31,8 +31,8 @@ pallet-assets.workspace = true pallet-assets-holder = { workspace = true, default-features = false } pallet-balances.workspace = true pallet-conviction-voting.workspace = true -pallet-merkle-airdrop.workspace = true pallet-mining-rewards.workspace = true +pallet-multisig.workspace = true pallet-preimage.workspace = true pallet-qpow.workspace = true pallet-ranked-collective.workspace = true @@ -46,9 +46,9 @@ pallet-transaction-payment.workspace = true pallet-transaction-payment-rpc-runtime-api.workspace = true pallet-treasury.workspace = true pallet-utility.workspace = true -pallet-vesting.workspace = true primitive-types.workspace = true qp-dilithium-crypto.workspace = true +qp-header = { workspace = true, features = ["serde"] } qp-poseidon = { 
workspace = true, features = ["serde"] } qp-scheduler.workspace = true scale-info = { features = ["derive", "serde"], workspace = true } @@ -95,8 +95,8 @@ std = [ "pallet-assets/std", "pallet-balances/std", "pallet-conviction-voting/std", - "pallet-merkle-airdrop/std", "pallet-mining-rewards/std", + "pallet-multisig/std", "pallet-preimage/std", "pallet-qpow/std", "pallet-ranked-collective/std", @@ -110,14 +110,13 @@ std = [ "pallet-transaction-payment/std", "pallet-treasury/std", "pallet-utility/std", - "pallet-vesting/std", "primitive-types/std", "qp-dilithium-crypto/full_crypto", "qp-dilithium-crypto/std", + "qp-header/std", "qp-poseidon/std", "qp-scheduler/std", "scale-info/std", - "scale-info/std", "serde_json/std", "sp-api/std", "sp-block-builder/std", @@ -144,8 +143,8 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-conviction-voting/runtime-benchmarks", - "pallet-merkle-airdrop/runtime-benchmarks", "pallet-mining-rewards/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-qpow/runtime-benchmarks", "pallet-ranked-collective/runtime-benchmarks", @@ -157,7 +156,6 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", - "pallet-vesting/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] @@ -176,7 +174,6 @@ try-runtime = [ "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-treasury/try-runtime", - "pallet-vesting/try-runtime", "sp-runtime/try-runtime", ] diff --git a/runtime/src/benchmarks.rs b/runtime/src/benchmarks.rs index c670981c..cf13e3e1 100644 --- a/runtime/src/benchmarks.rs +++ b/runtime/src/benchmarks.rs @@ -30,8 +30,8 @@ frame_benchmarking::define_benchmarks!( [pallet_timestamp, Timestamp] [pallet_sudo, Sudo] [pallet_reversible_transfers, ReversibleTransfers] - [pallet_merkle_airdrop, MerkleAirdrop] 
[pallet_mining_rewards, MiningRewards] + [pallet_multisig, Multisig] [pallet_scheduler, Scheduler] [pallet_qpow, QPoW] ); diff --git a/runtime/src/configs/mod.rs b/runtime/src/configs/mod.rs index ea2b6bc1..a439621c 100644 --- a/runtime/src/configs/mod.rs +++ b/runtime/src/configs/mod.rs @@ -39,7 +39,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ AsEnsureOriginWithArg, ConstU128, ConstU32, ConstU8, EitherOf, Get, NeverEnsureOrigin, - VariantCountOf, WithdrawReasons, + VariantCountOf, }, weights::{ constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, @@ -55,17 +55,14 @@ use pallet_ranked_collective::Linear; use pallet_transaction_payment::{ConstFeeMultiplier, FungibleAdapter, Multiplier}; use qp_poseidon::PoseidonHasher; use qp_scheduler::BlockNumberOrTimestamp; -use sp_runtime::{ - traits::{AccountIdConversion, ConvertInto, One}, - FixedU128, Perbill, Permill, -}; +use sp_runtime::{traits::One, FixedU128, Perbill, Permill}; use sp_version::RuntimeVersion; // Local module imports use super::{ AccountId, Balance, Balances, Block, BlockNumber, Hash, Nonce, OriginCaller, PalletInfo, Preimage, Referenda, Runtime, RuntimeCall, RuntimeEvent, RuntimeFreezeReason, - RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Scheduler, System, Timestamp, Vesting, DAYS, + RuntimeHoldReason, RuntimeOrigin, RuntimeTask, Scheduler, System, Timestamp, DAYS, EXISTENTIAL_DEPOSIT, MICRO_UNIT, TARGET_BLOCK_TIME_MS, UNIT, VERSION, }; use sp_core::U512; @@ -85,8 +82,6 @@ parameter_types! 
{ // To upload, 10Mbs link takes 4.1s and 100Mbs takes 500ms pub RuntimeBlockLength: BlockLength = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 189; - pub const MerkleAirdropPalletId: PalletId = PalletId(*b"airdrop!"); - pub const UnsignedClaimPriority: u32 = 100; } /// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from @@ -196,6 +191,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type MaxFreezes = VariantCountOf; type DoneSlashHandler = (); + type MintingAccount = MintingAccount; } parameter_types! { @@ -426,25 +422,6 @@ impl pallet_sudo::Config for Runtime { type WeightInfo = pallet_sudo::weights::SubstrateWeight; } -parameter_types! { - pub const MinVestedTransfer: Balance = UNIT; - /// Unvested funds can be transferred and reserved for any other means (reserves overlap) - pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = - WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); -} - -impl pallet_vesting::Config for Runtime { - type Currency = Balances; - type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_vesting::weights::SubstrateWeight; - type MinVestedTransfer = MinVestedTransfer; - type BlockNumberToBalance = ConvertInto; - type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; - type BlockNumberProvider = System; - - const MAX_VESTING_SCHEDULES: u32 = 28; -} - impl pallet_utility::Config for Runtime { type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; @@ -481,10 +458,8 @@ parameter_types! 
{ pub const MinDelayPeriodBlocks: BlockNumber = 2; pub const MaxReversibleTransfers: u32 = 10; pub const MaxInterceptorAccounts: u32 = 32; - /// Volume fee for reversed transactions from high-security accounts only, in basis points (10 = 0.1%) + /// Volume fee for reversed transactions from high-security accounts only (1% fee is burned) pub const HighSecurityVolumeFee: Permill = Permill::from_percent(1); - /// Treasury account ID - pub TreasuryAccountId: AccountId = TreasuryPalletId::get().into_account_truncating(); } impl pallet_reversible_transfers::Config for Runtime { @@ -503,21 +478,6 @@ impl pallet_reversible_transfers::Config for Runtime { type TimeProvider = Timestamp; type MaxInterceptorAccounts = MaxInterceptorAccounts; type VolumeFee = HighSecurityVolumeFee; - type TreasuryAccountId = TreasuryAccountId; -} - -parameter_types! { - pub const MaxProofs: u32 = 4096; -} - -impl pallet_merkle_airdrop::Config for Runtime { - type Vesting = Vesting; - type MaxProofs = MaxProofs; - type PalletId = MerkleAirdropPalletId; - type WeightInfo = pallet_merkle_airdrop::weights::SubstrateWeight; - type UnsignedClaimPriority = UnsignedClaimPriority; - type BlockNumberProvider = System; - type BlockNumberToBalance = ConvertInto; } parameter_types! { @@ -603,6 +563,37 @@ impl pallet_assets_holder::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; } +// Multisig configuration +parameter_types! 
{ + pub const MultisigPalletId: PalletId = PalletId(*b"py/mltsg"); + pub const MaxSigners: u32 = 100; + pub const MaxTotalProposalsInStorage: u32 = 200; // Max total in storage (Active + Executed + Cancelled) + pub const MaxCallSize: u32 = 10240; // 10KB + pub const MultisigFee: Balance = 100 * MILLI_UNIT; // 0.1 UNIT (non-refundable) + pub const MultisigDeposit: Balance = 500 * MILLI_UNIT; // 0.5 UNIT (refundable) + pub const ProposalDeposit: Balance = 1000 * MILLI_UNIT; // 1 UNIT (locked until cleanup) + pub const ProposalFee: Balance = 1000 * MILLI_UNIT; // 1 UNIT (non-refundable) + pub const SignerStepFactorParam: Permill = Permill::from_percent(1); + pub const MaxExpiryDuration: BlockNumber = 100_800; // ~2 weeks at 12s blocks (14 days * 24h * 60m * 60s / 12s) +} + +/// Whitelist for calls that can be proposed in multisigs +impl pallet_multisig::Config for Runtime { + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type MaxSigners = MaxSigners; + type MaxTotalProposalsInStorage = MaxTotalProposalsInStorage; + type MaxCallSize = MaxCallSize; + type MultisigFee = MultisigFee; + type MultisigDeposit = MultisigDeposit; + type ProposalDeposit = ProposalDeposit; + type ProposalFee = ProposalFee; + type SignerStepFactor = SignerStepFactorParam; + type MaxExpiryDuration = MaxExpiryDuration; + type PalletId = MultisigPalletId; + type WeightInfo = pallet_multisig::weights::SubstrateWeight; +} + impl TryFrom for pallet_balances::Call { type Error = (); fn try_from(call: RuntimeCall) -> Result { diff --git a/runtime/src/genesis_config_presets.rs b/runtime/src/genesis_config_presets.rs index 3f42f727..ff91a2cd 100644 --- a/runtime/src/genesis_config_presets.rs +++ b/runtime/src/genesis_config_presets.rs @@ -80,6 +80,7 @@ pub fn development_config_genesis() -> Value { let ss58_version = sp_core::crypto::Ss58AddressFormat::custom(189); for account in endowed_accounts.iter() { log::info!("🍆 Endowed account: {:?}", 
account.to_ss58check_with_version(ss58_version)); + log::info!("🍆 Endowed account raw: {:?}", account); } genesis_template(endowed_accounts, crystal_alice().into_account()) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 28df014f..d1ef9c8f 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -22,7 +22,6 @@ use sp_version::RuntimeVersion; pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_merkle_airdrop; pub use pallet_reversible_transfers as ReversibleTransfersCall; pub use pallet_timestamp::Call as TimestampCall; @@ -35,7 +34,6 @@ pub mod transaction_extensions; use crate::governance::pallet_custom_origins; use qp_poseidon::PoseidonHasher; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -52,7 +50,7 @@ pub mod opaque { // However, some internal checks in dev build expect extrinsics_root to be computed with same // Hash function, so we change the configs/mod.rs Hashing type as well // Opaque block header type. - pub type Header = generic::Header; + pub type Header = qp_header::Header; // Opaque block type. pub type Block = generic::Block; @@ -134,7 +132,7 @@ pub type BlockNumber = u32; pub type Address = MultiAddress; /// Block header type as expected by this runtime. -pub type Header = generic::Header; +pub type Header = qp_header::Header; /// Block type as expected by this runtime. 
pub type Block = generic::Block; @@ -217,9 +215,6 @@ mod runtime { #[runtime::pallet_index(7)] pub type MiningRewards = pallet_mining_rewards; - #[runtime::pallet_index(8)] - pub type Vesting = pallet_vesting; - #[runtime::pallet_index(9)] pub type Preimage = pallet_preimage; @@ -244,9 +239,6 @@ mod runtime { #[runtime::pallet_index(16)] pub type TechReferenda = pallet_referenda::Pallet; - #[runtime::pallet_index(17)] - pub type MerkleAirdrop = pallet_merkle_airdrop; - #[runtime::pallet_index(18)] pub type TreasuryPallet = pallet_treasury; @@ -261,4 +253,7 @@ mod runtime { #[runtime::pallet_index(22)] pub type AssetsHolder = pallet_assets_holder; + + #[runtime::pallet_index(23)] + pub type Multisig = pallet_multisig; } diff --git a/runtime/tests/common.rs b/runtime/tests/common.rs index d4eed8c8..452351ae 100644 --- a/runtime/tests/common.rs +++ b/runtime/tests/common.rs @@ -38,8 +38,8 @@ impl TestCommons { /// Create a test externality with governance track timing based on feature flags /// - Without `production-governance-tests`: Uses fast 2-block periods for all governance tracks - /// - With `production-governance-tests`: Uses production timing (hours/days) - /// This allows CI to test both fast (for speed) and slow (for correctness) governance + /// - With `production-governance-tests`: Uses production timing (hours/days) This allows CI to + /// test both fast (for speed) and slow (for correctness) governance pub fn new_fast_governance_test_ext() -> sp_io::TestExternalities { #[cfg(feature = "production-governance-tests")] { diff --git a/runtime/tests/governance/mod.rs b/runtime/tests/governance/mod.rs index 13fa1876..1261a867 100644 --- a/runtime/tests/governance/mod.rs +++ b/runtime/tests/governance/mod.rs @@ -2,4 +2,3 @@ pub mod engine; pub mod logic; pub mod tech_collective; pub mod treasury; -pub mod vesting; diff --git a/runtime/tests/governance/vesting.rs b/runtime/tests/governance/vesting.rs deleted file mode 100644 index 02f7700f..00000000 --- 
a/runtime/tests/governance/vesting.rs +++ /dev/null @@ -1,619 +0,0 @@ -#[cfg(test)] -mod tests { - use crate::common::TestCommons; - use codec::Encode; - use frame_support::{ - assert_ok, - traits::{Bounded, Currency, VestingSchedule}, - }; - use pallet_conviction_voting::{AccountVote, Vote}; - use pallet_vesting::VestingInfo; - use quantus_runtime::{ - Balances, ConvictionVoting, Preimage, Referenda, RuntimeCall, RuntimeOrigin, System, - Utility, Vesting, DAYS, UNIT, - }; - use sp_runtime::{ - traits::{BlakeTwo256, Hash}, - MultiAddress, - }; - - /// Test case: Grant application through referendum with vesting payment schedule - /// - /// Scenario: - /// 1. Grant proposal submitted for referendum voting (treasury track) - /// 2. After positive voting, treasury spend is approved and executed - /// 3. Separate vesting implementation follows (two-stage governance pattern) - #[test] - fn test_grant_application_with_vesting_schedule() { - TestCommons::new_fast_governance_test_ext().execute_with(|| { - // Setup accounts - let proposer = TestCommons::account_id(1); - let beneficiary = TestCommons::account_id(2); - let voter1 = TestCommons::account_id(3); - let voter2 = TestCommons::account_id(4); - - // Give voters some balance for voting - Balances::make_free_balance_be(&voter1, 1000 * UNIT); - Balances::make_free_balance_be(&voter2, 1000 * UNIT); - Balances::make_free_balance_be(&proposer, 10000 * UNIT); // Proposer needs more funds for vesting transfer - - // Step 1: Create a treasury proposal for referendum - let grant_amount = 1000 * UNIT; - let vesting_period = 30; // Fast test: 30 blocks instead of 30 days - let per_block = grant_amount / vesting_period as u128; - - // Create the vesting info for later implementation - let vesting_info = VestingInfo::new(grant_amount, per_block, 1); - - // Treasury call for referendum approval - let treasury_call = RuntimeCall::TreasuryPallet(pallet_treasury::Call::spend { - asset_kind: Box::new(()), - amount: grant_amount, - 
beneficiary: Box::new(MultiAddress::Id(beneficiary.clone())), - valid_from: None, - }); - - // Note: Two-stage process - referendum approves principle, implementation follows - let _vesting_call = RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(beneficiary.clone()), - schedule: vesting_info, - }); - - // Two-stage governance flow: referendum approves treasury spend principle - // Implementation details (like vesting schedule) handled in separate execution phase - let referendum_call = treasury_call; - - // Step 2: Submit preimage for the referendum call - let encoded_proposal = referendum_call.encode(); - let preimage_hash = BlakeTwo256::hash(&encoded_proposal); - - assert_ok!(Preimage::note_preimage( - RuntimeOrigin::signed(proposer.clone()), - encoded_proposal.clone() - )); - - // Step 3: Submit referendum for treasury spending (using treasury track) - let bounded_call = - Bounded::Lookup { hash: preimage_hash, len: encoded_proposal.len() as u32 }; - assert_ok!(Referenda::submit( - RuntimeOrigin::signed(proposer.clone()), - Box::new( - quantus_runtime::governance::pallet_custom_origins::Origin::SmallSpender.into() - ), - bounded_call, - frame_support::traits::schedule::DispatchTime::After(1) - )); - - // Step 4: Vote on referendum - let referendum_index = 0; - - // Vote YES with conviction - assert_ok!(ConvictionVoting::vote( - RuntimeOrigin::signed(voter1.clone()), - referendum_index, - AccountVote::Standard { - vote: Vote { - aye: true, - conviction: pallet_conviction_voting::Conviction::Locked1x, - }, - balance: 500 * UNIT, - } - )); - - assert_ok!(ConvictionVoting::vote( - RuntimeOrigin::signed(voter2.clone()), - referendum_index, - AccountVote::Standard { - vote: Vote { - aye: true, - conviction: pallet_conviction_voting::Conviction::Locked2x, - }, - balance: 300 * UNIT, - } - )); - - // Step 5: Wait for referendum to pass and execute - // Fast forward blocks for voting period + confirmation period (using fast 
governance - // timing) - let blocks_to_advance = 2 + 2 + 2 + 2 + 1; // prepare + decision + confirm + enactment + 1 - TestCommons::run_to_block(System::block_number() + blocks_to_advance); - - // The referendum should now be approved and treasury spend executed - - // Step 6: Implementation phase - after referendum approval, implement with vesting - // This demonstrates a realistic two-stage governance pattern: - // 1. Community votes on grant approval (principle) - // 2. Treasury council/governance implements with appropriate safeguards (vesting) - // This separation allows for community input on allocation while maintaining - // implementation flexibility - - println!("Referendum approved treasury spend. Now implementing vesting..."); - - // Implementation of the approved grant with vesting schedule - // This would typically be done by treasury council or automated system - assert_ok!(Vesting::force_vested_transfer( - RuntimeOrigin::root(), - MultiAddress::Id(proposer.clone()), - MultiAddress::Id(beneficiary.clone()), - vesting_info, - )); - - let initial_balance = Balances::free_balance(&beneficiary); - let locked_balance = Vesting::vesting_balance(&beneficiary).unwrap_or(0); - - println!("Beneficiary balance: {:?}", initial_balance); - println!("Locked balance: {:?}", locked_balance); - - assert!(locked_balance > 0, "Vesting should have been created"); - - // Step 7: Test vesting unlock over time - let initial_block = System::block_number(); - let initial_locked_amount = locked_balance; // Save the initial locked amount - - // Check initial state - println!("Initial balance: {:?}", initial_balance); - println!("Initial locked: {:?}", locked_balance); - println!("Initial block: {:?}", initial_block); - - // Fast forward a few blocks and check unlocking - TestCommons::run_to_block(initial_block + 10); - - // Check after some blocks - let mid_balance = Balances::free_balance(&beneficiary); - let mid_locked = Vesting::vesting_balance(&beneficiary).unwrap_or(0); - 
- println!("Mid balance: {:?}", mid_balance); - println!("Mid locked: {:?}", mid_locked); - - // The test should pass if vesting is working correctly - // mid_locked should be less than the initial locked amount - assert!( - mid_locked < initial_locked_amount, - "Some funds should be unlocked over time: initial_locked={:?}, mid_locked={:?}", - initial_locked_amount, - mid_locked - ); - - // Fast-forward to end of vesting period - TestCommons::run_to_block(initial_block + vesting_period + 1); - - // All funds should be unlocked - let final_balance = Balances::free_balance(&beneficiary); - let final_locked = Vesting::vesting_balance(&beneficiary).unwrap_or(0); - - println!("Final balance: {:?}", final_balance); - println!("Final locked: {:?}", final_locked); - - assert_eq!(final_locked, 0, "All funds should be unlocked"); - // Note: In the vesting pallet, when funds are fully vested, they become available - // but the balance might not increase if the initial transfer was part of the vesting - // The main assertion is that the vesting worked correctly (final_locked == 0) - println!("Vesting test completed successfully - funds are fully unlocked"); - }); - } - - /// Test case: Multi-milestone grant with multiple vesting schedules - /// - /// Scenario: Grant paid out in multiple tranches (milestones) - /// after achieving specific goals - #[test] - fn test_milestone_based_grant_with_multiple_vesting() { - TestCommons::new_fast_governance_test_ext().execute_with(|| { - let grantee = TestCommons::account_id(1); - let grantor = TestCommons::account_id(2); - - Balances::make_free_balance_be(&grantor, 10000 * UNIT); - - // Atomic milestone funding: all operations succeed or fail together - let milestone1_amount = 300 * UNIT; - let milestone2_amount = 400 * UNIT; - let milestone3_amount = 300 * UNIT; - - let milestone1_vesting = VestingInfo::new(milestone1_amount, milestone1_amount / 30, 1); - let milestone2_vesting = - VestingInfo::new(milestone2_amount, milestone2_amount / 
60, 31); - - // Create batch call for all milestone operations - let _milestone_batch = RuntimeCall::Utility(pallet_utility::Call::batch_all { - calls: vec![ - // Milestone 1: Initial funding with short vesting - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(grantee.clone()), - schedule: milestone1_vesting, - }), - // Milestone 2: Mid-term funding with longer vesting - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(grantee.clone()), - schedule: milestone2_vesting, - }), - // Milestone 3: Immediate payment - RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: MultiAddress::Id(grantee.clone()), - value: milestone3_amount, - }), - ], - }); - - // Execute all milestones atomically - let calls = vec![ - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(grantee.clone()), - schedule: milestone1_vesting, - }), - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(grantee.clone()), - schedule: milestone2_vesting, - }), - RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { - dest: MultiAddress::Id(grantee.clone()), - value: milestone3_amount, - }), - ]; - assert_ok!(Utility::batch_all(RuntimeOrigin::signed(grantor.clone()), calls)); - - // Check that multiple vesting schedules are active - let vesting_schedules = Vesting::vesting(grantee.clone()).unwrap(); - assert_eq!(vesting_schedules.len(), 2, "Should have 2 active vesting schedules"); - - // Fast forward and verify unlocking patterns - TestCommons::run_to_block(40); // Past first vesting period - - let balance_after_first = Balances::free_balance(&grantee); - assert!( - balance_after_first >= milestone1_amount + milestone3_amount, - "First milestone and immediate payment should be available" - ); - - // Fast forward past second vesting period - TestCommons::run_to_block(100); - - let final_balance = 
Balances::free_balance(&grantee); - let expected_total = milestone1_amount + milestone2_amount + milestone3_amount; - assert!(final_balance >= expected_total, "All grant funds should be available"); - }); - } - - /// Test case: Realistic grant process with Tech Collective milestone evaluation - /// - /// Scenario: - /// 1. Initial referendum approves entire grant plan - /// 2. For each milestone: grantee delivers proof → Tech Collective votes via referenda → - /// payment released - /// 3. Tech Collective determines vesting schedule based on milestone quality/risk assessment - #[test] - fn test_progressive_milestone_referenda() { - TestCommons::new_fast_governance_test_ext().execute_with(|| { - let grantee = TestCommons::account_id(1); - let proposer = TestCommons::account_id(2); - let voter1 = TestCommons::account_id(3); - let voter2 = TestCommons::account_id(4); - - // Tech Collective members - technical experts who evaluate milestones - let tech_member1 = TestCommons::account_id(5); - let tech_member2 = TestCommons::account_id(6); - let tech_member3 = TestCommons::account_id(7); - let treasury_account = TestCommons::account_id(8); - - // Setup balances for governance participation - Balances::make_free_balance_be(&voter1, 2000 * UNIT); - Balances::make_free_balance_be(&voter2, 2000 * UNIT); - Balances::make_free_balance_be(&proposer, 15000 * UNIT); - Balances::make_free_balance_be(&tech_member1, 3000 * UNIT); - Balances::make_free_balance_be(&tech_member2, 3000 * UNIT); - Balances::make_free_balance_be(&tech_member3, 3000 * UNIT); - Balances::make_free_balance_be(&treasury_account, 10000 * UNIT); - - // Add Tech Collective members - assert_ok!(quantus_runtime::TechCollective::add_member( - RuntimeOrigin::root(), - MultiAddress::Id(tech_member1.clone()) - )); - assert_ok!(quantus_runtime::TechCollective::add_member( - RuntimeOrigin::root(), - MultiAddress::Id(tech_member2.clone()) - )); - assert_ok!(quantus_runtime::TechCollective::add_member( - 
RuntimeOrigin::root(), - MultiAddress::Id(tech_member3.clone()) - )); - - let milestone1_amount = 400 * UNIT; - let milestone2_amount = 500 * UNIT; - let milestone3_amount = 600 * UNIT; - let total_grant = milestone1_amount + milestone2_amount + milestone3_amount; - - // === STEP 1: Initial referendum approves entire grant plan === - println!("=== REFERENDUM: Grant Plan Approval ==="); - - let grant_approval_call = RuntimeCall::TreasuryPallet(pallet_treasury::Call::spend { - asset_kind: Box::new(()), - amount: total_grant, - beneficiary: Box::new(MultiAddress::Id(treasury_account.clone())), - valid_from: None, - }); - - let encoded_proposal = grant_approval_call.encode(); - let preimage_hash = BlakeTwo256::hash(&encoded_proposal); - - assert_ok!(Preimage::note_preimage( - RuntimeOrigin::signed(proposer.clone()), - encoded_proposal.clone() - )); - - let bounded_call = - Bounded::Lookup { hash: preimage_hash, len: encoded_proposal.len() as u32 }; - assert_ok!(Referenda::submit( - RuntimeOrigin::signed(proposer.clone()), - Box::new( - quantus_runtime::governance::pallet_custom_origins::Origin::SmallSpender.into() - ), - bounded_call, - frame_support::traits::schedule::DispatchTime::After(1) - )); - - // Community votes on the grant plan - assert_ok!(ConvictionVoting::vote( - RuntimeOrigin::signed(voter1.clone()), - 0, - AccountVote::Standard { - vote: Vote { - aye: true, - conviction: pallet_conviction_voting::Conviction::Locked1x, - }, - balance: 800 * UNIT, - } - )); - - assert_ok!(ConvictionVoting::vote( - RuntimeOrigin::signed(voter2.clone()), - 0, - AccountVote::Standard { - vote: Vote { - aye: true, - conviction: pallet_conviction_voting::Conviction::Locked2x, - }, - balance: 600 * UNIT, - } - )); - - let blocks_to_advance = 2 + 2 + 2 + 2 + 1; // Fast governance timing: prepare + decision + confirm + enactment + 1 - TestCommons::run_to_block(System::block_number() + blocks_to_advance); - - println!("✅ Grant plan approved by referendum!"); - - // === STEP 2: Tech 
Collective milestone evaluations via referenda === - - // === MILESTONE 1: Tech Collective Decision === - println!("=== MILESTONE 1: Tech Collective Decision ==="); - - println!("📋 Grantee delivers milestone 1: Basic protocol implementation"); - TestCommons::run_to_block(System::block_number() + 10); - - // Tech Collective evaluates and decides on milestone 1 payment - let milestone1_vesting = VestingInfo::new( - milestone1_amount, - milestone1_amount / 60, // Fast test: 60 blocks instead of 60 days - System::block_number() + 1, - ); - - println!("🔍 Tech Collective evaluates milestone 1..."); - - // Tech Collective implements milestone payment directly (as technical body with - // authority) In practice this could be through their own governance or automated - // after technical review - assert_ok!(Vesting::force_vested_transfer( - RuntimeOrigin::root(), /* Tech Collective has root-level authority for technical - * decisions */ - MultiAddress::Id(treasury_account.clone()), - MultiAddress::Id(grantee.clone()), - milestone1_vesting, - )); - - println!("✅ Tech Collective approved milestone 1 with 60-day vesting"); - - let milestone1_locked = Vesting::vesting_balance(&grantee).unwrap_or(0); - println!("Grantee locked (vesting): {:?}", milestone1_locked); - assert!(milestone1_locked > 0, "Milestone 1 should be vesting"); - - // === MILESTONE 2: Tech Collective Decision === - println!("=== MILESTONE 2: Tech Collective Decision ==="); - - TestCommons::run_to_block(System::block_number() + 20); - println!("📋 Grantee delivers milestone 2: Advanced features + benchmarks"); - - // Reduced vesting due to high quality - let milestone2_vesting = VestingInfo::new( - milestone2_amount, - milestone2_amount / 30, // Fast test: 30 blocks instead of 30 days - System::block_number() + 1, - ); - - println!("🔍 Tech Collective evaluates milestone 2 (high quality work)..."); - - // Tech Collective approves with reduced vesting due to excellent work - 
assert_ok!(Vesting::force_vested_transfer( - RuntimeOrigin::root(), - MultiAddress::Id(treasury_account.clone()), - MultiAddress::Id(grantee.clone()), - milestone2_vesting, - )); - - println!("✅ Tech Collective approved milestone 2 with reduced 30-day vesting"); - - // === MILESTONE 3: Final Tech Collective Decision === - println!("=== MILESTONE 3: Final Tech Collective Decision ==="); - - TestCommons::run_to_block(System::block_number() + 20); - println!( - "📋 Grantee delivers final milestone: Production deployment + maintenance plan" - ); - - println!("🔍 Tech Collective evaluates final milestone (project completion)..."); - - // Immediate payment for completed project - no vesting needed - assert_ok!(Balances::transfer_allow_death( - RuntimeOrigin::signed(treasury_account.clone()), - MultiAddress::Id(grantee.clone()), - milestone3_amount, - )); - - println!("✅ Tech Collective approved final milestone with immediate payment"); - - // === Verify Tech Collective governance worked === - let final_balance = Balances::free_balance(&grantee); - let remaining_locked = Vesting::vesting_balance(&grantee).unwrap_or(0); - - println!("Final grantee balance: {:?}", final_balance); - println!("Remaining locked: {:?}", remaining_locked); - - let vesting_schedules = Vesting::vesting(grantee.clone()).unwrap_or_default(); - assert!( - !vesting_schedules.is_empty(), - "Should have active vesting schedules from Tech Collective decisions" - ); - - assert!( - final_balance >= milestone3_amount, - "Tech Collective milestone process should have provided controlled funding" - ); - - println!("🎉 Tech Collective governance process completed successfully!"); - println!(" - One community referendum approved the overall grant plan"); - println!(" - Tech Collective evaluated each milestone with technical expertise"); - println!(" - Vesting schedules determined by technical quality assessment:"); - println!(" * Milestone 1: 60-day vesting (conservative, early stage)"); - println!(" * Milestone 
2: 30-day vesting (high confidence, quality work)"); - println!(" * Milestone 3: Immediate payment (project completed successfully)"); - }); - } - - /// Test case: Treasury proposal with automatic vesting integration - /// - /// Scenario: Treasury spend and vesting creation executed atomically - /// through batch calls for integrated fund management - #[test] - fn test_treasury_auto_vesting_integration() { - TestCommons::new_fast_governance_test_ext().execute_with(|| { - let beneficiary = TestCommons::account_id(1); - let amount = 1000 * UNIT; - - // Create atomic treasury spend + vesting creation through batch calls - let vesting_info = VestingInfo::new(amount, amount / (30 * DAYS) as u128, 1); - - let _treasury_vesting_batch = RuntimeCall::Utility(pallet_utility::Call::batch_all { - calls: vec![ - // Treasury spend - RuntimeCall::TreasuryPallet(pallet_treasury::Call::spend { - asset_kind: Box::new(()), - amount, - beneficiary: Box::new(MultiAddress::Id(beneficiary.clone())), - valid_from: None, - }), - // Vesting creation as part of same atomic transaction - RuntimeCall::Vesting(pallet_vesting::Call::force_vested_transfer { - source: MultiAddress::Id(beneficiary.clone()), /* Simplified - in - * practice treasury - * account */ - target: MultiAddress::Id(beneficiary.clone()), - schedule: vesting_info, - }), - ], - }); - - // Execute atomic treasury spend + vesting batch - let calls = vec![ - RuntimeCall::TreasuryPallet(pallet_treasury::Call::spend { - asset_kind: Box::new(()), - amount, - beneficiary: Box::new(MultiAddress::Id(beneficiary.clone())), - valid_from: None, - }), - RuntimeCall::Vesting(pallet_vesting::Call::force_vested_transfer { - source: MultiAddress::Id(beneficiary.clone()), - target: MultiAddress::Id(beneficiary.clone()), - schedule: vesting_info, - }), - ]; - assert_ok!(Utility::batch_all(RuntimeOrigin::root(), calls)); - - // Verify the integration worked - let locked_amount = Vesting::vesting_balance(&beneficiary).unwrap_or(0); - 
assert!(locked_amount > 0, "Vesting should be active"); - }); - } - - /// Test case: Emergency vesting operations with batch calls - /// - /// Scenario: Emergency handling of vesting schedules through - /// atomic batch operations for intervention scenarios - #[test] - fn test_emergency_vesting_cancellation() { - TestCommons::new_fast_governance_test_ext().execute_with(|| { - let grantee = TestCommons::account_id(1); - let grantor = TestCommons::account_id(2); - - Balances::make_free_balance_be(&grantor, 2000 * UNIT); - - // Create vesting schedule with atomic batch call setup - let total_amount = 1000 * UNIT; - let vesting_info = VestingInfo::new(total_amount, total_amount / 100, 1); - - // Example of comprehensive grant setup through batch operations - let _grant_batch = RuntimeCall::Utility(pallet_utility::Call::batch_all { - calls: vec![ - // Initial grant setup - RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(grantee.clone()), - schedule: vesting_info, - }), - // Could include additional setup calls (metadata, tracking, etc.) 
- ], - }); - - let calls = vec![RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { - target: MultiAddress::Id(grantee.clone()), - schedule: vesting_info, - })]; - assert_ok!(Utility::batch_all(RuntimeOrigin::signed(grantor.clone()), calls)); - - // Let some time pass and some funds unlock - TestCommons::run_to_block(50); - - let balance_before_cancellation = Balances::free_balance(&grantee); - let locked_before = Vesting::vesting_balance(&grantee).unwrap_or(0); - - assert!(locked_before > 0, "Should still have locked funds"); - - // Emergency intervention through atomic batch operations - let _emergency_batch = RuntimeCall::Utility(pallet_utility::Call::batch_all { - calls: vec![ - // Emergency action: schedule management operations - RuntimeCall::Vesting(pallet_vesting::Call::merge_schedules { - schedule1_index: 0, - schedule2_index: 0, - }), - // Could include additional emergency measures like fund recovery or - // notifications - ], - }); - - // Execute emergency intervention if vesting exists - if !Vesting::vesting(grantee.clone()).unwrap().is_empty() { - let calls = vec![RuntimeCall::Vesting(pallet_vesting::Call::merge_schedules { - schedule1_index: 0, - schedule2_index: 0, - })]; - assert_ok!(Utility::batch_all(RuntimeOrigin::signed(grantee.clone()), calls)); - } - - let balance_after = Balances::free_balance(&grantee); - - // Verify that emergency operations maintained system integrity - // (In practice, this would involve more sophisticated intervention mechanisms) - assert!( - balance_after >= balance_before_cancellation, - "Emergency handling should maintain or improve user's position" - ); - }); - } -} diff --git a/runtime/tests/transactions/integration.rs b/runtime/tests/transactions/integration.rs index 4ae80b98..55ea57bc 100644 --- a/runtime/tests/transactions/integration.rs +++ b/runtime/tests/transactions/integration.rs @@ -106,11 +106,11 @@ mod tests { // Extract components into individual variables for debugging let decoded_address: 
Address = address; let decoded_signature: DilithiumSignatureScheme = signature; - let decoded_extra: SignedExtra = extra; + let _: SignedExtra = extra; // Debug output for each component println!("Decoded Address: {:?}", decoded_address); - println!("Decoded Extra: {:?}", decoded_extra); + println!("Decoded Extra: ()"); let DilithiumSignatureScheme::Dilithium(sig_public) = decoded_signature.clone(); let sig = sig_public.signature(); diff --git a/runtime/tests/transactions/reversible_integration.rs b/runtime/tests/transactions/reversible_integration.rs index b2a49c9a..28f2d0d0 100644 --- a/runtime/tests/transactions/reversible_integration.rs +++ b/runtime/tests/transactions/reversible_integration.rs @@ -1,9 +1,7 @@ use crate::common::TestCommons; use frame_support::{assert_err, assert_ok}; use qp_scheduler::BlockNumberOrTimestamp; -use quantus_runtime::{ - Balances, Recovery, ReversibleTransfers, RuntimeCall, RuntimeOrigin, EXISTENTIAL_DEPOSIT, UNIT, -}; +use quantus_runtime::{Balances, ReversibleTransfers, RuntimeOrigin, EXISTENTIAL_DEPOSIT}; use sp_runtime::MultiAddress; fn acc(n: u8) -> sp_core::crypto::AccountId32 { @@ -16,9 +14,6 @@ fn high_security_account() -> sp_core::crypto::AccountId32 { fn interceptor() -> sp_core::crypto::AccountId32 { TestCommons::account_id(2) } -fn recoverer() -> sp_core::crypto::AccountId32 { - TestCommons::account_id(3) -} #[test] fn high_security_end_to_end_flow() { @@ -97,64 +92,38 @@ fn high_security_end_to_end_flow() { pallet_reversible_transfers::Error::::AccountAlreadyHighSecurity ); - // 6) Interceptor recovers all funds from high sec account via Recovery pallet - - // 6.1 Interceptor initiates recovery - assert_ok!(Recovery::initiate_recovery( - RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - )); - - // 6.2 Interceptor vouches on recovery - assert_ok!(Recovery::vouch_recovery( - RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - 
MultiAddress::Id(interceptor()), - )); - - // 6.3 Interceptor claims recovery - assert_ok!(Recovery::claim_recovery( - RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - )); - + // 6) Interceptor recovers all funds from high sec account via recover_funds let interceptor_before_recovery = Balances::free_balance(interceptor()); - // 6.4 Interceptor recovers all funds - let call = RuntimeCall::Balances(pallet_balances::Call::transfer_all { - dest: MultiAddress::Id(interceptor()), - keep_alive: false, - }); - assert_ok!(Recovery::as_recovered( + assert_ok!(ReversibleTransfers::recover_funds( RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - Box::new(call), + high_security_account(), )); let hs_after_recovery = Balances::free_balance(high_security_account()); let interceptor_after_recovery = Balances::free_balance(interceptor()); - // HS should be drained to existential deposit; account 2 increased accordingly - assert_eq!(hs_after_recovery, EXISTENTIAL_DEPOSIT); - - // Fees - Interceptor spends 11 units in total for all the calls they are making. + // HS account should be drained completely (keep_alive: false) + assert_eq!(hs_after_recovery, 0); - // Interceptor has hs account's balance now - let estimated_fees = UNIT/100 * 101; // The final recover call costs 1.01 units. 
+ // Interceptor should have received all the HS account's remaining funds assert!( - interceptor_after_recovery >= (hs_after_cancel + interceptor_before_recovery - estimated_fees), - "recoverer {interceptor_after_recovery} should be at least {hs_after_cancel} + {interceptor_start} - {estimated_fees}" + interceptor_after_recovery > interceptor_before_recovery, + "interceptor should have received funds from HS account" + ); + assert_eq!( + interceptor_after_recovery, + interceptor_before_recovery + hs_after_cancel, + "interceptor should have received the HS account's remaining balance" ); }); } #[test] -fn test_recovery_allows_multiple_recovery_configs() { - // Test that Account 3 can recover both Account 1 (HS) and Account 2 (interceptor) - // This proves our inheritance + high security use case will work +fn test_recover_funds_only_works_for_guardian() { + // Test that only the guardian (interceptor) can call recover_funds let mut ext = TestCommons::new_test_ext(); ext.execute_with(|| { - // Set up Account 1 as high security with Account 2 as interceptor let delay = BlockNumberOrTimestamp::BlockNumber(5); assert_ok!(ReversibleTransfers::set_high_security( RuntimeOrigin::signed(high_security_account()), @@ -162,88 +131,33 @@ fn test_recovery_allows_multiple_recovery_configs() { interceptor(), )); - // Account 2 initiates recovery of Account 1 - assert_ok!(Recovery::initiate_recovery( - RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - )); - assert_ok!(Recovery::vouch_recovery( - RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - MultiAddress::Id(interceptor()), - )); - assert_ok!(Recovery::claim_recovery( - RuntimeOrigin::signed(interceptor()), - MultiAddress::Id(high_security_account()), - )); - - // Set up recovery for Account 2 with Account 3 as friend - assert_ok!(Recovery::create_recovery( - RuntimeOrigin::signed(interceptor()), - vec![recoverer()], - 1, - 0, - )); - - // Now Account 3 can 
recover Account 2 - assert_ok!(Recovery::initiate_recovery( - RuntimeOrigin::signed(recoverer()), - MultiAddress::Id(interceptor()), - )); - assert_ok!(Recovery::vouch_recovery( - RuntimeOrigin::signed(recoverer()), - MultiAddress::Id(interceptor()), - MultiAddress::Id(recoverer()), - )); - - // This should succeed - Account 3 can recover Account 2 - assert_ok!(Recovery::claim_recovery( - RuntimeOrigin::signed(recoverer()), - MultiAddress::Id(interceptor()), - )); - - // Verify both proxies exist - // Account 2 proxies Account 1 - assert_eq!(Recovery::proxy(interceptor()), Some(high_security_account())); - // Account 3 proxies Account 2 - assert_eq!(Recovery::proxy(recoverer()), Some(interceptor())); - - // Give Account 1 some funds to test transfer - let transfer_amount = 100 * UNIT; - assert_ok!(Balances::force_set_balance( - RuntimeOrigin::root(), - MultiAddress::Id(high_security_account()), - transfer_amount, - )); + // Non-guardian (account 3) tries to recover funds - should fail + assert_err!( + ReversibleTransfers::recover_funds( + RuntimeOrigin::signed(acc(3)), + high_security_account(), + ), + pallet_reversible_transfers::Error::::InvalidReverser + ); - // Capture balances before nested transfer + // Guardian (account 2) can recover funds let hs_balance_before = Balances::free_balance(high_security_account()); - let recoverer_balance_before = Balances::free_balance(recoverer()); - - // Now test nested as_recovered: Account 3 -> Account 2 -> Account 1 - let inner_call = RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { - dest: MultiAddress::Id(recoverer()), - value: transfer_amount / 2, // Transfer half the amount - }); - let outer_call = RuntimeCall::Recovery(pallet_recovery::Call::as_recovered { - account: MultiAddress::Id(high_security_account()), - call: Box::new(inner_call), - }); - - // Account 3 calls as_recovered on Account 2, which contains as_recovered on Account 1 - // This should succeed and transfer funds: Account 1 -> Account 
3 - assert_ok!(Recovery::as_recovered( - RuntimeOrigin::signed(recoverer()), - MultiAddress::Id(interceptor()), - Box::new(outer_call), + let interceptor_balance_before = Balances::free_balance(interceptor()); + + assert_ok!(ReversibleTransfers::recover_funds( + RuntimeOrigin::signed(interceptor()), + high_security_account(), )); - // Verify the transfer happened + // Verify funds were transferred let hs_balance_after = Balances::free_balance(high_security_account()); - let recoverer_balance_after = Balances::free_balance(recoverer()); - - assert_eq!(hs_balance_before, transfer_amount); - assert!(hs_balance_after < hs_balance_before); // Account 1 lost funds - assert!(recoverer_balance_after > recoverer_balance_before); // Account 3 gained funds + let interceptor_balance_after = Balances::free_balance(interceptor()); + + assert_eq!(hs_balance_after, 0); + assert_eq!( + interceptor_balance_after, + interceptor_balance_before + hs_balance_before, + "guardian should have received all HS account funds" + ); }); }