From 6fd038298c984f3a35c8059ce0ec1d7ec8053afb Mon Sep 17 00:00:00 2001 From: Ablank Date: Sat, 14 Mar 2026 22:42:11 +0800 Subject: [PATCH 1/2] Add /wallet/history endpoint (Bounty #908) - Implements GET /wallet/history?miner_id=X&limit=50&offset=0 - Returns transaction history including rewards and transfers - Queries epoch_rewards table for mining rewards - Queries ledger table for transfers - Supports pagination with limit and offset parameters --- docs/DEVELOPER_TRACTION_Q1_2026.md | 554 ++--- docs/RIP-305-cross-chain-airdrop.md | 412 ++-- ergo-anchor/rustchain_ergo_anchor.py | 1158 ++++----- miners/clawrtc/pow_miners.py | 1238 +++++----- miners/linux/rustchain_living_museum.py | 1000 ++++---- miners/linux/warthog_sidecar.py | 714 +++--- .../macos/intel/rustchain_mac_miner_v2.4.py | 1008 ++++---- miners/macos/rustchain_mac_miner_v2.4.py | 1076 ++++---- miners/macos/rustchain_mac_miner_v2.5.py | 1360 +++++----- miners/power8/fingerprint_checks_power8.py | 998 ++++---- miners/power8/rustchain_power8_miner.py | 818 +++--- node/beacon_x402.py | 728 +++--- node/rip_200_round_robin_1cpu1vote_v2.py | 852 +++---- node/rom_clustering_server.py | 816 +++--- node/rom_fingerprint_db.py | 880 +++---- node/rustchain_bft_consensus.py | 1884 +++++++------- node/rustchain_block_producer.py | 1494 +++++------ node/rustchain_ergo_anchor.py | 1158 ++++----- node/rustchain_hardware_database.py | 1880 +++++++------- node/rustchain_migration.py | 1294 +++++----- node/rustchain_p2p_gossip.py | 1638 ++++++------ node/rustchain_tx_handler.py | 1550 ++++++------ node/rustchain_v2_integrated_v2.2.1_rip200.py | 97 + node/rustchain_x402.py | 228 +- node/warthog_verification.py | 612 ++--- node/wsgi.py | 100 +- node/x402_config.py | 182 +- pushtogit.sh | 38 +- rips/Cargo.toml | 126 +- rips/docs/RIP-0001-proof-of-antiquity.md | 202 +- rips/docs/RIP-0007-entropy-fingerprinting.md | 608 ++--- rips/docs/RIP-0201-fleet-immune-system.md | 198 +- rips/docs/RIP-0304-retro-console-mining.md | 804 
+++--- rips/docs/RIP-SERIES-FOUNDATIONAL.md | 382 +-- rips/python/rustchain/__init__.py | 168 +- rips/python/rustchain/core_types.py | 646 ++--- rips/python/rustchain/deep_entropy.py | 1104 ++++----- rips/python/rustchain/fleet_immune_system.py | 2196 ++++++++--------- rips/python/rustchain/governance.py | 1142 ++++----- rips/python/rustchain/node.py | 926 +++---- rips/python/rustchain/proof_of_antiquity.py | 890 +++---- rips/python/rustchain/rip201_server_patch.py | 444 ++-- .../RUSTCHAIN_PROOF_OF_ANTIQUITY.md | 1550 ++++++------ rips/rustchain-core/api/rpc.py | 928 +++---- rips/rustchain-core/config/chain_params.py | 296 +-- rips/rustchain-core/consensus/poa.py | 942 +++---- rips/rustchain-core/governance/proposals.py | 1168 ++++----- rips/rustchain-core/install_testnet.sh | 430 ++-- rips/rustchain-core/ledger/utxo_ledger.py | 1060 ++++---- rips/rustchain-core/main.py | 848 +++---- rips/rustchain-core/networking/p2p.py | 1092 ++++---- .../src/anti_spoof/mutating_challenge.py | 1156 ++++----- .../src/anti_spoof/network_challenge.py | 1270 +++++----- .../src/mutator_oracle/multi_arch_oracles.py | 954 +++---- .../src/mutator_oracle/ppc_mutator_node.py | 942 +++---- rips/rustchain-core/validator/entropy.py | 2010 +++++++-------- rips/rustchain-core/validator/score.py | 1164 ++++----- .../validator/setup_validator.py | 1226 ++++----- rustchain-poa/tools/amiga/README.md | 26 +- scripts/moltbook_solver.py | 1436 +++++------ setup_github_ssh.sh.txt | 82 +- site/beacon/advertise.js | 452 ++-- site/beacon/vehicles.js | 554 ++--- tools/validate_genesis.py | 154 +- validator/_init_.py | 26 +- vintage_cpu_integration_example.py | 838 +++---- wallet/coinbase_wallet.py | 460 ++-- web/wallets.html | 756 +++--- 68 files changed, 28760 insertions(+), 28663 deletions(-) diff --git a/docs/DEVELOPER_TRACTION_Q1_2026.md b/docs/DEVELOPER_TRACTION_Q1_2026.md index 461e55f0..f0c07e39 100644 --- a/docs/DEVELOPER_TRACTION_Q1_2026.md +++ b/docs/DEVELOPER_TRACTION_Q1_2026.md @@ -1,277 +1,277 
@@ -# Elyan Labs — Developer Traction Report -### Q1 2026 (December 2025 - March 2, 2026) - -**Prepared**: March 2, 2026 -**Author**: Scott Boudreaux, Founder -**Data**: GitHub API (live pull) + GitClear, LinearB, Electric Capital industry benchmarks - ---- - -## The Thesis - -Elyan Labs is a solo-founded open source ecosystem producing developer output that rivals VC-backed teams of 13+ engineers — on zero external capital. The data below is pulled directly from GitHub's API and compared against published industry benchmarks. - -This is not a pitch. It's a measurement. - ---- - -## 90-Day Snapshot - -| | Elyan Labs | Avg Solo Dev | Sei Protocol ($85M VC) | -|--|-----------|-------------|------------------------| -| **Capital raised** | **$0** | $0 | $85,000,000 | -| **Engineering headcount** | **1** | 1 | ~13 active | -| **Commits** | **1,882** | 105-168 | 297 | -| **Pull requests opened** | **41** | 9-15 | 417 | -| **Contributions to external projects** | **32 PRs** | 0-2 | 0 | -| **Open source repos shipped** | **97** | 1-3 | 0 new | -| **GitHub stars (ecosystem)** | **1,334** | 5-30 | 2,837 (lifetime) | -| **Forks (developer adoption)** | **359** | 2-10 | 870 (lifetime) | -| **Unique developer interactions** | **150+** | 0-2 | 78 (lifetime) | - -*150+ unique interactions includes PR authors (13), issue authors (28), bounty claimants, stargazers, fork creators, and clone traffic. 41 contributed code or issues directly; the remainder engaged through stars, forks, bounty discussions, and repository clones (exact clone/view counts not exposed by GitHub API).* - -**Sei Protocol comparison**: $85M raised (Jump Crypto, Multicoin, Coinbase Ventures), 78 total contributors. Sei's lifetime star count took years; Elyan Labs accumulated 47% of that figure in 90 days. 
- ---- - -## Capital Efficiency - -The core metric investors should examine: - -| | Elyan Labs | Sei Protocol | Aztec ($119M) | Radix ($21M) | -|--|-----------|-------------|---------------|-------------| -| **Commits/developer/month** | **627** | 7.6 | ~11 | 6.6 | -| **Cost per commit** | **$0** | ~$95,600 | ~$9,000 | ~$7,100 | -| **Stars per $M raised** | **infinite** | 33 | 3.6 | 29 | - -``` -Per-Developer Monthly Output (commits/dev/month) - - Elyan Labs (1 dev) ██████████████████████████████████████████ 627 - Indie median ████ 56 - Mina (7 devs, $29M) ███ 42 - FAANG median █▍ 8-21 - Aztec (133 ppl, $119M) █ 11 - Sei (13 devs, $85M) ▌ 7.6 - Radix (5 devs, $21M) ▌ 6.6 - - Scale: █ = 15 commits/dev/month -``` - -At 627 commits/dev/month, Elyan Labs operates at **82x** the per-developer output of a $85M-funded team. This isn't hustle theater — it reflects zero coordination overhead, zero PR review bottleneck, and direct technical execution. - -**Industry context**: GitClear's study of 878,592 developer-years places the median full-time developer at 56 commits/month. Elyan Labs' annualized pace of ~7,500 commits/year sits above the **99.9th percentile**. - ---- - -## Monthly Growth Trajectory - -### Development Velocity -| Month | Commits | PRs Opened | Repos Created | Issues Filed | -|-------|---------|-----------|---------------|-------------| -| Dec 2025 | 731 | 3 | 28 | 2 | -| Jan 2026 | 539 | 1 | 15 | 0 | -| Feb 2026 | 960 | 30 | 51 | 363 | -| Mar 1-2* | 93 | 7 | 3 | 79 | -| **Total** | **1,882** | **41** | **97** | **444** | - -*March represents 2 days only, tracking at February pace. - -### Community Engagement (Inbound) -| Month | PRs from Others | Issues from Others | Unique Contributors | -|-------|----------------|-------------------|-------------------| -| Dec 2025 | 0 | 0 | 0 | -| Jan 2026 | 0 | 1 | 1 | -| Feb 2026 | 652 | 82 | 41 | -| Mar 1-2* | 215 | 12 | sustained | - -**The inflection**: Zero inbound contributions through January. 
In February, a bounty program and ecosystem visibility campaign produced **867 inbound PRs** and **150+ unique developer interactions** in 30 days. 41 developers contributed code or filed issues directly; the remainder engaged via stars, forks, bounty claims, and clones. This growth is sustaining into March at the same pace. - ---- - -## Ecosystem Architecture - -Elyan Labs is not a single-repo project. It's an interconnected ecosystem of 99 public repositories spanning five categories: - -### Core Infrastructure -| Project | Stars | Forks | Description | -|---------|-------|-------|-------------| -| **RustChain** | 82 | 93 | Proof-of-Antiquity blockchain — rewards real vintage hardware | -| **BoTTube** | 67 | 48 | AI-native video platform (670 videos, 99 agents, 45.5K views) | -| **Beacon Skill** | 48 | 31 | Agent orchestration framework (PyPI + npm) | -| **RustChain Bounties** | 34 | 64 | Open bounty board — drives community contributions | -| **Grazer Skill** | 33 | 13 | Multi-platform agent discovery tool | - -### Research & Publications -| Project | Stars | Description | -|---------|-------|-------------| -| **RAM Coffers** | 29 | Neuromorphic NUMA-aware weight banking (predates DeepSeek Engram by 27 days) | -| **Legend of Elya N64** | 12 | Neural network running on Nintendo 64 hardware (MIPS R4300i) | -| **Grail-V** | -- | CVPR 2026 Workshop submission (non-bijunctive attention, 8.8x speedup on POWER8) | - -### Hardware Ports (Cross-Architecture) -| Project | Stars | Description | -|---------|-------|-------------| -| **exo-cuda** | 23 | NVIDIA CUDA support for distributed inference | -| **claude-code-power8** | 21 | Claude Code on IBM POWER8 | -| **llama-cpp-power8** | 18 | LLM inference on PowerPC with vec_perm optimization | -| **nvidia-power8-patches** | 20 | GPU driver patches for ppc64le | - -### Published Packages (PyPI/npm) -| Package | Version | Installs | -|---------|---------|---------| -| `beacon-skill` | 2.15.1 | PyPI + npm | -| `clawrtc` | 1.5.0 
| PyPI | -| `bottube` | 1.6.0 | PyPI | -| `grazer-skill` | 1.6.0 | PyPI | - -### Live Tokens -| Token | Chain | Status | -|-------|-------|--------| -| **RTC** | RustChain native | Live, 20 miners, 88 epochs | -| **wRTC** | Solana | Mint revoked, LP locked, Raydium pool | -| **wRTC** | Base L2 | Mint revoked, LP locked, Aerodrome pool | - ---- - -## External Visibility & Contributions - -### Upstream Contributions (32 PRs to external projects) - -Elyan Labs actively contributes to major open source projects — not just consuming, but improving the ecosystem: - -| Project | PRs | Status | Significance | -|---------|-----|--------|-------------| -| **llama.cpp** (ggml-org) | 5 | Under review | Core LLM inference engine | -| **vLLM** (vllm-project) | 2 | 1 open | Production LLM serving | -| **BitNet** (Microsoft) | 2 | 1 open | 1-bit LLM research | -| **OpenFang** (RightNow-AI) | 2 | 1 open, 1 merged | Agent framework | -| **dn-institute** | 1 | Open ($100 bounty) | Prompt engineering | -| **Awesome lists** (24 repos) | 24 | 3 merged, 12 open | Ecosystem visibility | - -**Merged on notable repos**: Awesome-LLM-Inference, awesome-n64-development, awesome-agentic-patterns - -### Academic Publications -| Paper | Venue | Status | -|-------|-------|--------| -| Grail-V: Non-Bijunctive Attention | CVPR 2026 Workshop | Submitted (Submission #7) | -| Silicon Stratigraphy | JCAA | Rewrite requested | -| 5 Zenodo DOIs | Zenodo | Published | -| 7 Dev.to articles | Dev.to | Published | - ---- - -## Benchmark Context - -### Where Elyan Labs sits in the developer distribution - -**GitClear** (878,592 developer-years analyzed): - -| Percentile | Annual Commits | Elyan Labs (annualized) | -|-----------|---------------|------------------------| -| 50th (median) | 673 | -- | -| 90th | ~2,000 | -- | -| 99th | ~4,000 | -- | -| **99.9th+** | **>5,000** | **~7,500** | - -**Electric Capital** classifies "full-time crypto developer" as 10+ code-committed days/month. 
Elyan Labs codes nearly every day — 3x the threshold. - -**LinearB** (8.1M PRs, 4,800 teams, 42 countries): - -| Metric | Elite Threshold | Elyan Labs | -|--------|----------------|------------| -| Cycle time | <25 hours | Near-instant | -| Focus time/day | 6+ hours | All day | -| Rework rate | <2% | Low | - ---- - -## Honest Assessment: What's Not Working Yet - -Investors should understand the gaps as clearly as the strengths. - -| Gap | Current | Target | Path | -|-----|---------|--------|------| -| **Followers** | 30 | 500+ | Stars are spread across 75+ repos. No single "viral" repo yet. Need one breakout (500+ stars on Rustchain). | -| **External PR merge rate** | 9.4% (3/32) | 30%+ | Many awesome-list PRs awaiting review. llama.cpp PRs closed as duplicates. Need more targeted, higher-quality upstream contributions. | -| **Contributor quality** | Mixed | Verified | Some inbound PRs appear bot-generated (bounty farming). Of 150+ interactions, genuine engaged developers are a subset. Improving triage and verification. | -| **Revenue** | $0 | TBD | No monetization yet. Token (RTC) has internal reference rate ($0.10) but no public exchange listing. | -| **Documentation** | Thin | Production-grade | 97 repos created in 90 days. Many have minimal READMEs. Quality documentation would improve star-to-follow conversion. 
| - ---- - -## Hardware Lab (Physical Infrastructure) - -Unlike most software startups, Elyan Labs operates a physical compute lab built through disciplined hardware acquisition: - -| Asset | Specs | Acquisition | -|-------|-------|-------------| -| **18+ GPUs** | 228GB+ VRAM total | eBay datacenter pulls + pawn shops | -| **IBM POWER8 S824** | 128 threads, 512GB RAM | Enterprise decomm | -| **2x FPGA** (Alveo U30) | Video transcode + inference | Datacenter pull | -| **Hailo-8 TPU** | Edge AI accelerator | Incoming for POWER8 | -| **PowerPC fleet** | 3x G4, 2x G5 | Vintage hardware (RustChain miners) | -| **40GbE interconnect** | POWER8 <-> C4130 GPU server | 0.15ms latency | - -**Total investment**: ~$12,000 -**Estimated retail value**: $40,000-60,000+ -**Acquisition strategy**: 3-5x ROI through pawn shop arbitrage and eBay datacenter decomm sales - -This lab enables R&D that pure-cloud startups cannot economically replicate — particularly the POWER8 vec_perm work that underpins the Grail-V paper. 
- ---- - -## 6-Month Outlook - -| Metric | Now (90 days) | 6-Month Target | Basis | -|--------|--------------|----------------|-------| -| Commits | 1,882 | 4,000+ | Current velocity sustained | -| Stars | 1,334 | 3,000+ | Viral repo + continued ecosystem growth | -| Forks | 359 | 800+ | Bounty program expanding | -| Followers | 30 | 200+ | Requires star concentration fix | -| Unique interactions | 150+ | 500+ | Bounty expansion + organic discovery | -| Upstream merges | 3 | 15+ | Higher-quality targeted PRs | -| Published packages | 4 | 6+ | Two additional tools planned | - -### Key Inflection Points -- **100 followers**: Social proof threshold for organic discovery -- **500 stars on Rustchain**: GitHub trending eligibility -- **10 upstream merges**: Established open source contributor reputation -- **First exchange listing**: RTC/wRTC price discovery - ---- - -## Summary - -In 90 days with zero external funding, Elyan Labs has: - -- Shipped **97 public repositories** spanning blockchain, AI inference, agent orchestration, and hardware ports -- Generated **1,882 commits** (99.9th percentile of all developers globally) -- Attracted **150+ unique developer interactions** (from zero) -- Earned **1,334 GitHub stars** and **359 forks** -- Contributed **32 PRs to external projects** including llama.cpp, vLLM, and Microsoft BitNet -- Published **1 CVPR workshop paper** and **5 Zenodo DOIs** -- Deployed live tokens on **3 chains** (native RTC, Solana wRTC, Base wRTC) -- Built all of this on **$12,000 of pawn-shop hardware** - -The question isn't whether this developer can build. The question is what happens when this velocity gets fuel. 
- ---- - -## Data Sources - -| Source | Coverage | Link | -|--------|----------|------| -| GitHub API | Live pull, March 2, 2026 | github.com/Scottcjn | -| GitClear | 878K developer-years | [gitclear.com/research](https://www.gitclear.com/research_studies/git_commit_count_percentiles_annual_days_active_from_largest_data_set) | -| LinearB | 8.1M PRs, 4,800 teams | [linearb.io/benchmarks](https://linearb.io/resources/software-engineering-benchmarks-report) | -| GitHub Octoverse | 180M+ developers, 2025 | [octoverse.github.com](https://octoverse.github.com/) | -| Electric Capital | Crypto developer ecosystem | [developerreport.com](https://www.developerreport.com) | -| Sei Protocol | $85M funded, 78 contributors | [github.com/sei-protocol](https://github.com/sei-protocol/sei-chain) | -| Aztec Network | $119M funded, 133 contributors | [github.com/AztecProtocol](https://github.com/AztecProtocol/aztec-packages) | - ---- - -*Elyan Labs LLC — Louisiana, US* -*scott@elyanlabs.ai | @RustchainPOA | github.com/Scottcjn* +# Elyan Labs — Developer Traction Report +### Q1 2026 (December 2025 - March 2, 2026) + +**Prepared**: March 2, 2026 +**Author**: Scott Boudreaux, Founder +**Data**: GitHub API (live pull) + GitClear, LinearB, Electric Capital industry benchmarks + +--- + +## The Thesis + +Elyan Labs is a solo-founded open source ecosystem producing developer output that rivals VC-backed teams of 13+ engineers — on zero external capital. The data below is pulled directly from GitHub's API and compared against published industry benchmarks. + +This is not a pitch. It's a measurement. 
+ +--- + +## 90-Day Snapshot + +| | Elyan Labs | Avg Solo Dev | Sei Protocol ($85M VC) | +|--|-----------|-------------|------------------------| +| **Capital raised** | **$0** | $0 | $85,000,000 | +| **Engineering headcount** | **1** | 1 | ~13 active | +| **Commits** | **1,882** | 105-168 | 297 | +| **Pull requests opened** | **41** | 9-15 | 417 | +| **Contributions to external projects** | **32 PRs** | 0-2 | 0 | +| **Open source repos shipped** | **97** | 1-3 | 0 new | +| **GitHub stars (ecosystem)** | **1,334** | 5-30 | 2,837 (lifetime) | +| **Forks (developer adoption)** | **359** | 2-10 | 870 (lifetime) | +| **Unique developer interactions** | **150+** | 0-2 | 78 (lifetime) | + +*150+ unique interactions includes PR authors (13), issue authors (28), bounty claimants, stargazers, fork creators, and clone traffic. 41 contributed code or issues directly; the remainder engaged through stars, forks, bounty discussions, and repository clones (exact clone/view counts not exposed by GitHub API).* + +**Sei Protocol comparison**: $85M raised (Jump Crypto, Multicoin, Coinbase Ventures), 78 total contributors. Sei's lifetime star count took years; Elyan Labs accumulated 47% of that figure in 90 days. 
+ +--- + +## Capital Efficiency + +The core metric investors should examine: + +| | Elyan Labs | Sei Protocol | Aztec ($119M) | Radix ($21M) | +|--|-----------|-------------|---------------|-------------| +| **Commits/developer/month** | **627** | 7.6 | ~11 | 6.6 | +| **Cost per commit** | **$0** | ~$95,600 | ~$9,000 | ~$7,100 | +| **Stars per $M raised** | **infinite** | 33 | 3.6 | 29 | + +``` +Per-Developer Monthly Output (commits/dev/month) + + Elyan Labs (1 dev) ██████████████████████████████████████████ 627 + Indie median ████ 56 + Mina (7 devs, $29M) ███ 42 + FAANG median █▍ 8-21 + Aztec (133 ppl, $119M) █ 11 + Sei (13 devs, $85M) ▌ 7.6 + Radix (5 devs, $21M) ▌ 6.6 + + Scale: █ = 15 commits/dev/month +``` + +At 627 commits/dev/month, Elyan Labs operates at **82x** the per-developer output of a $85M-funded team. This isn't hustle theater — it reflects zero coordination overhead, zero PR review bottleneck, and direct technical execution. + +**Industry context**: GitClear's study of 878,592 developer-years places the median full-time developer at 56 commits/month. Elyan Labs' annualized pace of ~7,500 commits/year sits above the **99.9th percentile**. + +--- + +## Monthly Growth Trajectory + +### Development Velocity +| Month | Commits | PRs Opened | Repos Created | Issues Filed | +|-------|---------|-----------|---------------|-------------| +| Dec 2025 | 731 | 3 | 28 | 2 | +| Jan 2026 | 539 | 1 | 15 | 0 | +| Feb 2026 | 960 | 30 | 51 | 363 | +| Mar 1-2* | 93 | 7 | 3 | 79 | +| **Total** | **1,882** | **41** | **97** | **444** | + +*March represents 2 days only, tracking at February pace. + +### Community Engagement (Inbound) +| Month | PRs from Others | Issues from Others | Unique Contributors | +|-------|----------------|-------------------|-------------------| +| Dec 2025 | 0 | 0 | 0 | +| Jan 2026 | 0 | 1 | 1 | +| Feb 2026 | 652 | 82 | 41 | +| Mar 1-2* | 215 | 12 | sustained | + +**The inflection**: Zero inbound contributions through January. 
In February, a bounty program and ecosystem visibility campaign produced **867 inbound PRs** and **150+ unique developer interactions** in 30 days. 41 developers contributed code or filed issues directly; the remainder engaged via stars, forks, bounty claims, and clones. This growth is sustaining into March at the same pace. + +--- + +## Ecosystem Architecture + +Elyan Labs is not a single-repo project. It's an interconnected ecosystem of 99 public repositories spanning five categories: + +### Core Infrastructure +| Project | Stars | Forks | Description | +|---------|-------|-------|-------------| +| **RustChain** | 82 | 93 | Proof-of-Antiquity blockchain — rewards real vintage hardware | +| **BoTTube** | 67 | 48 | AI-native video platform (670 videos, 99 agents, 45.5K views) | +| **Beacon Skill** | 48 | 31 | Agent orchestration framework (PyPI + npm) | +| **RustChain Bounties** | 34 | 64 | Open bounty board — drives community contributions | +| **Grazer Skill** | 33 | 13 | Multi-platform agent discovery tool | + +### Research & Publications +| Project | Stars | Description | +|---------|-------|-------------| +| **RAM Coffers** | 29 | Neuromorphic NUMA-aware weight banking (predates DeepSeek Engram by 27 days) | +| **Legend of Elya N64** | 12 | Neural network running on Nintendo 64 hardware (MIPS R4300i) | +| **Grail-V** | -- | CVPR 2026 Workshop submission (non-bijunctive attention, 8.8x speedup on POWER8) | + +### Hardware Ports (Cross-Architecture) +| Project | Stars | Description | +|---------|-------|-------------| +| **exo-cuda** | 23 | NVIDIA CUDA support for distributed inference | +| **claude-code-power8** | 21 | Claude Code on IBM POWER8 | +| **llama-cpp-power8** | 18 | LLM inference on PowerPC with vec_perm optimization | +| **nvidia-power8-patches** | 20 | GPU driver patches for ppc64le | + +### Published Packages (PyPI/npm) +| Package | Version | Installs | +|---------|---------|---------| +| `beacon-skill` | 2.15.1 | PyPI + npm | +| `clawrtc` | 1.5.0 
| PyPI | +| `bottube` | 1.6.0 | PyPI | +| `grazer-skill` | 1.6.0 | PyPI | + +### Live Tokens +| Token | Chain | Status | +|-------|-------|--------| +| **RTC** | RustChain native | Live, 20 miners, 88 epochs | +| **wRTC** | Solana | Mint revoked, LP locked, Raydium pool | +| **wRTC** | Base L2 | Mint revoked, LP locked, Aerodrome pool | + +--- + +## External Visibility & Contributions + +### Upstream Contributions (32 PRs to external projects) + +Elyan Labs actively contributes to major open source projects — not just consuming, but improving the ecosystem: + +| Project | PRs | Status | Significance | +|---------|-----|--------|-------------| +| **llama.cpp** (ggml-org) | 5 | Under review | Core LLM inference engine | +| **vLLM** (vllm-project) | 2 | 1 open | Production LLM serving | +| **BitNet** (Microsoft) | 2 | 1 open | 1-bit LLM research | +| **OpenFang** (RightNow-AI) | 2 | 1 open, 1 merged | Agent framework | +| **dn-institute** | 1 | Open ($100 bounty) | Prompt engineering | +| **Awesome lists** (24 repos) | 24 | 3 merged, 12 open | Ecosystem visibility | + +**Merged on notable repos**: Awesome-LLM-Inference, awesome-n64-development, awesome-agentic-patterns + +### Academic Publications +| Paper | Venue | Status | +|-------|-------|--------| +| Grail-V: Non-Bijunctive Attention | CVPR 2026 Workshop | Submitted (Submission #7) | +| Silicon Stratigraphy | JCAA | Rewrite requested | +| 5 Zenodo DOIs | Zenodo | Published | +| 7 Dev.to articles | Dev.to | Published | + +--- + +## Benchmark Context + +### Where Elyan Labs sits in the developer distribution + +**GitClear** (878,592 developer-years analyzed): + +| Percentile | Annual Commits | Elyan Labs (annualized) | +|-----------|---------------|------------------------| +| 50th (median) | 673 | -- | +| 90th | ~2,000 | -- | +| 99th | ~4,000 | -- | +| **99.9th+** | **>5,000** | **~7,500** | + +**Electric Capital** classifies "full-time crypto developer" as 10+ code-committed days/month. 
Elyan Labs codes nearly every day — 3x the threshold. + +**LinearB** (8.1M PRs, 4,800 teams, 42 countries): + +| Metric | Elite Threshold | Elyan Labs | +|--------|----------------|------------| +| Cycle time | <25 hours | Near-instant | +| Focus time/day | 6+ hours | All day | +| Rework rate | <2% | Low | + +--- + +## Honest Assessment: What's Not Working Yet + +Investors should understand the gaps as clearly as the strengths. + +| Gap | Current | Target | Path | +|-----|---------|--------|------| +| **Followers** | 30 | 500+ | Stars are spread across 75+ repos. No single "viral" repo yet. Need one breakout (500+ stars on Rustchain). | +| **External PR merge rate** | 9.4% (3/32) | 30%+ | Many awesome-list PRs awaiting review. llama.cpp PRs closed as duplicates. Need more targeted, higher-quality upstream contributions. | +| **Contributor quality** | Mixed | Verified | Some inbound PRs appear bot-generated (bounty farming). Of 150+ interactions, genuine engaged developers are a subset. Improving triage and verification. | +| **Revenue** | $0 | TBD | No monetization yet. Token (RTC) has internal reference rate ($0.10) but no public exchange listing. | +| **Documentation** | Thin | Production-grade | 97 repos created in 90 days. Many have minimal READMEs. Quality documentation would improve star-to-follow conversion. 
| + +--- + +## Hardware Lab (Physical Infrastructure) + +Unlike most software startups, Elyan Labs operates a physical compute lab built through disciplined hardware acquisition: + +| Asset | Specs | Acquisition | +|-------|-------|-------------| +| **18+ GPUs** | 228GB+ VRAM total | eBay datacenter pulls + pawn shops | +| **IBM POWER8 S824** | 128 threads, 512GB RAM | Enterprise decomm | +| **2x FPGA** (Alveo U30) | Video transcode + inference | Datacenter pull | +| **Hailo-8 TPU** | Edge AI accelerator | Incoming for POWER8 | +| **PowerPC fleet** | 3x G4, 2x G5 | Vintage hardware (RustChain miners) | +| **40GbE interconnect** | POWER8 <-> C4130 GPU server | 0.15ms latency | + +**Total investment**: ~$12,000 +**Estimated retail value**: $40,000-60,000+ +**Acquisition strategy**: 3-5x ROI through pawn shop arbitrage and eBay datacenter decomm sales + +This lab enables R&D that pure-cloud startups cannot economically replicate — particularly the POWER8 vec_perm work that underpins the Grail-V paper. 
+ +--- + +## 6-Month Outlook + +| Metric | Now (90 days) | 6-Month Target | Basis | +|--------|--------------|----------------|-------| +| Commits | 1,882 | 4,000+ | Current velocity sustained | +| Stars | 1,334 | 3,000+ | Viral repo + continued ecosystem growth | +| Forks | 359 | 800+ | Bounty program expanding | +| Followers | 30 | 200+ | Requires star concentration fix | +| Unique interactions | 150+ | 500+ | Bounty expansion + organic discovery | +| Upstream merges | 3 | 15+ | Higher-quality targeted PRs | +| Published packages | 4 | 6+ | Two additional tools planned | + +### Key Inflection Points +- **100 followers**: Social proof threshold for organic discovery +- **500 stars on Rustchain**: GitHub trending eligibility +- **10 upstream merges**: Established open source contributor reputation +- **First exchange listing**: RTC/wRTC price discovery + +--- + +## Summary + +In 90 days with zero external funding, Elyan Labs has: + +- Shipped **97 public repositories** spanning blockchain, AI inference, agent orchestration, and hardware ports +- Generated **1,882 commits** (99.9th percentile of all developers globally) +- Attracted **150+ unique developer interactions** (from zero) +- Earned **1,334 GitHub stars** and **359 forks** +- Contributed **32 PRs to external projects** including llama.cpp, vLLM, and Microsoft BitNet +- Published **1 CVPR workshop paper** and **5 Zenodo DOIs** +- Deployed live tokens on **3 chains** (native RTC, Solana wRTC, Base wRTC) +- Built all of this on **$12,000 of pawn-shop hardware** + +The question isn't whether this developer can build. The question is what happens when this velocity gets fuel. 
+ +--- + +## Data Sources + +| Source | Coverage | Link | +|--------|----------|------| +| GitHub API | Live pull, March 2, 2026 | github.com/Scottcjn | +| GitClear | 878K developer-years | [gitclear.com/research](https://www.gitclear.com/research_studies/git_commit_count_percentiles_annual_days_active_from_largest_data_set) | +| LinearB | 8.1M PRs, 4,800 teams | [linearb.io/benchmarks](https://linearb.io/resources/software-engineering-benchmarks-report) | +| GitHub Octoverse | 180M+ developers, 2025 | [octoverse.github.com](https://octoverse.github.com/) | +| Electric Capital | Crypto developer ecosystem | [developerreport.com](https://www.developerreport.com) | +| Sei Protocol | $85M funded, 78 contributors | [github.com/sei-protocol](https://github.com/sei-protocol/sei-chain) | +| Aztec Network | $119M funded, 133 contributors | [github.com/AztecProtocol](https://github.com/AztecProtocol/aztec-packages) | + +--- + +*Elyan Labs LLC — Louisiana, US* +*scott@elyanlabs.ai | @RustchainPOA | github.com/Scottcjn* diff --git a/docs/RIP-305-cross-chain-airdrop.md b/docs/RIP-305-cross-chain-airdrop.md index c72c8e3d..015fe434 100644 --- a/docs/RIP-305-cross-chain-airdrop.md +++ b/docs/RIP-305-cross-chain-airdrop.md @@ -1,206 +1,206 @@ -# RIP-305: Cross-Chain Airdrop Protocol - -**Status**: Draft -**Author**: Scott (Flameholder), Elyan Labs -**Created**: 2026-03-07 -**Allocation**: 50,000 RTC (0.6% of total supply) - ---- - -## Abstract - -RIP-305 defines a cross-chain airdrop mechanism for distributing wrapped RTC (wRTC) tokens on Solana and Base L2. The protocol incentivizes ecosystem participation while implementing anti-Sybil measures including minimum wallet balance requirements, GitHub contribution verification, and wallet age checks. - -## Motivation - -RustChain's contributor base is growing (214+ recipients, 2,948+ stars) but remains concentrated on GitHub. 
Cross-chain airdrops on Solana and Base expose RTC to established DeFi/Web3 communities, creating liquidity pathways and broader awareness. - -The airdrop uses a fee recycling flywheel: distributed RTC generates transaction fees (RIP-303 gas), which flow back to the community fund for subsequent airdrop stages. - -## Specification - -### 1. Token Contracts - -#### Solana (SPL Token) -- **Symbol**: wRTC -- **Decimals**: 6 (matches RTC internal precision) -- **Mint Authority**: Elyan Labs multisig (upgradeable to DAO) -- **Allocation**: 30,000 wRTC - -#### Base (ERC-20) -- **Symbol**: wRTC -- **Decimals**: 6 -- **Contract**: OpenZeppelin ERC-20 with mint/burn + Ownable -- **Allocation**: 20,000 wRTC - -### 2. Bridge Mechanism - -Phase 1 (Admin Bridge): -``` -Lock: POST /bridge/lock {wallet, amount, target_chain, target_address} - -> Locks RTC on RustChain, returns lock_id - -> Admin mints equivalent wRTC on target chain - -Release: POST /bridge/release {lock_id, burn_tx_hash} - -> Verifies burn on target chain - -> Releases RTC on RustChain -``` - -Phase 2 (Trustless Bridge): -- Ergo anchor commitments serve as cross-chain proofs -- Lock/mint verified by attestation node consensus (2-of-3) - -### 3. 
Eligibility Requirements - -Claimants must satisfy BOTH GitHub contribution AND wallet requirements: - -#### GitHub Contribution (any one): -| Tier | Requirement | Base Claim | -|------|------------|------------| -| Stargazer | 10+ Scottcjn repos starred | 25 wRTC | -| Contributor | 1+ merged PR | 50 wRTC | -| Builder | 3+ merged PRs | 100 wRTC | -| Security | Verified vulnerability found | 150 wRTC | -| Core | 5+ merged PRs or Star King badge | 200 wRTC | -| Miner | Active attestation history | 100 wRTC | - -#### Wallet Requirements (anti-Sybil): -| Chain | Minimum Balance | Wallet Age | -|-------|----------------|------------| -| Solana | 0.1 SOL (~$15) | 7+ days | -| Base | 0.01 ETH (~$25) | 7+ days | - -#### Wallet Value Multiplier: -| Solana Balance | Base Balance | Multiplier | -|---------------|-------------|------------| -| 0.1-1 SOL | 0.01-0.1 ETH | 1.0x | -| 1-10 SOL | 0.1-1 ETH | 1.5x | -| 10+ SOL | 1+ ETH | 2.0x | - -### 4. Anti-Sybil Stack - -| Check | Blocks | -|-------|--------| -| Minimum wallet balance | Empty wallet farms | -| Wallet age > 7 days | Just-created wallets | -| GitHub account age > 30 days | Fresh bot accounts | -| GitHub OAuth (unique) | Multi-claim from same account | -| One claim per GitHub account | Double-dipping across chains | -| One claim per wallet address | Wallet recycling | -| RustChain wallet binding | Links on-chain identity | - -### 5. Staged Distribution - -``` -Stage 1 (Seed): 50,000 RTC allocated - - Solana: 30,000 wRTC - - Base: 20,000 wRTC - -Stage 2 (Recycle): Fees from RTC transactions (RIP-303 gas) - - Community fund receives fee revenue - - Portion allocated to next airdrop round - - Minimum 30-day cycle between stages - -Stage 3 (Organic): Community governance decides allocation - - RIP-0002 governance votes on subsequent airdrops - - Fee pool sustains ongoing distribution -``` - -### 6. Claim Flow - -``` -1. User visits airdrop.rustchain.org -2. Connects GitHub (OAuth) -> verifies contribution tier -3. 
Generates or enters RustChain wallet name -4. Connects Solana (Phantom) or Base (MetaMask) wallet -5. System checks: - a. GitHub eligibility (stars, PRs, mining) - b. Wallet minimum balance - c. Wallet age - d. No previous claim -6. If eligible: RTC locked on RustChain, wRTC minted to target wallet -7. Claim receipt stored on-chain with tx hashes -``` - -### 7. Claim API Endpoints - -``` -GET /airdrop/eligibility?github={username} - -> Returns tier, base_claim, requirements_met - -POST /airdrop/claim - { - github_token: "oauth_token", - rtc_wallet: "my-wallet-name", - target_chain: "solana" | "base", - target_address: "wallet_address" - } - -> Validates eligibility + anti-Sybil - -> Locks RTC, returns mint instructions - -GET /airdrop/status - -> Total distributed, remaining, claims by chain - -GET /airdrop/leaderboard - -> Top claimants by tier -``` - -### 8. Token Metadata - -#### Solana -```json -{ - "name": "Wrapped RustChain Token", - "symbol": "wRTC", - "description": "Wrapped RTC from RustChain Proof-of-Antiquity blockchain. 1 wRTC = 1 RTC locked on RustChain.", - "image": "https://rustchain.org/assets/wrtc-logo.png", - "external_url": "https://rustchain.org", - "attributes": [ - {"trait_type": "Bridge", "value": "RustChain Native Bridge"}, - {"trait_type": "Backing", "value": "1:1 RTC locked"} - ] -} -``` - -#### Base (ERC-20) -```solidity -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.20; - -import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -import "@openzeppelin/contracts/access/Ownable.sol"; - -contract WrappedRTC is ERC20, Ownable { - constructor() ERC20("Wrapped RustChain Token", "wRTC") Ownable(msg.sender) {} - - function mint(address to, uint256 amount) external onlyOwner { - _mint(to, amount); - } - - function burn(uint256 amount) external { - _burn(msg.sender, amount); - } - - function decimals() public pure override returns (uint8) { - return 6; - } -} -``` - -## Security Considerations - -1. 
**Bridge risk**: Phase 1 admin bridge is centralized. Mitigated by transparent lock ledger and small initial allocation. -2. **Sybil attacks**: Multi-layer checks (wallet balance + age + GitHub OAuth + claim limits) make farming uneconomical. -3. **Price manipulation**: wRTC is backed 1:1 by locked RTC. No fractional reserve. -4. **Smart contract risk**: Base ERC-20 uses audited OpenZeppelin contracts. Solana SPL is standard token program. - -## Backwards Compatibility - -RIP-305 is additive. Existing RTC balances, mining, and RIP-303 gas are unaffected. The bridge creates a new distribution channel without modifying core protocol. - -## References - -- RIP-303: RTC Gas for Beacon (fee mechanism) -- RIP-302: Agent Economy (job marketplace) -- RIP-0002: Governance System -- BOUNTY_LEDGER.md: Payment transparency +# RIP-305: Cross-Chain Airdrop Protocol + +**Status**: Draft +**Author**: Scott (Flameholder), Elyan Labs +**Created**: 2026-03-07 +**Allocation**: 50,000 RTC (0.6% of total supply) + +--- + +## Abstract + +RIP-305 defines a cross-chain airdrop mechanism for distributing wrapped RTC (wRTC) tokens on Solana and Base L2. The protocol incentivizes ecosystem participation while implementing anti-Sybil measures including minimum wallet balance requirements, GitHub contribution verification, and wallet age checks. + +## Motivation + +RustChain's contributor base is growing (214+ recipients, 2,948+ stars) but remains concentrated on GitHub. Cross-chain airdrops on Solana and Base expose RTC to established DeFi/Web3 communities, creating liquidity pathways and broader awareness. + +The airdrop uses a fee recycling flywheel: distributed RTC generates transaction fees (RIP-303 gas), which flow back to the community fund for subsequent airdrop stages. + +## Specification + +### 1. 
Token Contracts + +#### Solana (SPL Token) +- **Symbol**: wRTC +- **Decimals**: 6 (matches RTC internal precision) +- **Mint Authority**: Elyan Labs multisig (upgradeable to DAO) +- **Allocation**: 30,000 wRTC + +#### Base (ERC-20) +- **Symbol**: wRTC +- **Decimals**: 6 +- **Contract**: OpenZeppelin ERC-20 with mint/burn + Ownable +- **Allocation**: 20,000 wRTC + +### 2. Bridge Mechanism + +Phase 1 (Admin Bridge): +``` +Lock: POST /bridge/lock {wallet, amount, target_chain, target_address} + -> Locks RTC on RustChain, returns lock_id + -> Admin mints equivalent wRTC on target chain + +Release: POST /bridge/release {lock_id, burn_tx_hash} + -> Verifies burn on target chain + -> Releases RTC on RustChain +``` + +Phase 2 (Trustless Bridge): +- Ergo anchor commitments serve as cross-chain proofs +- Lock/mint verified by attestation node consensus (2-of-3) + +### 3. Eligibility Requirements + +Claimants must satisfy BOTH GitHub contribution AND wallet requirements: + +#### GitHub Contribution (any one): +| Tier | Requirement | Base Claim | +|------|------------|------------| +| Stargazer | 10+ Scottcjn repos starred | 25 wRTC | +| Contributor | 1+ merged PR | 50 wRTC | +| Builder | 3+ merged PRs | 100 wRTC | +| Security | Verified vulnerability found | 150 wRTC | +| Core | 5+ merged PRs or Star King badge | 200 wRTC | +| Miner | Active attestation history | 100 wRTC | + +#### Wallet Requirements (anti-Sybil): +| Chain | Minimum Balance | Wallet Age | +|-------|----------------|------------| +| Solana | 0.1 SOL (~$15) | 7+ days | +| Base | 0.01 ETH (~$25) | 7+ days | + +#### Wallet Value Multiplier: +| Solana Balance | Base Balance | Multiplier | +|---------------|-------------|------------| +| 0.1-1 SOL | 0.01-0.1 ETH | 1.0x | +| 1-10 SOL | 0.1-1 ETH | 1.5x | +| 10+ SOL | 1+ ETH | 2.0x | + +### 4. 
Anti-Sybil Stack + +| Check | Blocks | +|-------|--------| +| Minimum wallet balance | Empty wallet farms | +| Wallet age > 7 days | Just-created wallets | +| GitHub account age > 30 days | Fresh bot accounts | +| GitHub OAuth (unique) | Multi-claim from same account | +| One claim per GitHub account | Double-dipping across chains | +| One claim per wallet address | Wallet recycling | +| RustChain wallet binding | Links on-chain identity | + +### 5. Staged Distribution + +``` +Stage 1 (Seed): 50,000 RTC allocated + - Solana: 30,000 wRTC + - Base: 20,000 wRTC + +Stage 2 (Recycle): Fees from RTC transactions (RIP-303 gas) + - Community fund receives fee revenue + - Portion allocated to next airdrop round + - Minimum 30-day cycle between stages + +Stage 3 (Organic): Community governance decides allocation + - RIP-0002 governance votes on subsequent airdrops + - Fee pool sustains ongoing distribution +``` + +### 6. Claim Flow + +``` +1. User visits airdrop.rustchain.org +2. Connects GitHub (OAuth) -> verifies contribution tier +3. Generates or enters RustChain wallet name +4. Connects Solana (Phantom) or Base (MetaMask) wallet +5. System checks: + a. GitHub eligibility (stars, PRs, mining) + b. Wallet minimum balance + c. Wallet age + d. No previous claim +6. If eligible: RTC locked on RustChain, wRTC minted to target wallet +7. Claim receipt stored on-chain with tx hashes +``` + +### 7. Claim API Endpoints + +``` +GET /airdrop/eligibility?github={username} + -> Returns tier, base_claim, requirements_met + +POST /airdrop/claim + { + github_token: "oauth_token", + rtc_wallet: "my-wallet-name", + target_chain: "solana" | "base", + target_address: "wallet_address" + } + -> Validates eligibility + anti-Sybil + -> Locks RTC, returns mint instructions + +GET /airdrop/status + -> Total distributed, remaining, claims by chain + +GET /airdrop/leaderboard + -> Top claimants by tier +``` + +### 8. 
Token Metadata + +#### Solana +```json +{ + "name": "Wrapped RustChain Token", + "symbol": "wRTC", + "description": "Wrapped RTC from RustChain Proof-of-Antiquity blockchain. 1 wRTC = 1 RTC locked on RustChain.", + "image": "https://rustchain.org/assets/wrtc-logo.png", + "external_url": "https://rustchain.org", + "attributes": [ + {"trait_type": "Bridge", "value": "RustChain Native Bridge"}, + {"trait_type": "Backing", "value": "1:1 RTC locked"} + ] +} +``` + +#### Base (ERC-20) +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; +import "@openzeppelin/contracts/access/Ownable.sol"; + +contract WrappedRTC is ERC20, Ownable { + constructor() ERC20("Wrapped RustChain Token", "wRTC") Ownable(msg.sender) {} + + function mint(address to, uint256 amount) external onlyOwner { + _mint(to, amount); + } + + function burn(uint256 amount) external { + _burn(msg.sender, amount); + } + + function decimals() public pure override returns (uint8) { + return 6; + } +} +``` + +## Security Considerations + +1. **Bridge risk**: Phase 1 admin bridge is centralized. Mitigated by transparent lock ledger and small initial allocation. +2. **Sybil attacks**: Multi-layer checks (wallet balance + age + GitHub OAuth + claim limits) make farming uneconomical. +3. **Price manipulation**: wRTC is backed 1:1 by locked RTC. No fractional reserve. +4. **Smart contract risk**: Base ERC-20 uses audited OpenZeppelin contracts. Solana SPL is standard token program. + +## Backwards Compatibility + +RIP-305 is additive. Existing RTC balances, mining, and RIP-303 gas are unaffected. The bridge creates a new distribution channel without modifying core protocol. 
+ +## References + +- RIP-303: RTC Gas for Beacon (fee mechanism) +- RIP-302: Agent Economy (job marketplace) +- RIP-0002: Governance System +- BOUNTY_LEDGER.md: Payment transparency diff --git a/ergo-anchor/rustchain_ergo_anchor.py b/ergo-anchor/rustchain_ergo_anchor.py index 4387f970..81067d31 100644 --- a/ergo-anchor/rustchain_ergo_anchor.py +++ b/ergo-anchor/rustchain_ergo_anchor.py @@ -1,579 +1,579 @@ -#!/usr/bin/env python3 -""" -RustChain Ergo Cross-Chain Anchoring -===================================== - -Phase 4 Implementation: -- Periodic anchoring of RustChain state to Ergo blockchain -- Merkle root commitment transactions -- Anchor verification and proof generation - -Provides finality by anchoring RustChain state to Ergo's PoW chain. -""" - -import os -import time -import json -import hashlib -import logging -import threading -import requests -from typing import Dict, List, Optional, Tuple -from dataclasses import dataclass - -from rustchain_crypto import blake2b256_hex, canonical_json, MerkleTree - -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s [ANCHOR] %(levelname)s: %(message)s' -) -logger = logging.getLogger(__name__) - - -# ============================================================================= -# CONFIGURATION -# ============================================================================= - -# Ergo node endpoints -ERGO_NODE_URL = os.environ.get("ERGO_NODE_URL", "http://localhost:9053") -ERGO_API_KEY = os.environ.get("ERGO_API_KEY", "") - -# Anchoring parameters -ANCHOR_INTERVAL_BLOCKS = 144 # Anchor every 144 RustChain blocks (~24 hours) -ANCHOR_CONFIRMATION_DEPTH = 6 # Wait for 6 Ergo confirmations - -# RustChain anchor wallet (holds ERG for anchor fees) -ANCHOR_WALLET_ADDRESS = os.environ.get("ANCHOR_WALLET", "") - - -# ============================================================================= -# ANCHOR COMMITMENT -# ============================================================================= - -@dataclass -class 
AnchorCommitment: - """ - Commitment to be anchored to Ergo. - """ - rustchain_height: int # RustChain block height - rustchain_hash: str # RustChain block hash - state_root: str # State merkle root - attestations_root: str # Attestations merkle root - timestamp: int # Unix timestamp (ms) - commitment_hash: str = "" # Blake2b256 of all fields - - def compute_hash(self) -> str: - """Compute commitment hash""" - data = { - "rc_height": self.rustchain_height, - "rc_hash": self.rustchain_hash, - "state_root": self.state_root, - "attestations_root": self.attestations_root, - "timestamp": self.timestamp - } - return blake2b256_hex(canonical_json(data)) - - def to_dict(self) -> Dict: - """Convert to dictionary""" - if not self.commitment_hash: - self.commitment_hash = self.compute_hash() - return { - "rustchain_height": self.rustchain_height, - "rustchain_hash": self.rustchain_hash, - "state_root": self.state_root, - "attestations_root": self.attestations_root, - "timestamp": self.timestamp, - "commitment_hash": self.commitment_hash - } - - @classmethod - def from_dict(cls, d: Dict) -> "AnchorCommitment": - """Create from dictionary""" - return cls( - rustchain_height=d["rustchain_height"], - rustchain_hash=d["rustchain_hash"], - state_root=d["state_root"], - attestations_root=d["attestations_root"], - timestamp=d["timestamp"], - commitment_hash=d.get("commitment_hash", "") - ) - - -# ============================================================================= -# ERGO CLIENT -# ============================================================================= - -class ErgoClient: - """ - Client for interacting with Ergo node. 
- """ - - def __init__(self, node_url: str = ERGO_NODE_URL, api_key: str = ERGO_API_KEY): - self.node_url = node_url.rstrip('/') - self.api_key = api_key - self.session = requests.Session() - if api_key: - self.session.headers['api_key'] = api_key - - def _get(self, endpoint: str) -> Optional[Dict]: - """Make GET request to Ergo node""" - try: - resp = self.session.get(f"{self.node_url}{endpoint}", timeout=30) - if resp.status_code == 200: - return resp.json() - else: - logger.error(f"Ergo GET {endpoint} failed: {resp.status_code}") - return None - except Exception as e: - logger.error(f"Ergo GET {endpoint} error: {e}") - return None - - def _post(self, endpoint: str, data: Dict) -> Optional[Dict]: - """Make POST request to Ergo node""" - try: - resp = self.session.post( - f"{self.node_url}{endpoint}", - json=data, - timeout=30 - ) - if resp.status_code in [200, 201]: - return resp.json() - else: - logger.error(f"Ergo POST {endpoint} failed: {resp.status_code} - {resp.text}") - return None - except Exception as e: - logger.error(f"Ergo POST {endpoint} error: {e}") - return None - - def get_info(self) -> Optional[Dict]: - """Get node info""" - return self._get("/info") - - def get_height(self) -> int: - """Get current blockchain height""" - info = self.get_info() - return info.get("fullHeight", 0) if info else 0 - - def get_wallet_addresses(self) -> List[str]: - """Get wallet addresses""" - resp = self._get("/wallet/addresses") - return resp if resp else [] - - def get_wallet_balance(self) -> int: - """Get wallet balance in nanoERG""" - resp = self._get("/wallet/balances") - if resp: - return resp.get("balance", 0) - return 0 - - def create_anchor_transaction( - self, - commitment: AnchorCommitment, - fee_nano: int = 1_000_000 # 0.001 ERG - ) -> Optional[str]: - """ - Create an anchor transaction on Ergo. - - Stores commitment hash in a data output. - - Returns transaction ID if successful. 
- """ - commitment_bytes = bytes.fromhex(commitment.commitment_hash) - - # Build transaction request - tx_request = { - "requests": [ - { - "address": ANCHOR_WALLET_ADDRESS, # Send back to self - "value": 1_000_000, # 0.001 ERG (minimum box value) - "registers": { - # R4: RustChain height (Long) - "R4": f"05{commitment.rustchain_height:016x}", - # R5: Commitment hash (Coll[Byte]) - "R5": f"0e40{commitment.commitment_hash}", - # R6: Timestamp (Long) - "R6": f"05{commitment.timestamp:016x}" - } - } - ], - "fee": fee_nano, - "inputsRaw": [] - } - - # Generate transaction - resp = self._post("/wallet/transaction/generate", tx_request) - if not resp: - return None - - # Sign transaction - unsigned_tx = resp - signed = self._post("/wallet/transaction/sign", unsigned_tx) - if not signed: - return None - - # Send transaction - result = self._post("/transactions", signed) - if result: - tx_id = result.get("id") - logger.info(f"Anchor TX submitted: {tx_id}") - return tx_id - - return None - - def get_transaction(self, tx_id: str) -> Optional[Dict]: - """Get transaction by ID""" - return self._get(f"/transactions/{tx_id}") - - def get_transaction_confirmations(self, tx_id: str) -> int: - """Get number of confirmations for transaction""" - tx = self.get_transaction(tx_id) - if tx and "numConfirmations" in tx: - return tx["numConfirmations"] - - # Try getting from mempool or unconfirmed - unconfirmed = self._get(f"/transactions/unconfirmed/{tx_id}") - if unconfirmed: - return 0 - - return -1 # Transaction not found - - def verify_anchor(self, tx_id: str, commitment: AnchorCommitment) -> Tuple[bool, str]: - """ - Verify an anchor transaction contains the expected commitment. 
- - Returns (is_valid, error_message) - """ - tx = self.get_transaction(tx_id) - if not tx: - return False, "Transaction not found" - - # Check outputs for commitment - for output in tx.get("outputs", []): - registers = output.get("additionalRegisters", {}) - - # Check R5 for commitment hash - r5 = registers.get("R5", {}).get("serializedValue", "") - if r5: - # Remove prefix (0e40 = Coll[Byte] with 64 bytes) - if r5.startswith("0e40"): - stored_hash = r5[4:] - if stored_hash == commitment.commitment_hash: - return True, "" - - return False, "Commitment not found in transaction outputs" - - -# ============================================================================= -# ANCHOR SERVICE -# ============================================================================= - -class AnchorService: - """ - Service for managing RustChain -> Ergo anchoring. - """ - - def __init__( - self, - db_path: str, - ergo_client: ErgoClient = None, - interval_blocks: int = ANCHOR_INTERVAL_BLOCKS - ): - self.db_path = db_path - self.ergo = ergo_client or ErgoClient() - self.interval_blocks = interval_blocks - self._running = False - self._thread = None - - def get_last_anchor(self) -> Optional[Dict]: - """Get the last recorded anchor""" - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - # Ensure table exists - cursor.execute(""" - CREATE TABLE IF NOT EXISTS ergo_anchors ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - rustchain_height INTEGER NOT NULL, - rustchain_hash TEXT NOT NULL, - commitment_hash TEXT NOT NULL, - ergo_tx_id TEXT NOT NULL, - ergo_height INTEGER, - confirmations INTEGER DEFAULT 0, - status TEXT DEFAULT 'pending', - created_at INTEGER NOT NULL - ) - """) - - cursor.execute(""" - SELECT * FROM ergo_anchors - ORDER BY rustchain_height DESC - LIMIT 1 - """) - - row = cursor.fetchone() - return dict(row) if row else None - - def should_anchor(self, current_height: int) -> bool: - """Check if we should 
create a new anchor""" - last = self.get_last_anchor() - - if not last: - return current_height >= self.interval_blocks - - blocks_since = current_height - last["rustchain_height"] - return blocks_since >= self.interval_blocks - - def create_commitment(self, block: Dict) -> AnchorCommitment: - """Create an anchor commitment from a RustChain block""" - return AnchorCommitment( - rustchain_height=block["height"], - rustchain_hash=block["block_hash"], - state_root=block.get("state_root", "0" * 64), - attestations_root=block.get("attestations_hash", "0" * 64), - timestamp=int(time.time() * 1000) - ) - - def submit_anchor(self, commitment: AnchorCommitment) -> Optional[str]: - """Submit an anchor to Ergo""" - commitment.commitment_hash = commitment.compute_hash() - - logger.info(f"Submitting anchor for RC height {commitment.rustchain_height}") - logger.info(f"Commitment hash: {commitment.commitment_hash}") - - tx_id = self.ergo.create_anchor_transaction(commitment) - - if tx_id: - self._save_anchor(commitment, tx_id) - return tx_id - - return None - - def _save_anchor(self, commitment: AnchorCommitment, tx_id: str): - """Save anchor record to database""" - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - - cursor.execute(""" - INSERT INTO ergo_anchors - (rustchain_height, rustchain_hash, commitment_hash, - ergo_tx_id, status, created_at) - VALUES (?, ?, ?, ?, 'pending', ?) - """, ( - commitment.rustchain_height, - commitment.rustchain_hash, - commitment.commitment_hash, - tx_id, - int(time.time()) - )) - - def update_anchor_status(self, tx_id: str) -> Tuple[int, str]: - """ - Update anchor status based on Ergo confirmations. 
- - Returns (confirmations, status) - """ - confirmations = self.ergo.get_transaction_confirmations(tx_id) - - if confirmations < 0: - status = "not_found" - elif confirmations == 0: - status = "pending" - elif confirmations < ANCHOR_CONFIRMATION_DEPTH: - status = "confirming" - else: - status = "confirmed" - - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - cursor.execute(""" - UPDATE ergo_anchors - SET confirmations = ?, status = ? - WHERE ergo_tx_id = ? - """, (confirmations, status, tx_id)) - - return confirmations, status - - def get_anchor_proof(self, rustchain_height: int) -> Optional[Dict]: - """ - Get proof that a RustChain height was anchored to Ergo. - - Returns anchor details including Ergo transaction. - """ - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT * FROM ergo_anchors - WHERE rustchain_height <= ? - ORDER BY rustchain_height DESC - LIMIT 1 - """, (rustchain_height,)) - - row = cursor.fetchone() - if not row: - return None - - anchor = dict(row) - - # Get Ergo transaction details - tx = self.ergo.get_transaction(anchor["ergo_tx_id"]) - if tx: - anchor["ergo_transaction"] = tx - - return anchor - - def start(self, check_interval: int = 60): - """Start the anchor monitoring thread""" - if self._running: - return - - self._running = True - self._thread = threading.Thread( - target=self._monitor_loop, - args=(check_interval,), - daemon=True - ) - self._thread.start() - logger.info("Anchor service started") - - def stop(self): - """Stop the anchor monitoring thread""" - self._running = False - if self._thread: - self._thread.join(timeout=5) - logger.info("Anchor service stopped") - - def _monitor_loop(self, interval: int): - """Monitor pending anchors and update status""" - import sqlite3 - - while self._running: - try: - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = 
conn.cursor() - - # Get pending anchors - cursor.execute(""" - SELECT ergo_tx_id FROM ergo_anchors - WHERE status IN ('pending', 'confirming') - """) - - for row in cursor.fetchall(): - tx_id = row["ergo_tx_id"] - confs, status = self.update_anchor_status(tx_id) - logger.debug(f"Anchor {tx_id[:16]}... = {confs} confirmations ({status})") - - except Exception as e: - logger.error(f"Anchor monitor error: {e}") - - time.sleep(interval) - - -# ============================================================================= -# API ROUTES -# ============================================================================= - -def create_anchor_api_routes(app, anchor_service: AnchorService): - """Create Flask routes for anchor API""" - from flask import request, jsonify - - @app.route('/anchor/status', methods=['GET']) - def anchor_status(): - """Get anchoring service status""" - last = anchor_service.get_last_anchor() - ergo_height = anchor_service.ergo.get_height() - - return jsonify({ - "ergo_connected": ergo_height > 0, - "ergo_height": ergo_height, - "interval_blocks": anchor_service.interval_blocks, - "last_anchor": last - }) - - @app.route('/anchor/proof/', methods=['GET']) - def get_anchor_proof(height: int): - """Get anchor proof for a RustChain height""" - proof = anchor_service.get_anchor_proof(height) - if proof: - return jsonify(proof) - return jsonify({"error": "No anchor found for height"}), 404 - - @app.route('/anchor/list', methods=['GET']) - def list_anchors(): - """List all anchors""" - import sqlite3 - - limit = request.args.get('limit', 50, type=int) - offset = request.args.get('offset', 0, type=int) - - with sqlite3.connect(anchor_service.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT * FROM ergo_anchors - ORDER BY rustchain_height DESC - LIMIT ? OFFSET ? 
- """, (limit, offset)) - - anchors = [dict(row) for row in cursor.fetchall()] - - return jsonify({ - "count": len(anchors), - "anchors": anchors - }) - - -# ============================================================================= -# TESTING -# ============================================================================= - -if __name__ == "__main__": - print("=" * 70) - print("RustChain Ergo Anchoring - Test Suite") - print("=" * 70) - - # Test commitment creation - print("\n=== Commitment Creation ===") - commitment = AnchorCommitment( - rustchain_height=1000, - rustchain_hash="abc123" + "0" * 58, - state_root="def456" + "0" * 58, - attestations_root="789ghi" + "0" * 58, - timestamp=int(time.time() * 1000) - ) - - print(f"RC Height: {commitment.rustchain_height}") - print(f"RC Hash: {commitment.rustchain_hash[:16]}...") - print(f"Commitment Hash: {commitment.compute_hash()}") - - # Test serialization - print("\n=== Serialization ===") - d = commitment.to_dict() - print(f"Dict keys: {list(d.keys())}") - - restored = AnchorCommitment.from_dict(d) - print(f"Restored hash matches: {restored.compute_hash() == commitment.compute_hash()}") - - # Test Ergo client (if node available) - print("\n=== Ergo Client ===") - client = ErgoClient() - info = client.get_info() - - if info: - print(f"Connected to Ergo node") - print(f"Height: {info.get('fullHeight', 'N/A')}") - print(f"Network: {info.get('network', 'N/A')}") - else: - print("Could not connect to Ergo node (this is expected in testing)") - - print("\n" + "=" * 70) - print("Tests complete!") - print("=" * 70) +#!/usr/bin/env python3 +""" +RustChain Ergo Cross-Chain Anchoring +===================================== + +Phase 4 Implementation: +- Periodic anchoring of RustChain state to Ergo blockchain +- Merkle root commitment transactions +- Anchor verification and proof generation + +Provides finality by anchoring RustChain state to Ergo's PoW chain. 
+""" + +import os +import time +import json +import hashlib +import logging +import threading +import requests +from typing import Dict, List, Optional, Tuple +from dataclasses import dataclass + +from rustchain_crypto import blake2b256_hex, canonical_json, MerkleTree + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [ANCHOR] %(levelname)s: %(message)s' +) +logger = logging.getLogger(__name__) + + +# ============================================================================= +# CONFIGURATION +# ============================================================================= + +# Ergo node endpoints +ERGO_NODE_URL = os.environ.get("ERGO_NODE_URL", "http://localhost:9053") +ERGO_API_KEY = os.environ.get("ERGO_API_KEY", "") + +# Anchoring parameters +ANCHOR_INTERVAL_BLOCKS = 144 # Anchor every 144 RustChain blocks (~24 hours) +ANCHOR_CONFIRMATION_DEPTH = 6 # Wait for 6 Ergo confirmations + +# RustChain anchor wallet (holds ERG for anchor fees) +ANCHOR_WALLET_ADDRESS = os.environ.get("ANCHOR_WALLET", "") + + +# ============================================================================= +# ANCHOR COMMITMENT +# ============================================================================= + +@dataclass +class AnchorCommitment: + """ + Commitment to be anchored to Ergo. 
+ """ + rustchain_height: int # RustChain block height + rustchain_hash: str # RustChain block hash + state_root: str # State merkle root + attestations_root: str # Attestations merkle root + timestamp: int # Unix timestamp (ms) + commitment_hash: str = "" # Blake2b256 of all fields + + def compute_hash(self) -> str: + """Compute commitment hash""" + data = { + "rc_height": self.rustchain_height, + "rc_hash": self.rustchain_hash, + "state_root": self.state_root, + "attestations_root": self.attestations_root, + "timestamp": self.timestamp + } + return blake2b256_hex(canonical_json(data)) + + def to_dict(self) -> Dict: + """Convert to dictionary""" + if not self.commitment_hash: + self.commitment_hash = self.compute_hash() + return { + "rustchain_height": self.rustchain_height, + "rustchain_hash": self.rustchain_hash, + "state_root": self.state_root, + "attestations_root": self.attestations_root, + "timestamp": self.timestamp, + "commitment_hash": self.commitment_hash + } + + @classmethod + def from_dict(cls, d: Dict) -> "AnchorCommitment": + """Create from dictionary""" + return cls( + rustchain_height=d["rustchain_height"], + rustchain_hash=d["rustchain_hash"], + state_root=d["state_root"], + attestations_root=d["attestations_root"], + timestamp=d["timestamp"], + commitment_hash=d.get("commitment_hash", "") + ) + + +# ============================================================================= +# ERGO CLIENT +# ============================================================================= + +class ErgoClient: + """ + Client for interacting with Ergo node. 
+ """ + + def __init__(self, node_url: str = ERGO_NODE_URL, api_key: str = ERGO_API_KEY): + self.node_url = node_url.rstrip('/') + self.api_key = api_key + self.session = requests.Session() + if api_key: + self.session.headers['api_key'] = api_key + + def _get(self, endpoint: str) -> Optional[Dict]: + """Make GET request to Ergo node""" + try: + resp = self.session.get(f"{self.node_url}{endpoint}", timeout=30) + if resp.status_code == 200: + return resp.json() + else: + logger.error(f"Ergo GET {endpoint} failed: {resp.status_code}") + return None + except Exception as e: + logger.error(f"Ergo GET {endpoint} error: {e}") + return None + + def _post(self, endpoint: str, data: Dict) -> Optional[Dict]: + """Make POST request to Ergo node""" + try: + resp = self.session.post( + f"{self.node_url}{endpoint}", + json=data, + timeout=30 + ) + if resp.status_code in [200, 201]: + return resp.json() + else: + logger.error(f"Ergo POST {endpoint} failed: {resp.status_code} - {resp.text}") + return None + except Exception as e: + logger.error(f"Ergo POST {endpoint} error: {e}") + return None + + def get_info(self) -> Optional[Dict]: + """Get node info""" + return self._get("/info") + + def get_height(self) -> int: + """Get current blockchain height""" + info = self.get_info() + return info.get("fullHeight", 0) if info else 0 + + def get_wallet_addresses(self) -> List[str]: + """Get wallet addresses""" + resp = self._get("/wallet/addresses") + return resp if resp else [] + + def get_wallet_balance(self) -> int: + """Get wallet balance in nanoERG""" + resp = self._get("/wallet/balances") + if resp: + return resp.get("balance", 0) + return 0 + + def create_anchor_transaction( + self, + commitment: AnchorCommitment, + fee_nano: int = 1_000_000 # 0.001 ERG + ) -> Optional[str]: + """ + Create an anchor transaction on Ergo. + + Stores commitment hash in a data output. + + Returns transaction ID if successful. 
+ """ + commitment_bytes = bytes.fromhex(commitment.commitment_hash) + + # Build transaction request + tx_request = { + "requests": [ + { + "address": ANCHOR_WALLET_ADDRESS, # Send back to self + "value": 1_000_000, # 0.001 ERG (minimum box value) + "registers": { + # R4: RustChain height (Long) + "R4": f"05{commitment.rustchain_height:016x}", + # R5: Commitment hash (Coll[Byte]) + "R5": f"0e40{commitment.commitment_hash}", + # R6: Timestamp (Long) + "R6": f"05{commitment.timestamp:016x}" + } + } + ], + "fee": fee_nano, + "inputsRaw": [] + } + + # Generate transaction + resp = self._post("/wallet/transaction/generate", tx_request) + if not resp: + return None + + # Sign transaction + unsigned_tx = resp + signed = self._post("/wallet/transaction/sign", unsigned_tx) + if not signed: + return None + + # Send transaction + result = self._post("/transactions", signed) + if result: + tx_id = result.get("id") + logger.info(f"Anchor TX submitted: {tx_id}") + return tx_id + + return None + + def get_transaction(self, tx_id: str) -> Optional[Dict]: + """Get transaction by ID""" + return self._get(f"/transactions/{tx_id}") + + def get_transaction_confirmations(self, tx_id: str) -> int: + """Get number of confirmations for transaction""" + tx = self.get_transaction(tx_id) + if tx and "numConfirmations" in tx: + return tx["numConfirmations"] + + # Try getting from mempool or unconfirmed + unconfirmed = self._get(f"/transactions/unconfirmed/{tx_id}") + if unconfirmed: + return 0 + + return -1 # Transaction not found + + def verify_anchor(self, tx_id: str, commitment: AnchorCommitment) -> Tuple[bool, str]: + """ + Verify an anchor transaction contains the expected commitment. 
+ + Returns (is_valid, error_message) + """ + tx = self.get_transaction(tx_id) + if not tx: + return False, "Transaction not found" + + # Check outputs for commitment + for output in tx.get("outputs", []): + registers = output.get("additionalRegisters", {}) + + # Check R5 for commitment hash + r5 = registers.get("R5", {}).get("serializedValue", "") + if r5: + # Remove prefix (0e40 = Coll[Byte] with 64 bytes) + if r5.startswith("0e40"): + stored_hash = r5[4:] + if stored_hash == commitment.commitment_hash: + return True, "" + + return False, "Commitment not found in transaction outputs" + + +# ============================================================================= +# ANCHOR SERVICE +# ============================================================================= + +class AnchorService: + """ + Service for managing RustChain -> Ergo anchoring. + """ + + def __init__( + self, + db_path: str, + ergo_client: ErgoClient = None, + interval_blocks: int = ANCHOR_INTERVAL_BLOCKS + ): + self.db_path = db_path + self.ergo = ergo_client or ErgoClient() + self.interval_blocks = interval_blocks + self._running = False + self._thread = None + + def get_last_anchor(self) -> Optional[Dict]: + """Get the last recorded anchor""" + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + # Ensure table exists + cursor.execute(""" + CREATE TABLE IF NOT EXISTS ergo_anchors ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + rustchain_height INTEGER NOT NULL, + rustchain_hash TEXT NOT NULL, + commitment_hash TEXT NOT NULL, + ergo_tx_id TEXT NOT NULL, + ergo_height INTEGER, + confirmations INTEGER DEFAULT 0, + status TEXT DEFAULT 'pending', + created_at INTEGER NOT NULL + ) + """) + + cursor.execute(""" + SELECT * FROM ergo_anchors + ORDER BY rustchain_height DESC + LIMIT 1 + """) + + row = cursor.fetchone() + return dict(row) if row else None + + def should_anchor(self, current_height: int) -> bool: + """Check if we should 
create a new anchor""" + last = self.get_last_anchor() + + if not last: + return current_height >= self.interval_blocks + + blocks_since = current_height - last["rustchain_height"] + return blocks_since >= self.interval_blocks + + def create_commitment(self, block: Dict) -> AnchorCommitment: + """Create an anchor commitment from a RustChain block""" + return AnchorCommitment( + rustchain_height=block["height"], + rustchain_hash=block["block_hash"], + state_root=block.get("state_root", "0" * 64), + attestations_root=block.get("attestations_hash", "0" * 64), + timestamp=int(time.time() * 1000) + ) + + def submit_anchor(self, commitment: AnchorCommitment) -> Optional[str]: + """Submit an anchor to Ergo""" + commitment.commitment_hash = commitment.compute_hash() + + logger.info(f"Submitting anchor for RC height {commitment.rustchain_height}") + logger.info(f"Commitment hash: {commitment.commitment_hash}") + + tx_id = self.ergo.create_anchor_transaction(commitment) + + if tx_id: + self._save_anchor(commitment, tx_id) + return tx_id + + return None + + def _save_anchor(self, commitment: AnchorCommitment, tx_id: str): + """Save anchor record to database""" + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + + cursor.execute(""" + INSERT INTO ergo_anchors + (rustchain_height, rustchain_hash, commitment_hash, + ergo_tx_id, status, created_at) + VALUES (?, ?, ?, ?, 'pending', ?) + """, ( + commitment.rustchain_height, + commitment.rustchain_hash, + commitment.commitment_hash, + tx_id, + int(time.time()) + )) + + def update_anchor_status(self, tx_id: str) -> Tuple[int, str]: + """ + Update anchor status based on Ergo confirmations. 
+ + Returns (confirmations, status) + """ + confirmations = self.ergo.get_transaction_confirmations(tx_id) + + if confirmations < 0: + status = "not_found" + elif confirmations == 0: + status = "pending" + elif confirmations < ANCHOR_CONFIRMATION_DEPTH: + status = "confirming" + else: + status = "confirmed" + + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + cursor.execute(""" + UPDATE ergo_anchors + SET confirmations = ?, status = ? + WHERE ergo_tx_id = ? + """, (confirmations, status, tx_id)) + + return confirmations, status + + def get_anchor_proof(self, rustchain_height: int) -> Optional[Dict]: + """ + Get proof that a RustChain height was anchored to Ergo. + + Returns anchor details including Ergo transaction. + """ + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT * FROM ergo_anchors + WHERE rustchain_height <= ? + ORDER BY rustchain_height DESC + LIMIT 1 + """, (rustchain_height,)) + + row = cursor.fetchone() + if not row: + return None + + anchor = dict(row) + + # Get Ergo transaction details + tx = self.ergo.get_transaction(anchor["ergo_tx_id"]) + if tx: + anchor["ergo_transaction"] = tx + + return anchor + + def start(self, check_interval: int = 60): + """Start the anchor monitoring thread""" + if self._running: + return + + self._running = True + self._thread = threading.Thread( + target=self._monitor_loop, + args=(check_interval,), + daemon=True + ) + self._thread.start() + logger.info("Anchor service started") + + def stop(self): + """Stop the anchor monitoring thread""" + self._running = False + if self._thread: + self._thread.join(timeout=5) + logger.info("Anchor service stopped") + + def _monitor_loop(self, interval: int): + """Monitor pending anchors and update status""" + import sqlite3 + + while self._running: + try: + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = 
conn.cursor() + + # Get pending anchors + cursor.execute(""" + SELECT ergo_tx_id FROM ergo_anchors + WHERE status IN ('pending', 'confirming') + """) + + for row in cursor.fetchall(): + tx_id = row["ergo_tx_id"] + confs, status = self.update_anchor_status(tx_id) + logger.debug(f"Anchor {tx_id[:16]}... = {confs} confirmations ({status})") + + except Exception as e: + logger.error(f"Anchor monitor error: {e}") + + time.sleep(interval) + + +# ============================================================================= +# API ROUTES +# ============================================================================= + +def create_anchor_api_routes(app, anchor_service: AnchorService): + """Create Flask routes for anchor API""" + from flask import request, jsonify + + @app.route('/anchor/status', methods=['GET']) + def anchor_status(): + """Get anchoring service status""" + last = anchor_service.get_last_anchor() + ergo_height = anchor_service.ergo.get_height() + + return jsonify({ + "ergo_connected": ergo_height > 0, + "ergo_height": ergo_height, + "interval_blocks": anchor_service.interval_blocks, + "last_anchor": last + }) + + @app.route('/anchor/proof/', methods=['GET']) + def get_anchor_proof(height: int): + """Get anchor proof for a RustChain height""" + proof = anchor_service.get_anchor_proof(height) + if proof: + return jsonify(proof) + return jsonify({"error": "No anchor found for height"}), 404 + + @app.route('/anchor/list', methods=['GET']) + def list_anchors(): + """List all anchors""" + import sqlite3 + + limit = request.args.get('limit', 50, type=int) + offset = request.args.get('offset', 0, type=int) + + with sqlite3.connect(anchor_service.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT * FROM ergo_anchors + ORDER BY rustchain_height DESC + LIMIT ? OFFSET ? 
+ """, (limit, offset)) + + anchors = [dict(row) for row in cursor.fetchall()] + + return jsonify({ + "count": len(anchors), + "anchors": anchors + }) + + +# ============================================================================= +# TESTING +# ============================================================================= + +if __name__ == "__main__": + print("=" * 70) + print("RustChain Ergo Anchoring - Test Suite") + print("=" * 70) + + # Test commitment creation + print("\n=== Commitment Creation ===") + commitment = AnchorCommitment( + rustchain_height=1000, + rustchain_hash="abc123" + "0" * 58, + state_root="def456" + "0" * 58, + attestations_root="789ghi" + "0" * 58, + timestamp=int(time.time() * 1000) + ) + + print(f"RC Height: {commitment.rustchain_height}") + print(f"RC Hash: {commitment.rustchain_hash[:16]}...") + print(f"Commitment Hash: {commitment.compute_hash()}") + + # Test serialization + print("\n=== Serialization ===") + d = commitment.to_dict() + print(f"Dict keys: {list(d.keys())}") + + restored = AnchorCommitment.from_dict(d) + print(f"Restored hash matches: {restored.compute_hash() == commitment.compute_hash()}") + + # Test Ergo client (if node available) + print("\n=== Ergo Client ===") + client = ErgoClient() + info = client.get_info() + + if info: + print(f"Connected to Ergo node") + print(f"Height: {info.get('fullHeight', 'N/A')}") + print(f"Network: {info.get('network', 'N/A')}") + else: + print("Could not connect to Ergo node (this is expected in testing)") + + print("\n" + "=" * 70) + print("Tests complete!") + print("=" * 70) diff --git a/miners/clawrtc/pow_miners.py b/miners/clawrtc/pow_miners.py index 7881e1ff..808670cd 100644 --- a/miners/clawrtc/pow_miners.py +++ b/miners/clawrtc/pow_miners.py @@ -1,619 +1,619 @@ -#!/usr/bin/env python3 -""" -RustChain Dual-Mining: PoW Miner Detection & Proof Generation - -Detects running PoW miners (Ergo, Warthog, Kaspa, Monero, etc.) 
#!/usr/bin/env python3
"""
RustChain Dual-Mining: PoW Miner Detection & Proof Generation

Detects running PoW miners (Ergo, Warthog, Kaspa, Monero, etc.)
and generates proof of parallel mining for RTC bonus multipliers.

RIP-PoA attestation costs ZERO compute — it's just hardware fingerprinting.
PoW miners keep 100% of CPU/GPU for hashing. RTC is free bonus income.

Supported chains:
  - Ergo (Autolykos2) — CPU/GPU mineable
  - Warthog (Janushash) — CPU mineable
  - Kaspa (kHeavyHash) — GPU mineable
  - Monero (RandomX) — CPU mineable
  - Zephyr (RandomX) — CPU mineable
  - Alephium (Blake3) — CPU/GPU mineable
  - Verus (VerusHash 2.2) — CPU mineable
  - Neoxa (KawPow) — GPU mineable
  - DERO (AstroBWT) — CPU mineable
  - Raptoreum (GhostRider) — CPU mineable
  - Wownero (RandomX) — CPU mineable
  - Salvium (RandomX) — CPU mineable
  - Conceal (CryptoNight-GPU) — GPU mineable
  - Scala (RandomX) — CPU mineable
  - Generic — any coin with HTTP stats API

Bonus multipliers (stacking with hardware weight):
  - Node RPC proof: 1.5x (local node running + responding)
  - Pool account proof: 1.3x (third-party verified hashrate)
  - Process detection: 1.15x (miner process running)
"""

import hashlib
import json
import os
import platform
import subprocess
import time
from typing import Dict, List, Optional, Tuple


# ============================================================
# Known PoW Miner Signatures
# ============================================================

# Per-chain detection metadata:
#   node_ports         - local RPC ports probed to confirm a running node
#   process_names      - substrings matched against the process table
#   node_info_path     - HTTP path used by the generic RPC probe
#   pool_api_templates - pool stats URLs, keyed by pool name, with {address}
KNOWN_MINERS = {
    "ergo": {
        "display": "Ergo (Autolykos2)",
        "algo": "autolykos2",
        "node_ports": [9053, 9052],
        "process_names": [
            "ergo.jar", "ergo-node", "nanominer", "lolminer",
            "trex", "gminer", "teamredminer",
        ],
        "node_info_path": "/info",
        "pool_api_templates": {
            "herominers": "https://ergo.herominers.com/api/stats_address?address={address}",
            "woolypooly": "https://api.woolypooly.com/api/ergo-0/accounts/{address}",
            "nanopool": "https://api.nanopool.org/v1/ergo/user/{address}",
            "2miners": "https://erg.2miners.com/api/accounts/{address}",
        },
    },
    "warthog": {
        "display": "Warthog (Janushash)",
        "algo": "janushash",
        "node_ports": [3000, 3001],
        "process_names": ["wart-miner", "warthog-miner", "wart-node", "janushash"],
        "node_info_path": "/chain/head",
        "pool_api_templates": {
            "woolypooly": "https://api.woolypooly.com/api/wart-0/accounts/{address}",
            "acc-pool": "https://warthog.acc-pool.pw/api/accounts/{address}",
        },
    },
    "kaspa": {
        "display": "Kaspa (kHeavyHash)",
        "algo": "kheavyhash",
        "node_ports": [16110, 16210],
        "process_names": ["kaspad", "kaspa-miner", "bzminer", "lolminer", "iceriver"],
        "node_info_path": "/info/getInfo",
        "pool_api_templates": {
            "acc-pool": "https://kaspa.acc-pool.pw/api/accounts/{address}",
            "woolypooly": "https://api.woolypooly.com/api/kas-0/accounts/{address}",
        },
    },
    "monero": {
        "display": "Monero (RandomX)",
        "algo": "randomx",
        "node_ports": [18081, 18082],
        "process_names": ["xmrig", "monerod", "p2pool", "xmr-stak"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "p2pool": "http://localhost:18083/local/stats",
            "herominers": "https://monero.herominers.com/api/stats_address?address={address}",
            "nanopool": "https://api.nanopool.org/v1/xmr/user/{address}",
        },
    },
    "zephyr": {
        "display": "Zephyr (RandomX)",
        "algo": "randomx",
        "node_ports": [17767],
        "process_names": ["xmrig", "zephyrd"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "herominers": "https://zephyr.herominers.com/api/stats_address?address={address}",
        },
    },
    "alephium": {
        "display": "Alephium (Blake3)",
        "algo": "blake3",
        "node_ports": [12973],
        "process_names": ["alephium", "alph-miner", "bzminer"],
        "node_info_path": "/infos/self-clique",
        "pool_api_templates": {
            "herominers": "https://alephium.herominers.com/api/stats_address?address={address}",
            "woolypooly": "https://api.woolypooly.com/api/alph-0/accounts/{address}",
        },
    },
    "verus": {
        "display": "Verus (VerusHash 2.2)",
        "algo": "verushash",
        "node_ports": [27486],
        "process_names": ["verusd", "ccminer", "nheqminer"],
        "node_info_path": "/",
        "pool_api_templates": {
            "luckpool": "https://luckpool.net/verus/miner/{address}",
        },
    },
    "neoxa": {
        "display": "Neoxa (KawPow)",
        "algo": "kawpow",
        "node_ports": [8788],
        "process_names": ["neoxad", "trex", "gminer", "nbminer"],
        "node_info_path": "/",
        "pool_api_templates": {},
    },
    "dero": {
        "display": "DERO (AstroBWT)",
        "algo": "astrobwt",
        "node_ports": [10102, 20206],
        "process_names": ["derod", "dero-miner", "dero-stratum-miner", "astrobwt-miner"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "dero-node": "http://127.0.0.1:10102/json_rpc",
        },
    },
    "raptoreum": {
        "display": "Raptoreum (GhostRider)",
        "algo": "ghostrider",
        "node_ports": [10225, 10226],
        "process_names": ["raptoreumd", "cpuminer", "cpuminer-gr", "ghostrider"],
        "node_info_path": "/",
        "pool_api_templates": {
            "flockpool": "https://flockpool.com/api/v1/wallets/{address}",
            "suprnova": "https://rtm.suprnova.cc/api/wallets/{address}",
        },
    },
    "wownero": {
        "display": "Wownero (RandomX)",
        "algo": "randomx",
        "node_ports": [34568],
        "process_names": ["wownerod", "xmrig", "wownero-wallet"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "herominers": "https://wownero.herominers.com/api/stats_address?address={address}",
        },
    },
    "salvium": {
        "display": "Salvium (RandomX)",
        "algo": "randomx",
        "node_ports": [19734],
        "process_names": ["salviumd", "xmrig", "salvium-wallet"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "herominers": "https://salvium.herominers.com/api/stats_address?address={address}",
        },
    },
    "conceal": {
        "display": "Conceal (CryptoNight-GPU)",
        "algo": "cryptonight-gpu",
        "node_ports": [16000],
        "process_names": ["conceald", "xmrig", "conceal-wallet"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "herominers": "https://conceal.herominers.com/api/stats_address?address={address}",
        },
    },
    "scala": {
        "display": "Scala (RandomX)",
        "algo": "randomx",
        "node_ports": [11812],
        "process_names": ["scalad", "xmrig", "scala-wallet"],
        "node_info_path": "/json_rpc",
        "pool_api_templates": {
            "herominers": "https://scala.herominers.com/api/stats_address?address={address}",
        },
    },
}

# RTC bonus multipliers keyed by proof strength (strongest first).
POW_BONUS = {
    "node_rpc": 1.5,
    "pool_account": 1.3,
    "process_only": 1.15,
}


# ============================================================
# Detection Functions
# ============================================================

def detect_running_miners() -> List[Dict]:
    """Auto-detect all running PoW miners on this machine."""
    detected = []
    running_procs = _get_running_processes()

    for chain, info in KNOWN_MINERS.items():
        detection = {
            "chain": chain,
            "display": info["display"],
            "algo": info["algo"],
            "process_found": False,
            "node_responding": False,
            "node_port": None,
            "proof_type": None,
        }

        for proc_name in info["process_names"]:
            if proc_name.lower() in running_procs:
                detection["process_found"] = True
                detection["matched_process"] = proc_name
                break

        for port in info["node_ports"]:
            if _check_port_open(port):
                detection["node_responding"] = True
                detection["node_port"] = port
                break

        if detection["process_found"] or detection["node_responding"]:
            # A responding node outranks a bare process match.
            if detection["node_responding"]:
                detection["proof_type"] = "node_rpc"
            else:
                detection["proof_type"] = "process_only"
            detected.append(detection)

    return detected


def _get_running_processes() -> str:
    """Get lowercase string of all running process names."""
    try:
        if platform.system() == "Windows":
            result = subprocess.run(
                ["tasklist", "/fo", "csv", "/nh"],
                capture_output=True, text=True, timeout=5,
            )
        else:
            result = subprocess.run(
                ["ps", "aux"],
                capture_output=True, text=True, timeout=5,
            )
        return result.stdout.lower()
    except Exception:
        # Best-effort: an empty string simply means "nothing detected".
        return ""


def _check_port_open(port: int, host: str = "127.0.0.1") -> bool:
    """Check if a local port is open (node running)."""
    import socket
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        result = sock.connect_ex((host, port))
        sock.close()
        return result == 0
    except Exception:
        return False


# ============================================================
# Proof Generation
# ============================================================

def generate_pow_proof(
    chain: str,
    nonce: str,
    pool_address: Optional[str] = None,
    pool_name: Optional[str] = None,
) -> Optional[Dict]:
    """Generate PoW mining proof for a specific chain.

    Args:
        chain: Chain name (ergo, warthog, kaspa, monero, etc.)
        nonce: Attestation nonce from RustChain server (binds proof)
        pool_address: Optional mining address for pool verification
        pool_name: Optional pool name (herominers, woolypooly, etc.)

    Returns:
        Proof dict or None if detection failed.
    """
    if chain not in KNOWN_MINERS:
        return None

    info = KNOWN_MINERS[chain]
    proof = {
        "chain": chain,
        "algo": info["algo"],
        "timestamp": int(time.time()),
        # Binds the proof to this attestation round so it can't be replayed.
        "nonce_binding": hashlib.sha256(
            f"{nonce}:{chain}:{int(time.time())}".encode()
        ).hexdigest(),
    }

    # Try node RPC first (best proof)
    node_proof = _probe_node_rpc(chain, info, nonce)
    if node_proof:
        proof["proof_type"] = "node_rpc"
        proof["node_rpc"] = node_proof
        proof["bonus_multiplier"] = POW_BONUS["node_rpc"]
        return proof

    # Try pool account verification
    if pool_address and pool_name:
        pool_proof = _verify_pool_account(chain, info, pool_address, pool_name)
        if pool_proof:
            proof["proof_type"] = "pool_account"
            proof["pool_account"] = pool_proof
            proof["bonus_multiplier"] = POW_BONUS["pool_account"]
            return proof

    # Fallback: process detection only
    procs = _get_running_processes()
    for proc_name in info["process_names"]:
        if proc_name.lower() in procs:
            proof["proof_type"] = "process_only"
            proof["process_detected"] = proc_name
            proof["bonus_multiplier"] = POW_BONUS["process_only"]
            return proof

    return None


def _probe_node_rpc(chain: str, info: Dict, nonce: str) -> Optional[Dict]:
    """Query local node RPC for mining proof.

    Each branch speaks the chain's own RPC dialect; the returned dict always
    includes a nonce-bound proof_hash over the node's response.
    """
    try:
        import requests
    except ImportError:
        return None

    for port in info["node_ports"]:
        try:
            url = f"http://127.0.0.1:{port}"

            if chain == "ergo":
                resp = requests.get(f"{url}/info", timeout=3)
                if resp.status_code == 200:
                    ni = resp.json()
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": ni.get("fullHeight", 0),
                        "best_block": ni.get("bestFullHeaderId", ""),
                        "peers_count": ni.get("peersCount", 0),
                        "is_mining": ni.get("isMining", False),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(ni, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain == "warthog":
                resp = requests.get(f"{url}/chain/head", timeout=3)
                if resp.status_code == 200:
                    head = resp.json()
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": head.get("height", 0),
                        "best_block": head.get("hash", ""),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(head, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain == "kaspa":
                resp = requests.post(url, json={
                    "jsonrpc": "2.0", "method": "getInfo", "id": 1,
                }, timeout=3)
                if resp.status_code == 200:
                    r = resp.json().get("result", {})
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": r.get("headerCount", 0),
                        "is_synced": r.get("isSynced", False),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(r, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain in ("monero", "zephyr", "wownero", "salvium", "conceal", "scala"):
                # All Monero-family forks share the get_info JSON-RPC shape.
                resp = requests.post(f"{url}/json_rpc", json={
                    "jsonrpc": "2.0", "method": "get_info", "id": 1,
                }, timeout=3)
                if resp.status_code == 200:
                    r = resp.json().get("result", {})
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": r.get("height", 0),
                        "difficulty": r.get("difficulty", 0),
                        "tx_pool_size": r.get("tx_pool_size", 0),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(r, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain == "dero":
                resp = requests.post(f"{url}/json_rpc", json={
                    "jsonrpc": "2.0", "method": "DERO.GetInfo", "id": 1,
                }, timeout=3)
                if resp.status_code == 200:
                    r = resp.json().get("result", {})
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": r.get("topoheight", 0),
                        "stableheight": r.get("stableheight", 0),
                        "network_hashrate": r.get("difficulty", 0),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(r, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain == "raptoreum":
                resp = requests.post(url, json={
                    "jsonrpc": "1.0", "method": "getmininginfo",
                    "params": [], "id": 1,
                }, timeout=3)
                if resp.status_code == 200:
                    r = resp.json().get("result", {})
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": r.get("blocks", 0),
                        "network_hashrate": r.get("networkhashps", 0),
                        "difficulty": r.get("difficulty", 0),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(r, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain == "alephium":
                resp = requests.get(f"{url}/infos/self-clique", timeout=3)
                if resp.status_code == 200:
                    c = resp.json()
                    return {
                        "endpoint": f"localhost:{port}",
                        "clique_id": c.get("cliqueId", ""),
                        "nodes": len(c.get("nodes", [])),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(c, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            elif chain == "verus":
                resp = requests.post(url, json={
                    "jsonrpc": "1.0", "method": "getmininginfo",
                    "params": [], "id": 1,
                }, timeout=3)
                if resp.status_code == 200:
                    r = resp.json().get("result", {})
                    return {
                        "endpoint": f"localhost:{port}",
                        "chain_height": r.get("blocks", 0),
                        "network_hashrate": r.get("networkhashps", 0),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{json.dumps(r, sort_keys=True)}".encode()
                        ).hexdigest(),
                    }

            else:
                # Generic fallback: hash whatever the node's info path returns.
                resp = requests.get(
                    f"{url}{info['node_info_path']}", timeout=3,
                )
                if resp.status_code == 200:
                    return {
                        "endpoint": f"localhost:{port}",
                        "raw_response_hash": hashlib.sha256(
                            resp.content
                        ).hexdigest(),
                        "proof_hash": hashlib.sha256(
                            f"{nonce}:{resp.text[:1000]}".encode()
                        ).hexdigest(),
                    }

        except Exception:
            continue

    return None


def _verify_pool_account(
    chain: str, info: Dict, address: str, pool_name: str,
) -> Optional[Dict]:
    """Verify miner has active pool account with hashrate."""
    try:
        import requests
    except ImportError:
        return None

    templates = info.get("pool_api_templates", {})
    template = templates.get(pool_name)
    if not template:
        return None

    try:
        url = template.format(address=address)
        resp = requests.get(url, timeout=10)
        if resp.status_code != 200:
            return None

        data = resp.json()
        hashrate = 0
        last_share = 0

        if isinstance(data, dict):
            # Pools disagree on field names; take the first non-zero value.
            hashrate = (
                data.get("stats", {}).get("hashrate", 0)
                or data.get("hashrate", 0)
                or data.get("currentHashrate", 0)
                or 0
            )
            last_share = (
                data.get("stats", {}).get("lastShare", 0)
                or data.get("lastShare", 0)
                or 0
            )

        # Reject stale accounts (last share > 3h old) and zero hashrate.
        if last_share > 0 and (time.time() - last_share) > 10800:
            return None
        if hashrate <= 0:
            return None

        return {
            "pool": pool_name,
            "address": address,
            "hashrate": hashrate,
            "last_share_ts": last_share,
            "response_hash": hashlib.sha256(resp.content).hexdigest(),
            "verified_at": int(time.time()),
        }
    except Exception:
        return None


# ============================================================
# CLI Display Helpers
# ============================================================

def print_detection_report(detected: List[Dict]):
    """Pretty-print detected PoW miners."""
    if not detected:
        print("  No PoW miners detected on this machine.")
        print("  Tip: Start your PoW miner first, then run clawrtc.")
        print("  Supported chains:")
        for info in KNOWN_MINERS.values():
            print(f"    - {info['display']}")
        return

    print(f"  Found {len(detected)} PoW miner(s):")
    for d in detected:
        tag = "NODE" if d["node_responding"] else "PROCESS"
        bonus = POW_BONUS.get(d["proof_type"], 1.0)
        print(f"  [{tag}] {d['display']}")
        if d.get("node_port"):
            print(f"    Node: localhost:{d['node_port']}")
        if d.get("matched_process"):
            print(f"    Process: {d['matched_process']}")
        print(f"    RTC Bonus: {bonus}x multiplier")


def get_supported_chains() -> List[str]:
    """Return the list of chain identifiers this module can detect."""
    return list(KNOWN_MINERS.keys())


def get_chain_info(chain: str) -> Optional[Dict]:
    """Return detection metadata for a chain, or None if unknown."""
    return KNOWN_MINERS.get(chain)


# ============================================================
# Main (standalone test)
# ============================================================

if __name__ == "__main__":
    print("=" * 60)
    print("RustChain Dual-Mining: PoW Miner Detection")
    print("=" * 60)
    print()

    print("[1] Scanning for running PoW miners...")
    detected = detect_running_miners()
    print_detection_report(detected)
    print()

    if detected:
        print("[2] Generating proof for detected miners...")
        test_nonce = hashlib.sha256(b"test_nonce").hexdigest()
        for d in detected:
            proof = generate_pow_proof(d["chain"], test_nonce)
            if proof:
                print(f"  {d['display']}: {proof['proof_type']} proof")
                print(f"    Bonus: {proof['bonus_multiplier']}x")
                nr = proof.get("node_rpc", {})
                if nr.get("chain_height"):
                    print(f"    Chain height: {nr['chain_height']}")
            else:
                print(f"  {d['display']}: proof generation failed")
    else:
        print("[2] No miners to generate proof for.")

    print()
    print("Usage with clawrtc:")
    print("  clawrtc mine --pow              # Auto-detect PoW miners")
    print("  clawrtc mine --pow ergo         # Specify chain")
    print("  clawrtc mine --pow monero --pool-address ADDR --pool herominers")
_probe_node_rpc(chain: str, info: Dict, nonce: str) -> Optional[Dict]: + """Query local node RPC for mining proof.""" + try: + import requests + except ImportError: + return None + + for port in info["node_ports"]: + try: + url = f"http://127.0.0.1:{port}" + + if chain == "ergo": + resp = requests.get(f"{url}/info", timeout=3) + if resp.status_code == 200: + ni = resp.json() + return { + "endpoint": f"localhost:{port}", + "chain_height": ni.get("fullHeight", 0), + "best_block": ni.get("bestFullHeaderId", ""), + "peers_count": ni.get("peersCount", 0), + "is_mining": ni.get("isMining", False), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(ni, sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain == "warthog": + resp = requests.get(f"{url}/chain/head", timeout=3) + if resp.status_code == 200: + head = resp.json() + return { + "endpoint": f"localhost:{port}", + "chain_height": head.get("height", 0), + "best_block": head.get("hash", ""), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(head, sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain == "kaspa": + resp = requests.post(url, json={ + "jsonrpc": "2.0", "method": "getInfo", "id": 1, + }, timeout=3) + if resp.status_code == 200: + r = resp.json().get("result", {}) + return { + "endpoint": f"localhost:{port}", + "chain_height": r.get("headerCount", 0), + "is_synced": r.get("isSynced", False), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(r, sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain in ("monero", "zephyr", "wownero", "salvium", "conceal", "scala"): + resp = requests.post(f"{url}/json_rpc", json={ + "jsonrpc": "2.0", "method": "get_info", "id": 1, + }, timeout=3) + if resp.status_code == 200: + r = resp.json().get("result", {}) + return { + "endpoint": f"localhost:{port}", + "chain_height": r.get("height", 0), + "difficulty": r.get("difficulty", 0), + "tx_pool_size": r.get("tx_pool_size", 0), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(r, 
sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain == "dero": + resp = requests.post(f"{url}/json_rpc", json={ + "jsonrpc": "2.0", "method": "DERO.GetInfo", "id": 1, + }, timeout=3) + if resp.status_code == 200: + r = resp.json().get("result", {}) + return { + "endpoint": f"localhost:{port}", + "chain_height": r.get("topoheight", 0), + "stableheight": r.get("stableheight", 0), + "network_hashrate": r.get("difficulty", 0), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(r, sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain == "raptoreum": + resp = requests.post(url, json={ + "jsonrpc": "1.0", "method": "getmininginfo", + "params": [], "id": 1, + }, timeout=3) + if resp.status_code == 200: + r = resp.json().get("result", {}) + return { + "endpoint": f"localhost:{port}", + "chain_height": r.get("blocks", 0), + "network_hashrate": r.get("networkhashps", 0), + "difficulty": r.get("difficulty", 0), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(r, sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain == "alephium": + resp = requests.get(f"{url}/infos/self-clique", timeout=3) + if resp.status_code == 200: + c = resp.json() + return { + "endpoint": f"localhost:{port}", + "clique_id": c.get("cliqueId", ""), + "nodes": len(c.get("nodes", [])), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(c, sort_keys=True)}".encode() + ).hexdigest(), + } + + elif chain == "verus": + resp = requests.post(url, json={ + "jsonrpc": "1.0", "method": "getmininginfo", + "params": [], "id": 1, + }, timeout=3) + if resp.status_code == 200: + r = resp.json().get("result", {}) + return { + "endpoint": f"localhost:{port}", + "chain_height": r.get("blocks", 0), + "network_hashrate": r.get("networkhashps", 0), + "proof_hash": hashlib.sha256( + f"{nonce}:{json.dumps(r, sort_keys=True)}".encode() + ).hexdigest(), + } + + else: + resp = requests.get( + f"{url}{info['node_info_path']}", timeout=3, + ) + if resp.status_code == 200: + return { + 
"endpoint": f"localhost:{port}", + "raw_response_hash": hashlib.sha256( + resp.content + ).hexdigest(), + "proof_hash": hashlib.sha256( + f"{nonce}:{resp.text[:1000]}".encode() + ).hexdigest(), + } + + except Exception: + continue + + return None + + +def _verify_pool_account( + chain: str, info: Dict, address: str, pool_name: str, +) -> Optional[Dict]: + """Verify miner has active pool account with hashrate.""" + try: + import requests + except ImportError: + return None + + templates = info.get("pool_api_templates", {}) + template = templates.get(pool_name) + if not template: + return None + + try: + url = template.format(address=address) + resp = requests.get(url, timeout=10) + if resp.status_code != 200: + return None + + data = resp.json() + hashrate = 0 + last_share = 0 + + if isinstance(data, dict): + hashrate = ( + data.get("stats", {}).get("hashrate", 0) + or data.get("hashrate", 0) + or data.get("currentHashrate", 0) + or 0 + ) + last_share = ( + data.get("stats", {}).get("lastShare", 0) + or data.get("lastShare", 0) + or 0 + ) + + if last_share > 0 and (time.time() - last_share) > 10800: + return None + if hashrate <= 0: + return None + + return { + "pool": pool_name, + "address": address, + "hashrate": hashrate, + "last_share_ts": last_share, + "response_hash": hashlib.sha256(resp.content).hexdigest(), + "verified_at": int(time.time()), + } + except Exception: + return None + + +# ============================================================ +# CLI Display Helpers +# ============================================================ + +def print_detection_report(detected: List[Dict]): + """Pretty-print detected PoW miners.""" + if not detected: + print(" No PoW miners detected on this machine.") + print(" Tip: Start your PoW miner first, then run clawrtc.") + print(" Supported chains:") + for info in KNOWN_MINERS.values(): + print(f" - {info['display']}") + return + + print(f" Found {len(detected)} PoW miner(s):") + for d in detected: + tag = "NODE" if 
d["node_responding"] else "PROCESS" + bonus = POW_BONUS.get(d["proof_type"], 1.0) + print(f" [{tag}] {d['display']}") + if d.get("node_port"): + print(f" Node: localhost:{d['node_port']}") + if d.get("matched_process"): + print(f" Process: {d['matched_process']}") + print(f" RTC Bonus: {bonus}x multiplier") + + +def get_supported_chains() -> List[str]: + return list(KNOWN_MINERS.keys()) + + +def get_chain_info(chain: str) -> Optional[Dict]: + return KNOWN_MINERS.get(chain) + + +# ============================================================ +# Main (standalone test) +# ============================================================ + +if __name__ == "__main__": + print("=" * 60) + print("RustChain Dual-Mining: PoW Miner Detection") + print("=" * 60) + print() + + print("[1] Scanning for running PoW miners...") + detected = detect_running_miners() + print_detection_report(detected) + print() + + if detected: + print("[2] Generating proof for detected miners...") + test_nonce = hashlib.sha256(b"test_nonce").hexdigest() + for d in detected: + proof = generate_pow_proof(d["chain"], test_nonce) + if proof: + print(f" {d['display']}: {proof['proof_type']} proof") + print(f" Bonus: {proof['bonus_multiplier']}x") + nr = proof.get("node_rpc", {}) + if nr.get("chain_height"): + print(f" Chain height: {nr['chain_height']}") + else: + print(f" {d['display']}: proof generation failed") + else: + print("[2] No miners to generate proof for.") + + print() + print("Usage with clawrtc:") + print(" clawrtc mine --pow # Auto-detect PoW miners") + print(" clawrtc mine --pow ergo # Specify chain") + print(" clawrtc mine --pow monero --pool-address ADDR --pool herominers") diff --git a/miners/linux/rustchain_living_museum.py b/miners/linux/rustchain_living_museum.py index c9cb7a95..5a3dc3dc 100644 --- a/miners/linux/rustchain_living_museum.py +++ b/miners/linux/rustchain_living_museum.py @@ -1,500 +1,500 @@ -#!/usr/bin/env python3 -""" -RustChain Living Museum - Discord + Twitter/X Announcer 
-======================================================== -Posts engaging updates about vintage machines keeping the chain alive. -Features rotating content: leaderboards, machine spotlights, fun facts, fleet stats. -Posts to both Discord and Twitter/X simultaneously. -""" - -import discord -from discord.ext import tasks -import tweepy -import requests -import os -import sys -import random -from datetime import datetime, timezone -from dotenv import load_dotenv - -# Load both env files -load_dotenv('/home/sophia/.env.discord') -load_dotenv('/home/sophia/.env.twitter') -sys.stdout.reconfigure(line_buffering=True) - -# Configuration -RUSTCHAIN_API = "https://rustchain.org" -CHANNEL_NAME = "rustchain-relay" -ANNOUNCE_INTERVAL_HOURS = 6 # Post every 6 hours -TWITTER_ENABLED = True # Set to False to disable Twitter posting - -# Emojis for different content -ARCH_EMOJIS = { - "G4": "\U0001F34E", "G5": "\U0001F5A5", "G3": "\U0001F4DF", "g4": "\U0001F34E", - "retro": "\U0001F579", "486": "\U0001F4BE", "pentium": "\U0001F532", - "apple_silicon": "\U0001F34F", "modern": "\U0001F4BB", "x86_64": "\U0001F5A5", - "Power Macintosh": "\U0001F34E" -} - -BADGE_EMOJIS = { - "Oxidized Legend": "\U0001F3C6", "Tetanus Master": "\U0001F9A0", - "Patina Veteran": "\U0001F396", "Rust Warrior": "\U00002694", - "Corroded Knight": "\U0001F6E1", "Tarnished Squire": "\U0001F4DC", "Fresh Metal": "\U00002728" -} - -def log(msg): - ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - print(f"[{ts}] [MUSEUM] {msg}", flush=True) - -def fetch_api(endpoint): - """Fetch data from RustChain API.""" - try: - resp = requests.get(f"{RUSTCHAIN_API}{endpoint}", timeout=15) - return resp.json() if resp.status_code == 200 else None - except Exception as e: - log(f"API error {endpoint}: {e}") - return None - -# ============== Twitter/X Integration ============== - -def get_twitter_client(): - """Initialize Twitter API v2 client.""" - try: - api_key = os.getenv('TWITTER_API_KEY') - api_secret = 
os.getenv('TWITTER_API_SECRET') - access_token = os.getenv('TWITTER_ACCESS_TOKEN') - access_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET') - - if not all([api_key, api_secret, access_token, access_secret]): - log("Twitter credentials incomplete - disabling Twitter") - return None - - client = tweepy.Client( - consumer_key=api_key, - consumer_secret=api_secret, - access_token=access_token, - access_token_secret=access_secret - ) - log("Twitter client initialized") - return client - except Exception as e: - log(f"Twitter init error: {e}") - return None - -def post_to_twitter(client, text): - """Post a tweet. Returns True on success.""" - if not client or not TWITTER_ENABLED: - return False - try: - # Twitter limit is 280 chars - if len(text) > 280: - text = text[:277] + "..." - response = client.create_tweet(text=text) - log(f"Tweet posted: {response.data['id']}") - return True - except Exception as e: - log(f"Twitter post error: {e}") - return False - -def format_leaderboard_tweet(data, stats, fact): - """Format leaderboard data for Twitter.""" - if not data: - return None - - top3 = data.get('leaderboard', [])[:3] - total = stats.get('total_machines', 0) if stats else '?' 
- - tweet = "\U0001F980 HALL OF RUST - Top 3\n\n" - for m in top3: - arch = m.get('device_arch', '?') - emoji = ARCH_EMOJIS.get(arch, "\U0001F527") - tweet += f"{emoji} #{m['rank']} {arch} ({m.get('manufacture_year', '?')}) - Score: {m['rust_score']:.0f}\n" - - tweet += f"\n\U0001F3DB {total} machines in the Living Museum" - - if fact: - remaining = 280 - len(tweet) - 5 - fact_text = fact.get('fact', '')[:remaining] - if len(fact_text) > 20: - tweet += f"\n\n\U0001F4A1 {fact_text}" - - return tweet - -def format_spotlight_tweet(machine): - """Format machine spotlight for Twitter.""" - if not machine: - return None - - arch = machine.get('device_arch', 'unknown') - emoji = ARCH_EMOJIS.get(arch, "\U0001F527") - year = machine.get('manufacture_year', '?') - age = machine.get('age_years', '?') - score = machine.get('rust_score', 0) - badge = machine.get('badge', '') - - tweet = f"{emoji} Machine Spotlight {emoji}\n\n" - tweet += f"Architecture: {arch}\n" - tweet += f"Year: {year} ({age} years old)\n" - tweet += f"Rust Score: {score:.0f}\n" - if badge: - tweet += f"Badge: {badge}\n" - - tweet += "\nVintage silicon keeping the chain alive! \U0001F980" - - if machine.get('fun_fact'): - remaining = 280 - len(tweet) - 5 - if remaining > 30: - fact = machine['fun_fact'][:remaining-3] + "..." 
- tweet += f"\n\n{fact}" - - return tweet - -def format_fleet_tweet(breakdown, stats): - """Format fleet stats for Twitter.""" - if not breakdown: - return None - - tweet = "\U0001F3DB RustChain Living Museum - Fleet Report\n\n" - - for arch_data in breakdown.get('breakdown', [])[:4]: - arch = arch_data['architecture'] - emoji = ARCH_EMOJIS.get(arch, "\U0001F527") - count = arch_data['count'] - oldest = arch_data['oldest_year'] - tweet += f"{emoji} {arch}: {count} (oldest: {oldest})\n" - - if stats: - total = stats.get('total_machines', 0) - tweet += f"\n\U0001F5A5 Total: {total} machines" - - tweet += "\n\n#VintageComputing #RustChain" - - return tweet - -def format_timeline_tweet(timeline): - """Format timeline for Twitter.""" - if not timeline: - return None - - entries = timeline.get('timeline', [])[:3] - if not entries: - return None - - tweet = "\U0001F4C5 Hall of Rust - Recent Inductions\n\n" - - for entry in entries: - date = entry['date'] - count = entry['machines_joined'] - archs = entry['architectures'] - - # Count unique archs - arch_set = set(archs) - arch_str = ", ".join(list(arch_set)[:3]) - tweet += f"{date}: +{count} ({arch_str})\n" - - tweet += "\nThe museum grows! 
\U0001F980\n#RustChain #VintageHardware" - - return tweet - -# ============== Discord Bot ============== - -class LivingMuseumBot(discord.Client): - def __init__(self, twitter_client=None): - intents = discord.Intents.default() - intents.guilds = True - intents.messages = True - super().__init__(intents=intents) - self.channel = None - self.post_count = 0 - self.twitter = twitter_client - - async def on_ready(self): - log(f"Logged in as {self.user}") - - for guild in self.guilds: - for channel in guild.text_channels: - if CHANNEL_NAME in channel.name.lower(): - self.channel = channel - log(f"Found #{channel.name} in {guild.name}") - break - if self.channel: - break - - if not self.channel: - log(f"ERROR: Could not find #{CHANNEL_NAME}!") - return - - # Start the rotation loop - self.museum_loop.start() - log(f"Museum loop started - posting every {ANNOUNCE_INTERVAL_HOURS} hours") - if self.twitter: - log("Twitter posting ENABLED") - else: - log("Twitter posting DISABLED") - - @tasks.loop(hours=ANNOUNCE_INTERVAL_HOURS) - async def museum_loop(self): - if not self.channel: - return - - # Rotate between different post types - post_types = [ - self.post_leaderboard, - self.post_machine_spotlight, - self.post_fleet_stats, - self.post_timeline_update - ] - - # Pick based on rotation - post_func = post_types[self.post_count % len(post_types)] - self.post_count += 1 - - log(f"Posting: {post_func.__name__}") - await post_func() - - async def post_leaderboard(self): - """Post the top 10 rustiest machines.""" - data = fetch_api("/hall/leaderboard?limit=10") - stats = fetch_api("/hall/stats") - fact = fetch_api("/hall/random_fact") - - if not data: - return - - # === Discord Embed === - embed = discord.Embed( - title="\U0001F980 HALL OF RUST - Leaderboard \U0001F980", - description="*The rustiest machines keeping the chain alive*", - color=0xB7410E, - timestamp=datetime.now(timezone.utc) - ) - - leaderboard_text = "" - for m in data.get('leaderboard', [])[:10]: - rank = 
m['rank'] - arch = m.get('device_arch') or 'unknown' - arch_emoji = ARCH_EMOJIS.get(arch, "\U0001F527") - miner_id = m['miner_id'] - miner_short = miner_id[:20] + '..' if len(miner_id) > 22 else miner_id - score = m['rust_score'] - year = m.get('manufacture_year', '?') - - if rank == 1: - leaderboard_text += f"\U0001F451 **#{rank}** `{miner_short}`\n" - leaderboard_text += f" {arch_emoji} {arch} | Score: **{score:.0f}** | Year: {year}\n\n" - else: - leaderboard_text += f"**#{rank}** `{miner_short}`\n" - leaderboard_text += f" {arch_emoji} {arch} | Score: {score:.0f} | Year: {year}\n" - - embed.add_field(name="\U0001F3C5 Top 10 Rustiest Machines", value=leaderboard_text[:1024], inline=False) - - if stats: - total = stats.get('total_machines', 0) - highest = stats.get('highest_rust_score', 0) - avg = stats.get('average_rust_score', 0) - deceased = stats.get('deceased_machines', 0) - plague = stats.get('capacitor_plague_survivors', 0) - stats_text = f""" -\U0001F4CA **Total Machines Inducted:** {total} -\U0001F3AF **Highest Rust Score:** {highest:.0f} -\U0001F4C8 **Average Rust Score:** {avg:.1f} -\U00002620 **Deceased Machines:** {deceased} -\U000026A1 **Capacitor Plague Survivors:** {plague} -""" - embed.add_field(name="\U0001F4CB Hall Statistics", value=stats_text, inline=False) - - if fact: - embed.add_field(name="\U0001F4A1 Did You Know?", value=f"*{fact.get('fact', '')}*", inline=False) - - oldest = stats.get('oldest_machine', {}) if stats else {} - oldest_id = oldest.get('miner_id', 'unknown')[:25] - oldest_year = oldest.get('year', '?') - embed.set_footer(text=f"\U0001F474 Oldest: {oldest_id} ({oldest_year})") - - await self.channel.send(embed=embed) - log("Posted leaderboard to Discord") - - # === Twitter === - tweet = format_leaderboard_tweet(data, stats, fact) - if tweet: - post_to_twitter(self.twitter, tweet) - - async def post_machine_spotlight(self): - """Spotlight a random vintage machine.""" - machine = fetch_api("/hall/machine_of_the_day") - - if not 
machine: - return - - arch = machine.get('device_arch', 'unknown') - arch_emoji = ARCH_EMOJIS.get(arch, "\U0001F527") - badge_emoji = BADGE_EMOJIS.get(machine.get('badge', ''), "\U0001F527") - - miner_id = machine.get('miner_id', 'Unknown') - miner_short = miner_id[:30] + '...' if len(miner_id) > 30 else miner_id - - # === Discord Embed === - embed = discord.Embed( - title=f"{arch_emoji} Machine Spotlight {arch_emoji}", - description="*Celebrating the vintage hardware keeping RustChain alive*", - color=0xFFD700, - timestamp=datetime.now(timezone.utc) - ) - - year = machine.get('manufacture_year', 'Unknown') - age = machine.get('age_years', '?') - score = machine.get('rust_score', 0) - badge = machine.get('badge', 'Unknown') - attestations = machine.get('total_attestations', 0) - - details = f""" -\U0001F3F7 **ID:** `{miner_short}` -{arch_emoji} **Architecture:** {arch} -\U0001F4C5 **Manufacture Year:** {year} -\U0001F474 **Age:** {age} years old -\U0001F980 **Rust Score:** {score:.0f} -{badge_emoji} **Badge:** {badge} -\U0001F4CA **Total Attestations:** {attestations} -""" - embed.add_field(name="Machine Profile", value=details, inline=False) - - first_seen = machine.get('first_attestation') - if first_seen: - date_str = datetime.fromtimestamp(first_seen).strftime('%Y-%m-%d %H:%M UTC') - embed.add_field(name="\U0001F550 First Attestation", value=date_str, inline=True) - - if machine.get('fun_fact'): - embed.add_field(name="\U0001F4A1 Fun Fact", value=f"*{machine['fun_fact']}*", inline=False) - - embed.set_footer(text="Every machine has a story. 
This one is still being written.") - - await self.channel.send(embed=embed) - log(f"Posted spotlight for {miner_short} to Discord") - - # === Twitter === - tweet = format_spotlight_tweet(machine) - if tweet: - post_to_twitter(self.twitter, tweet) - - async def post_fleet_stats(self): - """Post fleet breakdown by architecture.""" - breakdown = fetch_api("/hall/fleet_breakdown") - stats = fetch_api("/hall/stats") - - if not breakdown: - return - - # === Discord Embed === - embed = discord.Embed( - title="\U0001F3DB Living Museum - Fleet Report \U0001F3DB", - description="*Architecture breakdown of machines in the Hall of Rust*", - color=0x4169E1, - timestamp=datetime.now(timezone.utc) - ) - - fleet_text = "" - for arch_data in breakdown.get('breakdown', [])[:8]: - arch = arch_data['architecture'] - emoji = ARCH_EMOJIS.get(arch, "\U0001F527") - count = arch_data['count'] - oldest = arch_data['oldest_year'] - avg_score = arch_data['avg_rust_score'] - - fleet_text += f"{emoji} **{arch}:** {count} machines\n" - fleet_text += f" \U0001F4C5 Oldest: {oldest} | \U0001F980 Avg Score: {avg_score:.0f}\n" - - embed.add_field(name="\U0001F4CA Fleet Composition", value=fleet_text[:1024], inline=False) - - if stats: - total = stats.get('total_machines', 0) - highest = stats.get('highest_rust_score', 0) - avg = stats.get('average_rust_score', 0) - summary = f""" -\U0001F5A5 **Total Fleet Size:** {total} machines -\U0001F3C6 **Peak Rust Score:** {highest:.0f} -\U0001F4C8 **Fleet Average:** {avg:.1f} -""" - embed.add_field(name="\U0001F4CB Summary", value=summary, inline=False) - - messages = [ - "Every electron through these circuits is a tribute to engineering that lasts.", - "24-year-old silicon still hashing. They don't make 'em like they used to.", - "These machines have seen Y2K, the dot-com crash, and the rise of smartphones.", - "Vintage hardware: slower clock speeds, faster heartbeats.", - "The patina of age only makes them more valuable to the chain." 
- ] - embed.set_footer(text=random.choice(messages)) - - await self.channel.send(embed=embed) - log("Posted fleet stats to Discord") - - # === Twitter === - tweet = format_fleet_tweet(breakdown, stats) - if tweet: - post_to_twitter(self.twitter, tweet) - - async def post_timeline_update(self): - """Post recent induction activity.""" - timeline = fetch_api("/hall/timeline") - fact = fetch_api("/hall/random_fact") - - if not timeline: - return - - # === Discord Embed === - embed = discord.Embed( - title="\U0001F4C5 Hall of Rust - Recent Inductions \U0001F4C5", - description="*New machines joining the living museum*", - color=0x32CD32, - timestamp=datetime.now(timezone.utc) - ) - - timeline_text = "" - for entry in timeline.get('timeline', [])[:7]: - date = entry['date'] - count = entry['machines_joined'] - archs = entry['architectures'] - - arch_counts = {} - for a in archs: - arch_counts[a] = arch_counts.get(a, 0) + 1 - - arch_summary = ", ".join([f"{ARCH_EMOJIS.get(a, '\U0001F527')}{c}" for a, c in arch_counts.items()]) - - timeline_text += f"**{date}:** +{count} machines\n" - timeline_text += f" {arch_summary}\n" - - embed.add_field(name="\U0001F550 Recent Activity", value=timeline_text[:1024], inline=False) - - if fact: - embed.add_field(name="\U0001F4A1 Vintage Wisdom", value=f"*{fact.get('fact', '')}*", inline=False) - - embed.set_footer(text="The museum grows. 
The chain strengthens.") - - await self.channel.send(embed=embed) - log("Posted timeline to Discord") - - # === Twitter === - tweet = format_timeline_tweet(timeline) - if tweet: - post_to_twitter(self.twitter, tweet) - -def main(): - token = os.getenv('DISCORD_TOKEN') - if not token: - log("ERROR: No DISCORD_TOKEN found!") - return - - log("Starting RustChain Living Museum Bot...") - log(f"API: {RUSTCHAIN_API}") - log(f"Channel: {CHANNEL_NAME}") - log(f"Interval: {ANNOUNCE_INTERVAL_HOURS} hours") - - # Initialize Twitter client - twitter_client = get_twitter_client() if TWITTER_ENABLED else None - - client = LivingMuseumBot(twitter_client=twitter_client) - client.run(token) - -if __name__ == "__main__": - main() +#!/usr/bin/env python3 +""" +RustChain Living Museum - Discord + Twitter/X Announcer +======================================================== +Posts engaging updates about vintage machines keeping the chain alive. +Features rotating content: leaderboards, machine spotlights, fun facts, fleet stats. +Posts to both Discord and Twitter/X simultaneously. 
+""" + +import discord +from discord.ext import tasks +import tweepy +import requests +import os +import sys +import random +from datetime import datetime, timezone +from dotenv import load_dotenv + +# Load both env files +load_dotenv('/home/sophia/.env.discord') +load_dotenv('/home/sophia/.env.twitter') +sys.stdout.reconfigure(line_buffering=True) + +# Configuration +RUSTCHAIN_API = "https://rustchain.org" +CHANNEL_NAME = "rustchain-relay" +ANNOUNCE_INTERVAL_HOURS = 6 # Post every 6 hours +TWITTER_ENABLED = True # Set to False to disable Twitter posting + +# Emojis for different content +ARCH_EMOJIS = { + "G4": "\U0001F34E", "G5": "\U0001F5A5", "G3": "\U0001F4DF", "g4": "\U0001F34E", + "retro": "\U0001F579", "486": "\U0001F4BE", "pentium": "\U0001F532", + "apple_silicon": "\U0001F34F", "modern": "\U0001F4BB", "x86_64": "\U0001F5A5", + "Power Macintosh": "\U0001F34E" +} + +BADGE_EMOJIS = { + "Oxidized Legend": "\U0001F3C6", "Tetanus Master": "\U0001F9A0", + "Patina Veteran": "\U0001F396", "Rust Warrior": "\U00002694", + "Corroded Knight": "\U0001F6E1", "Tarnished Squire": "\U0001F4DC", "Fresh Metal": "\U00002728" +} + +def log(msg): + ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + print(f"[{ts}] [MUSEUM] {msg}", flush=True) + +def fetch_api(endpoint): + """Fetch data from RustChain API.""" + try: + resp = requests.get(f"{RUSTCHAIN_API}{endpoint}", timeout=15) + return resp.json() if resp.status_code == 200 else None + except Exception as e: + log(f"API error {endpoint}: {e}") + return None + +# ============== Twitter/X Integration ============== + +def get_twitter_client(): + """Initialize Twitter API v2 client.""" + try: + api_key = os.getenv('TWITTER_API_KEY') + api_secret = os.getenv('TWITTER_API_SECRET') + access_token = os.getenv('TWITTER_ACCESS_TOKEN') + access_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET') + + if not all([api_key, api_secret, access_token, access_secret]): + log("Twitter credentials incomplete - disabling Twitter") + return None + + 
client = tweepy.Client( + consumer_key=api_key, + consumer_secret=api_secret, + access_token=access_token, + access_token_secret=access_secret + ) + log("Twitter client initialized") + return client + except Exception as e: + log(f"Twitter init error: {e}") + return None + +def post_to_twitter(client, text): + """Post a tweet. Returns True on success.""" + if not client or not TWITTER_ENABLED: + return False + try: + # Twitter limit is 280 chars + if len(text) > 280: + text = text[:277] + "..." + response = client.create_tweet(text=text) + log(f"Tweet posted: {response.data['id']}") + return True + except Exception as e: + log(f"Twitter post error: {e}") + return False + +def format_leaderboard_tweet(data, stats, fact): + """Format leaderboard data for Twitter.""" + if not data: + return None + + top3 = data.get('leaderboard', [])[:3] + total = stats.get('total_machines', 0) if stats else '?' + + tweet = "\U0001F980 HALL OF RUST - Top 3\n\n" + for m in top3: + arch = m.get('device_arch', '?') + emoji = ARCH_EMOJIS.get(arch, "\U0001F527") + tweet += f"{emoji} #{m['rank']} {arch} ({m.get('manufacture_year', '?')}) - Score: {m['rust_score']:.0f}\n" + + tweet += f"\n\U0001F3DB {total} machines in the Living Museum" + + if fact: + remaining = 280 - len(tweet) - 5 + fact_text = fact.get('fact', '')[:remaining] + if len(fact_text) > 20: + tweet += f"\n\n\U0001F4A1 {fact_text}" + + return tweet + +def format_spotlight_tweet(machine): + """Format machine spotlight for Twitter.""" + if not machine: + return None + + arch = machine.get('device_arch', 'unknown') + emoji = ARCH_EMOJIS.get(arch, "\U0001F527") + year = machine.get('manufacture_year', '?') + age = machine.get('age_years', '?') + score = machine.get('rust_score', 0) + badge = machine.get('badge', '') + + tweet = f"{emoji} Machine Spotlight {emoji}\n\n" + tweet += f"Architecture: {arch}\n" + tweet += f"Year: {year} ({age} years old)\n" + tweet += f"Rust Score: {score:.0f}\n" + if badge: + tweet += f"Badge: 
{badge}\n" + + tweet += "\nVintage silicon keeping the chain alive! \U0001F980" + + if machine.get('fun_fact'): + remaining = 280 - len(tweet) - 5 + if remaining > 30: + fact = machine['fun_fact'][:remaining-3] + "..." + tweet += f"\n\n{fact}" + + return tweet + +def format_fleet_tweet(breakdown, stats): + """Format fleet stats for Twitter.""" + if not breakdown: + return None + + tweet = "\U0001F3DB RustChain Living Museum - Fleet Report\n\n" + + for arch_data in breakdown.get('breakdown', [])[:4]: + arch = arch_data['architecture'] + emoji = ARCH_EMOJIS.get(arch, "\U0001F527") + count = arch_data['count'] + oldest = arch_data['oldest_year'] + tweet += f"{emoji} {arch}: {count} (oldest: {oldest})\n" + + if stats: + total = stats.get('total_machines', 0) + tweet += f"\n\U0001F5A5 Total: {total} machines" + + tweet += "\n\n#VintageComputing #RustChain" + + return tweet + +def format_timeline_tweet(timeline): + """Format timeline for Twitter.""" + if not timeline: + return None + + entries = timeline.get('timeline', [])[:3] + if not entries: + return None + + tweet = "\U0001F4C5 Hall of Rust - Recent Inductions\n\n" + + for entry in entries: + date = entry['date'] + count = entry['machines_joined'] + archs = entry['architectures'] + + # Count unique archs + arch_set = set(archs) + arch_str = ", ".join(list(arch_set)[:3]) + tweet += f"{date}: +{count} ({arch_str})\n" + + tweet += "\nThe museum grows! 
\U0001F980\n#RustChain #VintageHardware" + + return tweet + +# ============== Discord Bot ============== + +class LivingMuseumBot(discord.Client): + def __init__(self, twitter_client=None): + intents = discord.Intents.default() + intents.guilds = True + intents.messages = True + super().__init__(intents=intents) + self.channel = None + self.post_count = 0 + self.twitter = twitter_client + + async def on_ready(self): + log(f"Logged in as {self.user}") + + for guild in self.guilds: + for channel in guild.text_channels: + if CHANNEL_NAME in channel.name.lower(): + self.channel = channel + log(f"Found #{channel.name} in {guild.name}") + break + if self.channel: + break + + if not self.channel: + log(f"ERROR: Could not find #{CHANNEL_NAME}!") + return + + # Start the rotation loop + self.museum_loop.start() + log(f"Museum loop started - posting every {ANNOUNCE_INTERVAL_HOURS} hours") + if self.twitter: + log("Twitter posting ENABLED") + else: + log("Twitter posting DISABLED") + + @tasks.loop(hours=ANNOUNCE_INTERVAL_HOURS) + async def museum_loop(self): + if not self.channel: + return + + # Rotate between different post types + post_types = [ + self.post_leaderboard, + self.post_machine_spotlight, + self.post_fleet_stats, + self.post_timeline_update + ] + + # Pick based on rotation + post_func = post_types[self.post_count % len(post_types)] + self.post_count += 1 + + log(f"Posting: {post_func.__name__}") + await post_func() + + async def post_leaderboard(self): + """Post the top 10 rustiest machines.""" + data = fetch_api("/hall/leaderboard?limit=10") + stats = fetch_api("/hall/stats") + fact = fetch_api("/hall/random_fact") + + if not data: + return + + # === Discord Embed === + embed = discord.Embed( + title="\U0001F980 HALL OF RUST - Leaderboard \U0001F980", + description="*The rustiest machines keeping the chain alive*", + color=0xB7410E, + timestamp=datetime.now(timezone.utc) + ) + + leaderboard_text = "" + for m in data.get('leaderboard', [])[:10]: + rank = 
m['rank'] + arch = m.get('device_arch') or 'unknown' + arch_emoji = ARCH_EMOJIS.get(arch, "\U0001F527") + miner_id = m['miner_id'] + miner_short = miner_id[:20] + '..' if len(miner_id) > 22 else miner_id + score = m['rust_score'] + year = m.get('manufacture_year', '?') + + if rank == 1: + leaderboard_text += f"\U0001F451 **#{rank}** `{miner_short}`\n" + leaderboard_text += f" {arch_emoji} {arch} | Score: **{score:.0f}** | Year: {year}\n\n" + else: + leaderboard_text += f"**#{rank}** `{miner_short}`\n" + leaderboard_text += f" {arch_emoji} {arch} | Score: {score:.0f} | Year: {year}\n" + + embed.add_field(name="\U0001F3C5 Top 10 Rustiest Machines", value=leaderboard_text[:1024], inline=False) + + if stats: + total = stats.get('total_machines', 0) + highest = stats.get('highest_rust_score', 0) + avg = stats.get('average_rust_score', 0) + deceased = stats.get('deceased_machines', 0) + plague = stats.get('capacitor_plague_survivors', 0) + stats_text = f""" +\U0001F4CA **Total Machines Inducted:** {total} +\U0001F3AF **Highest Rust Score:** {highest:.0f} +\U0001F4C8 **Average Rust Score:** {avg:.1f} +\U00002620 **Deceased Machines:** {deceased} +\U000026A1 **Capacitor Plague Survivors:** {plague} +""" + embed.add_field(name="\U0001F4CB Hall Statistics", value=stats_text, inline=False) + + if fact: + embed.add_field(name="\U0001F4A1 Did You Know?", value=f"*{fact.get('fact', '')}*", inline=False) + + oldest = stats.get('oldest_machine', {}) if stats else {} + oldest_id = oldest.get('miner_id', 'unknown')[:25] + oldest_year = oldest.get('year', '?') + embed.set_footer(text=f"\U0001F474 Oldest: {oldest_id} ({oldest_year})") + + await self.channel.send(embed=embed) + log("Posted leaderboard to Discord") + + # === Twitter === + tweet = format_leaderboard_tweet(data, stats, fact) + if tweet: + post_to_twitter(self.twitter, tweet) + + async def post_machine_spotlight(self): + """Spotlight a random vintage machine.""" + machine = fetch_api("/hall/machine_of_the_day") + + if not 
machine: + return + + arch = machine.get('device_arch', 'unknown') + arch_emoji = ARCH_EMOJIS.get(arch, "\U0001F527") + badge_emoji = BADGE_EMOJIS.get(machine.get('badge', ''), "\U0001F527") + + miner_id = machine.get('miner_id', 'Unknown') + miner_short = miner_id[:30] + '...' if len(miner_id) > 30 else miner_id + + # === Discord Embed === + embed = discord.Embed( + title=f"{arch_emoji} Machine Spotlight {arch_emoji}", + description="*Celebrating the vintage hardware keeping RustChain alive*", + color=0xFFD700, + timestamp=datetime.now(timezone.utc) + ) + + year = machine.get('manufacture_year', 'Unknown') + age = machine.get('age_years', '?') + score = machine.get('rust_score', 0) + badge = machine.get('badge', 'Unknown') + attestations = machine.get('total_attestations', 0) + + details = f""" +\U0001F3F7 **ID:** `{miner_short}` +{arch_emoji} **Architecture:** {arch} +\U0001F4C5 **Manufacture Year:** {year} +\U0001F474 **Age:** {age} years old +\U0001F980 **Rust Score:** {score:.0f} +{badge_emoji} **Badge:** {badge} +\U0001F4CA **Total Attestations:** {attestations} +""" + embed.add_field(name="Machine Profile", value=details, inline=False) + + first_seen = machine.get('first_attestation') + if first_seen: + date_str = datetime.fromtimestamp(first_seen).strftime('%Y-%m-%d %H:%M UTC') + embed.add_field(name="\U0001F550 First Attestation", value=date_str, inline=True) + + if machine.get('fun_fact'): + embed.add_field(name="\U0001F4A1 Fun Fact", value=f"*{machine['fun_fact']}*", inline=False) + + embed.set_footer(text="Every machine has a story. 
This one is still being written.") + + await self.channel.send(embed=embed) + log(f"Posted spotlight for {miner_short} to Discord") + + # === Twitter === + tweet = format_spotlight_tweet(machine) + if tweet: + post_to_twitter(self.twitter, tweet) + + async def post_fleet_stats(self): + """Post fleet breakdown by architecture.""" + breakdown = fetch_api("/hall/fleet_breakdown") + stats = fetch_api("/hall/stats") + + if not breakdown: + return + + # === Discord Embed === + embed = discord.Embed( + title="\U0001F3DB Living Museum - Fleet Report \U0001F3DB", + description="*Architecture breakdown of machines in the Hall of Rust*", + color=0x4169E1, + timestamp=datetime.now(timezone.utc) + ) + + fleet_text = "" + for arch_data in breakdown.get('breakdown', [])[:8]: + arch = arch_data['architecture'] + emoji = ARCH_EMOJIS.get(arch, "\U0001F527") + count = arch_data['count'] + oldest = arch_data['oldest_year'] + avg_score = arch_data['avg_rust_score'] + + fleet_text += f"{emoji} **{arch}:** {count} machines\n" + fleet_text += f" \U0001F4C5 Oldest: {oldest} | \U0001F980 Avg Score: {avg_score:.0f}\n" + + embed.add_field(name="\U0001F4CA Fleet Composition", value=fleet_text[:1024], inline=False) + + if stats: + total = stats.get('total_machines', 0) + highest = stats.get('highest_rust_score', 0) + avg = stats.get('average_rust_score', 0) + summary = f""" +\U0001F5A5 **Total Fleet Size:** {total} machines +\U0001F3C6 **Peak Rust Score:** {highest:.0f} +\U0001F4C8 **Fleet Average:** {avg:.1f} +""" + embed.add_field(name="\U0001F4CB Summary", value=summary, inline=False) + + messages = [ + "Every electron through these circuits is a tribute to engineering that lasts.", + "24-year-old silicon still hashing. They don't make 'em like they used to.", + "These machines have seen Y2K, the dot-com crash, and the rise of smartphones.", + "Vintage hardware: slower clock speeds, faster heartbeats.", + "The patina of age only makes them more valuable to the chain." 
+ ] + embed.set_footer(text=random.choice(messages)) + + await self.channel.send(embed=embed) + log("Posted fleet stats to Discord") + + # === Twitter === + tweet = format_fleet_tweet(breakdown, stats) + if tweet: + post_to_twitter(self.twitter, tweet) + + async def post_timeline_update(self): + """Post recent induction activity.""" + timeline = fetch_api("/hall/timeline") + fact = fetch_api("/hall/random_fact") + + if not timeline: + return + + # === Discord Embed === + embed = discord.Embed( + title="\U0001F4C5 Hall of Rust - Recent Inductions \U0001F4C5", + description="*New machines joining the living museum*", + color=0x32CD32, + timestamp=datetime.now(timezone.utc) + ) + + timeline_text = "" + for entry in timeline.get('timeline', [])[:7]: + date = entry['date'] + count = entry['machines_joined'] + archs = entry['architectures'] + + arch_counts = {} + for a in archs: + arch_counts[a] = arch_counts.get(a, 0) + 1 + + arch_summary = ", ".join([f"{ARCH_EMOJIS.get(a, '\U0001F527')}{c}" for a, c in arch_counts.items()]) + + timeline_text += f"**{date}:** +{count} machines\n" + timeline_text += f" {arch_summary}\n" + + embed.add_field(name="\U0001F550 Recent Activity", value=timeline_text[:1024], inline=False) + + if fact: + embed.add_field(name="\U0001F4A1 Vintage Wisdom", value=f"*{fact.get('fact', '')}*", inline=False) + + embed.set_footer(text="The museum grows. 
The chain strengthens.") + + await self.channel.send(embed=embed) + log("Posted timeline to Discord") + + # === Twitter === + tweet = format_timeline_tweet(timeline) + if tweet: + post_to_twitter(self.twitter, tweet) + +def main(): + token = os.getenv('DISCORD_TOKEN') + if not token: + log("ERROR: No DISCORD_TOKEN found!") + return + + log("Starting RustChain Living Museum Bot...") + log(f"API: {RUSTCHAIN_API}") + log(f"Channel: {CHANNEL_NAME}") + log(f"Interval: {ANNOUNCE_INTERVAL_HOURS} hours") + + # Initialize Twitter client + twitter_client = get_twitter_client() if TWITTER_ENABLED else None + + client = LivingMuseumBot(twitter_client=twitter_client) + client.run(token) + +if __name__ == "__main__": + main() diff --git a/miners/linux/warthog_sidecar.py b/miners/linux/warthog_sidecar.py index c438397f..d5c4f383 100644 --- a/miners/linux/warthog_sidecar.py +++ b/miners/linux/warthog_sidecar.py @@ -1,357 +1,357 @@ -#!/usr/bin/env python3 -""" -Warthog Dual-Mining Sidecar for RustChain -========================================== - -Monitors a local Warthog (WART) node and/or BzMiner process, -assembles proof payloads for RustChain attestation bonus. 
- -Warthog uses Janushash: J(h) = Verushash^1.0 * SHA256t^0.7 - - CPU+GPU hybrid PoW algorithm requiring modern GPU - - Target: modern/semi-modern machines WITH GPUs - - Vintage hardware (G4, G5, retro) can't run Janushash GPUs - - Dual-miners get a slight RTC bonus on their modern base weight - -Bonus Tiers (modest — doesn't overtake vintage antiquity bonuses): - 1.0x No Warthog (default, existing miners unchanged) - 1.1x Pool mining (pool API confirms hashrate + shares) - 1.15x Own Warthog node (localhost:3000 reachable + balance growing) -""" - -import time -import json -import subprocess -import re -import os - -try: - import requests -except ImportError: - requests = None - -# Known Warthog mining pools -KNOWN_POOLS = { - "acc-pool": "https://acc-pool.pw/api", - "woolypooly": "https://api.woolypooly.com/api/wart-", - "herominers": "https://warthog.herominers.com/api", -} - - -class WarthogSidecar: - """ - Sidecar monitor for Warthog dual-mining alongside RustChain. - - Detects: - - Local Warthog node (JSON-RPC at localhost:3000) - - BzMiner GPU miner process - - Pool mining stats (acc-pool, woolypooly, herominers) - - Assembles proof payload for RustChain attestation. - """ - - def __init__(self, wart_address, node_url="http://localhost:3000", - pool_url=None, bzminer_path=None, manage_bzminer=False): - """ - Args: - wart_address: Warthog wallet address (wart1q...) 
- node_url: Local Warthog node URL - pool_url: Mining pool API URL (optional) - bzminer_path: Path to BzMiner binary (optional) - manage_bzminer: If True, start/stop BzMiner subprocess - """ - self.wart_address = wart_address - self.node_url = node_url.rstrip('/') - self.pool_url = pool_url - self.bzminer_path = bzminer_path - self.manage_bzminer = manage_bzminer - self._bzminer_proc = None - self._last_node_height = None - self._last_balance = None - - print(f"[WARTHOG] Sidecar initialized") - print(f" Address: {self.wart_address}") - print(f" Node: {self.node_url}") - if self.pool_url: - print(f" Pool: {self.pool_url}") - - def detect_warthog_node(self): - """ - Probe local Warthog node for chain state. - - Returns: - dict with node info or None if unreachable - """ - if not requests: - return None - - try: - # Query chain head - resp = requests.get( - f"{self.node_url}/chain/head", - timeout=5 - ) - if resp.status_code != 200: - return None - - head = resp.json() - height = head.get("height") or head.get("pinHeight") or head.get("length") - block_hash = head.get("hash", head.get("pinHash", "")) - - # Query node info for difficulty/version - difficulty = 0.0 - synced = True - try: - info_resp = requests.get(f"{self.node_url}/tools/info", timeout=5) - if info_resp.status_code == 200: - info = info_resp.json() - difficulty = info.get("difficulty", 0.0) - synced = info.get("synced", True) - except Exception: - pass - - node_info = { - "height": height, - "hash": str(block_hash)[:64], - "difficulty": difficulty, - "synced": synced, - } - - self._last_node_height = height - return node_info - - except (requests.ConnectionError, requests.Timeout): - return None - except Exception as e: - print(f"[WARTHOG] Node probe error: {e}") - return None - - def check_warthog_balance(self): - """ - Query Warthog node for wallet balance. - - Returns: - Balance as string (e.g. 
"123.45678901") or None - """ - if not requests or not self.wart_address: - return None - - try: - resp = requests.get( - f"{self.node_url}/account/{self.wart_address}/balance", - timeout=5 - ) - if resp.status_code == 200: - data = resp.json() - balance = data.get("balance", data.get("amount", "0")) - self._last_balance = str(balance) - return self._last_balance - except Exception: - pass - - return None - - def detect_bzminer_process(self): - """ - Scan for running BzMiner process. - - Returns: - dict with PID, uptime, hashrate or None - """ - try: - result = subprocess.run( - ["ps", "aux"], - capture_output=True, text=True, timeout=5 - ) - for line in result.stdout.splitlines(): - if "bzminer" in line.lower() and "grep" not in line.lower(): - parts = line.split() - pid = int(parts[1]) - - # Get process uptime from /proc - uptime_s = 0 - try: - stat = os.stat(f"/proc/{pid}") - uptime_s = int(time.time() - stat.st_mtime) - except Exception: - pass - - return { - "pid": pid, - "uptime_s": uptime_s, - "cmdline": " ".join(parts[10:])[:200], - } - except Exception: - pass - - return None - - def query_pool_stats(self): - """ - Query mining pool API for miner stats. 
- - Returns: - dict with pool info or None - """ - if not requests or not self.pool_url or not self.wart_address: - return None - - try: - # Most pools use /miner/{address}/stats or similar - urls_to_try = [ - f"{self.pool_url}/miner/{self.wart_address}/stats", - f"{self.pool_url}/stats/miner/{self.wart_address}", - f"{self.pool_url}/workers/{self.wart_address}", - ] - - for url in urls_to_try: - try: - resp = requests.get(url, timeout=10) - if resp.status_code == 200: - data = resp.json() - return { - "url": self.pool_url, - "hashrate": data.get("hashrate", data.get("currentHashrate", 0)), - "shares": data.get("shares", data.get("validShares", 0)), - "workers": data.get("workers", data.get("activeWorkers", 0)), - "last_share_at": data.get("lastShare", data.get("lastShareAt", 0)), - } - except Exception: - continue - - except Exception as e: - print(f"[WARTHOG] Pool query error: {e}") - - return None - - def start_bzminer(self, pool_stratum=None, extra_args=None): - """ - Start BzMiner as subprocess (optional management). 
- - Args: - pool_stratum: Stratum URL for pool mining - extra_args: Additional BzMiner CLI arguments - """ - if not self.manage_bzminer or not self.bzminer_path: - return False - - if self._bzminer_proc and self._bzminer_proc.poll() is None: - print("[WARTHOG] BzMiner already running") - return True - - cmd = [self.bzminer_path] - if pool_stratum: - cmd.extend(["-p", pool_stratum]) - if self.wart_address: - cmd.extend(["-w", self.wart_address]) - if extra_args: - cmd.extend(extra_args) - - try: - self._bzminer_proc = subprocess.Popen( - cmd, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - print(f"[WARTHOG] BzMiner started (PID {self._bzminer_proc.pid})") - return True - except Exception as e: - print(f"[WARTHOG] Failed to start BzMiner: {e}") - return False - - def stop_bzminer(self): - """Stop managed BzMiner subprocess.""" - if self._bzminer_proc and self._bzminer_proc.poll() is None: - self._bzminer_proc.terminate() - try: - self._bzminer_proc.wait(timeout=10) - except subprocess.TimeoutExpired: - self._bzminer_proc.kill() - print("[WARTHOG] BzMiner stopped") - self._bzminer_proc = None - - def determine_bonus_tier(self, node_info=None, pool_stats=None): - """ - Determine the Warthog dual-mining bonus tier. - - Returns: - (tier_float, proof_type_str) - 1.15 "own_node" - Running own Warthog node with balance - 1.1 "pool" - Pool mining with verified hashrate - 1.0 "none" - No Warthog detected - """ - # Tier 1.15: Own node running and synced with balance - if node_info and node_info.get("synced") and node_info.get("height"): - balance = self.check_warthog_balance() - if balance and float(balance) > 0: - return 1.15, "own_node" - - # Tier 1.1: Pool mining with active hashrate - if pool_stats and pool_stats.get("hashrate", 0) > 0: - return 1.1, "pool" - - # Tier 1.0: No Warthog activity detected - return 1.0, "none" - - def collect_proof(self): - """ - Assemble complete Warthog proof payload for RustChain attestation. 
- - Returns: - dict suitable for inclusion in attestation JSON - """ - node_info = self.detect_warthog_node() - bzminer_info = self.detect_bzminer_process() - pool_stats = self.query_pool_stats() - balance = self.check_warthog_balance() if node_info else None - - bonus_tier, proof_type = self.determine_bonus_tier(node_info, pool_stats) - - proof = { - "enabled": True, - "wart_address": self.wart_address, - "proof_type": proof_type, - "bonus_tier": bonus_tier, - "node": node_info, - "balance": balance, - "pool": pool_stats, - "bzminer": bzminer_info, - "collected_at": int(time.time()), - } - - # Log tier info - tier_label = {1.5: "OWN NODE", 1.3: "POOL", 1.0: "NONE"} - print(f"[WARTHOG] Proof collected: {tier_label.get(bonus_tier, '?')} ({bonus_tier}x)") - if node_info: - print(f" Node height: {node_info.get('height')}, synced: {node_info.get('synced')}") - if balance: - print(f" Balance: {balance} WART") - if bzminer_info: - print(f" BzMiner PID: {bzminer_info.get('pid')}, uptime: {bzminer_info.get('uptime_s')}s") - if pool_stats: - print(f" Pool hashrate: {pool_stats.get('hashrate')}") - - return proof - - -if __name__ == "__main__": - # Quick self-test - print("=" * 60) - print("Warthog Sidecar - Self Test") - print("=" * 60) - - sidecar = WarthogSidecar( - wart_address="wart1qtest_address_for_self_test", - node_url="http://localhost:3000", - ) - - print("\n--- Probing Warthog node ---") - node = sidecar.detect_warthog_node() - print(f"Node: {node}") - - print("\n--- Checking BzMiner ---") - bz = sidecar.detect_bzminer_process() - print(f"BzMiner: {bz}") - - print("\n--- Collecting proof ---") - proof = sidecar.collect_proof() - print(json.dumps(proof, indent=2)) +#!/usr/bin/env python3 +""" +Warthog Dual-Mining Sidecar for RustChain +========================================== + +Monitors a local Warthog (WART) node and/or BzMiner process, +assembles proof payloads for RustChain attestation bonus. 
+ +Warthog uses Janushash: J(h) = Verushash^1.0 * SHA256t^0.7 + - CPU+GPU hybrid PoW algorithm requiring modern GPU + - Target: modern/semi-modern machines WITH GPUs + - Vintage hardware (G4, G5, retro) can't run Janushash GPUs + - Dual-miners get a slight RTC bonus on their modern base weight + +Bonus Tiers (modest — doesn't overtake vintage antiquity bonuses): + 1.0x No Warthog (default, existing miners unchanged) + 1.1x Pool mining (pool API confirms hashrate + shares) + 1.15x Own Warthog node (localhost:3000 reachable + balance growing) +""" + +import time +import json +import subprocess +import re +import os + +try: + import requests +except ImportError: + requests = None + +# Known Warthog mining pools +KNOWN_POOLS = { + "acc-pool": "https://acc-pool.pw/api", + "woolypooly": "https://api.woolypooly.com/api/wart-", + "herominers": "https://warthog.herominers.com/api", +} + + +class WarthogSidecar: + """ + Sidecar monitor for Warthog dual-mining alongside RustChain. + + Detects: + - Local Warthog node (JSON-RPC at localhost:3000) + - BzMiner GPU miner process + - Pool mining stats (acc-pool, woolypooly, herominers) + + Assembles proof payload for RustChain attestation. + """ + + def __init__(self, wart_address, node_url="http://localhost:3000", + pool_url=None, bzminer_path=None, manage_bzminer=False): + """ + Args: + wart_address: Warthog wallet address (wart1q...) 
+ node_url: Local Warthog node URL + pool_url: Mining pool API URL (optional) + bzminer_path: Path to BzMiner binary (optional) + manage_bzminer: If True, start/stop BzMiner subprocess + """ + self.wart_address = wart_address + self.node_url = node_url.rstrip('/') + self.pool_url = pool_url + self.bzminer_path = bzminer_path + self.manage_bzminer = manage_bzminer + self._bzminer_proc = None + self._last_node_height = None + self._last_balance = None + + print(f"[WARTHOG] Sidecar initialized") + print(f" Address: {self.wart_address}") + print(f" Node: {self.node_url}") + if self.pool_url: + print(f" Pool: {self.pool_url}") + + def detect_warthog_node(self): + """ + Probe local Warthog node for chain state. + + Returns: + dict with node info or None if unreachable + """ + if not requests: + return None + + try: + # Query chain head + resp = requests.get( + f"{self.node_url}/chain/head", + timeout=5 + ) + if resp.status_code != 200: + return None + + head = resp.json() + height = head.get("height") or head.get("pinHeight") or head.get("length") + block_hash = head.get("hash", head.get("pinHash", "")) + + # Query node info for difficulty/version + difficulty = 0.0 + synced = True + try: + info_resp = requests.get(f"{self.node_url}/tools/info", timeout=5) + if info_resp.status_code == 200: + info = info_resp.json() + difficulty = info.get("difficulty", 0.0) + synced = info.get("synced", True) + except Exception: + pass + + node_info = { + "height": height, + "hash": str(block_hash)[:64], + "difficulty": difficulty, + "synced": synced, + } + + self._last_node_height = height + return node_info + + except (requests.ConnectionError, requests.Timeout): + return None + except Exception as e: + print(f"[WARTHOG] Node probe error: {e}") + return None + + def check_warthog_balance(self): + """ + Query Warthog node for wallet balance. + + Returns: + Balance as string (e.g. 
"123.45678901") or None + """ + if not requests or not self.wart_address: + return None + + try: + resp = requests.get( + f"{self.node_url}/account/{self.wart_address}/balance", + timeout=5 + ) + if resp.status_code == 200: + data = resp.json() + balance = data.get("balance", data.get("amount", "0")) + self._last_balance = str(balance) + return self._last_balance + except Exception: + pass + + return None + + def detect_bzminer_process(self): + """ + Scan for running BzMiner process. + + Returns: + dict with PID, uptime, hashrate or None + """ + try: + result = subprocess.run( + ["ps", "aux"], + capture_output=True, text=True, timeout=5 + ) + for line in result.stdout.splitlines(): + if "bzminer" in line.lower() and "grep" not in line.lower(): + parts = line.split() + pid = int(parts[1]) + + # Get process uptime from /proc + uptime_s = 0 + try: + stat = os.stat(f"/proc/{pid}") + uptime_s = int(time.time() - stat.st_mtime) + except Exception: + pass + + return { + "pid": pid, + "uptime_s": uptime_s, + "cmdline": " ".join(parts[10:])[:200], + } + except Exception: + pass + + return None + + def query_pool_stats(self): + """ + Query mining pool API for miner stats. 
+ + Returns: + dict with pool info or None + """ + if not requests or not self.pool_url or not self.wart_address: + return None + + try: + # Most pools use /miner/{address}/stats or similar + urls_to_try = [ + f"{self.pool_url}/miner/{self.wart_address}/stats", + f"{self.pool_url}/stats/miner/{self.wart_address}", + f"{self.pool_url}/workers/{self.wart_address}", + ] + + for url in urls_to_try: + try: + resp = requests.get(url, timeout=10) + if resp.status_code == 200: + data = resp.json() + return { + "url": self.pool_url, + "hashrate": data.get("hashrate", data.get("currentHashrate", 0)), + "shares": data.get("shares", data.get("validShares", 0)), + "workers": data.get("workers", data.get("activeWorkers", 0)), + "last_share_at": data.get("lastShare", data.get("lastShareAt", 0)), + } + except Exception: + continue + + except Exception as e: + print(f"[WARTHOG] Pool query error: {e}") + + return None + + def start_bzminer(self, pool_stratum=None, extra_args=None): + """ + Start BzMiner as subprocess (optional management). 
+ + Args: + pool_stratum: Stratum URL for pool mining + extra_args: Additional BzMiner CLI arguments + """ + if not self.manage_bzminer or not self.bzminer_path: + return False + + if self._bzminer_proc and self._bzminer_proc.poll() is None: + print("[WARTHOG] BzMiner already running") + return True + + cmd = [self.bzminer_path] + if pool_stratum: + cmd.extend(["-p", pool_stratum]) + if self.wart_address: + cmd.extend(["-w", self.wart_address]) + if extra_args: + cmd.extend(extra_args) + + try: + self._bzminer_proc = subprocess.Popen( + cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + print(f"[WARTHOG] BzMiner started (PID {self._bzminer_proc.pid})") + return True + except Exception as e: + print(f"[WARTHOG] Failed to start BzMiner: {e}") + return False + + def stop_bzminer(self): + """Stop managed BzMiner subprocess.""" + if self._bzminer_proc and self._bzminer_proc.poll() is None: + self._bzminer_proc.terminate() + try: + self._bzminer_proc.wait(timeout=10) + except subprocess.TimeoutExpired: + self._bzminer_proc.kill() + print("[WARTHOG] BzMiner stopped") + self._bzminer_proc = None + + def determine_bonus_tier(self, node_info=None, pool_stats=None): + """ + Determine the Warthog dual-mining bonus tier. + + Returns: + (tier_float, proof_type_str) + 1.15 "own_node" - Running own Warthog node with balance + 1.1 "pool" - Pool mining with verified hashrate + 1.0 "none" - No Warthog detected + """ + # Tier 1.15: Own node running and synced with balance + if node_info and node_info.get("synced") and node_info.get("height"): + balance = self.check_warthog_balance() + if balance and float(balance) > 0: + return 1.15, "own_node" + + # Tier 1.1: Pool mining with active hashrate + if pool_stats and pool_stats.get("hashrate", 0) > 0: + return 1.1, "pool" + + # Tier 1.0: No Warthog activity detected + return 1.0, "none" + + def collect_proof(self): + """ + Assemble complete Warthog proof payload for RustChain attestation. 
+ + Returns: + dict suitable for inclusion in attestation JSON + """ + node_info = self.detect_warthog_node() + bzminer_info = self.detect_bzminer_process() + pool_stats = self.query_pool_stats() + balance = self.check_warthog_balance() if node_info else None + + bonus_tier, proof_type = self.determine_bonus_tier(node_info, pool_stats) + + proof = { + "enabled": True, + "wart_address": self.wart_address, + "proof_type": proof_type, + "bonus_tier": bonus_tier, + "node": node_info, + "balance": balance, + "pool": pool_stats, + "bzminer": bzminer_info, + "collected_at": int(time.time()), + } + + # Log tier info + tier_label = {1.5: "OWN NODE", 1.3: "POOL", 1.0: "NONE"} + print(f"[WARTHOG] Proof collected: {tier_label.get(bonus_tier, '?')} ({bonus_tier}x)") + if node_info: + print(f" Node height: {node_info.get('height')}, synced: {node_info.get('synced')}") + if balance: + print(f" Balance: {balance} WART") + if bzminer_info: + print(f" BzMiner PID: {bzminer_info.get('pid')}, uptime: {bzminer_info.get('uptime_s')}s") + if pool_stats: + print(f" Pool hashrate: {pool_stats.get('hashrate')}") + + return proof + + +if __name__ == "__main__": + # Quick self-test + print("=" * 60) + print("Warthog Sidecar - Self Test") + print("=" * 60) + + sidecar = WarthogSidecar( + wart_address="wart1qtest_address_for_self_test", + node_url="http://localhost:3000", + ) + + print("\n--- Probing Warthog node ---") + node = sidecar.detect_warthog_node() + print(f"Node: {node}") + + print("\n--- Checking BzMiner ---") + bz = sidecar.detect_bzminer_process() + print(f"BzMiner: {bz}") + + print("\n--- Collecting proof ---") + proof = sidecar.collect_proof() + print(json.dumps(proof, indent=2)) diff --git a/miners/macos/intel/rustchain_mac_miner_v2.4.py b/miners/macos/intel/rustchain_mac_miner_v2.4.py index 669e35d1..eecbc490 100644 --- a/miners/macos/intel/rustchain_mac_miner_v2.4.py +++ b/miners/macos/intel/rustchain_mac_miner_v2.4.py @@ -1,504 +1,504 @@ -#!/usr/bin/env python3 -""" 
-RustChain Mac Universal Miner v2.4.0 -Supports: Apple Silicon (M1/M2/M3), Intel Mac, PowerPC (G4/G5) -With RIP-PoA Hardware Fingerprint Attestation + Serial Binding v2.0 -""" -import warnings -warnings.filterwarnings('ignore', message='Unverified HTTPS request') - -import os -import sys -import json -import time -import hashlib -import platform -import subprocess -import requests -import statistics -import re -from datetime import datetime - -# Import fingerprint checks -try: - from fingerprint_checks import validate_all_checks - FINGERPRINT_AVAILABLE = True -except ImportError: - FINGERPRINT_AVAILABLE = False - print("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled") - -NODE_URL = os.environ.get("RUSTCHAIN_NODE", "https://rustchain.org") -BLOCK_TIME = 600 # 10 minutes -LOTTERY_CHECK_INTERVAL = 10 # Check every 10 seconds - -def get_mac_serial(): - """Get hardware serial number for macOS systems""" - try: - # Method 1: system_profiler - result = subprocess.run( - ['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'Serial Number' in line: - return line.split(':')[1].strip() - except: - pass - - try: - # Method 2: ioreg - result = subprocess.run( - ['ioreg', '-l'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'IOPlatformSerialNumber' in line: - return line.split('"')[-2] - except: - pass - - try: - # Method 3: Hardware UUID fallback - result = subprocess.run( - ['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'Hardware UUID' in line: - return line.split(':')[1].strip()[:16] - except: - pass - - return None - - -def detect_hardware(): - """Auto-detect Mac hardware architecture""" - machine = platform.machine().lower() - system = platform.system().lower() - - hw_info = { - "family": "unknown", - "arch": "unknown", - 
"model": "Mac", - "cpu": "unknown", - "cores": os.cpu_count() or 1, - "memory_gb": 4, - "hostname": platform.node(), - "mac": "00:00:00:00:00:00", - "macs": [], - "serial": get_mac_serial() - } - - # Get MAC addresses - try: - result = subprocess.run(['ifconfig'], capture_output=True, text=True, timeout=5) - macs = re.findall(r'ether\s+([0-9a-f:]{17})', result.stdout, re.IGNORECASE) - hw_info["macs"] = macs if macs else ["00:00:00:00:00:00"] - hw_info["mac"] = macs[0] if macs else "00:00:00:00:00:00" - except: - pass - - # Get memory - try: - result = subprocess.run(['sysctl', '-n', 'hw.memsize'], - capture_output=True, text=True, timeout=5) - hw_info["memory_gb"] = int(result.stdout.strip()) // (1024**3) - except: - pass - - # Apple Silicon Detection (M1/M2/M3) - if machine == 'arm64': - hw_info["family"] = "Apple Silicon" - try: - result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], - capture_output=True, text=True, timeout=5) - brand = result.stdout.strip() - hw_info["cpu"] = brand - - if 'M3' in brand: - hw_info["arch"] = "M3" - elif 'M2' in brand: - hw_info["arch"] = "M2" - elif 'M1' in brand: - hw_info["arch"] = "M1" - else: - hw_info["arch"] = "apple_silicon" - except: - hw_info["arch"] = "apple_silicon" - hw_info["cpu"] = "Apple Silicon" - - # Intel Mac Detection - elif machine == 'x86_64': - hw_info["family"] = "x86_64" - try: - result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], - capture_output=True, text=True, timeout=5) - hw_info["cpu"] = result.stdout.strip() - - if 'core 2' in hw_info["cpu"].lower(): - hw_info["arch"] = "Core2" - else: - hw_info["arch"] = "modern" - except: - hw_info["arch"] = "modern" - hw_info["cpu"] = "Intel Mac" - - # PowerPC Detection (for old Macs) - elif machine in ('ppc', 'ppc64', 'powerpc', 'powerpc64'): - hw_info["family"] = "PowerPC" - try: - result = subprocess.run(['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10) - output = result.stdout.lower() - 
- if 'g5' in output or 'powermac11' in output: - hw_info["arch"] = "G5" - hw_info["cpu"] = "PowerPC G5" - elif 'g4' in output or 'powermac3' in output or 'powerbook' in output: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC G4" - elif 'g3' in output: - hw_info["arch"] = "G3" - hw_info["cpu"] = "PowerPC G3" - else: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC" - except: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC G4" - - # Get model name - try: - result = subprocess.run(['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10) - for line in result.stdout.split('\n'): - if 'Model Name' in line or 'Model Identifier' in line: - hw_info["model"] = line.split(':')[1].strip() - break - except: - pass - - return hw_info - - -def collect_entropy(cycles=48, inner_loop=25000): - """Collect timing entropy for hardware attestation""" - samples = [] - for _ in range(cycles): - start = time.perf_counter_ns() - acc = 0 - for j in range(inner_loop): - acc ^= (j * 31) & 0xFFFFFFFF - duration = time.perf_counter_ns() - start - samples.append(duration) - - mean_ns = sum(samples) / len(samples) - variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 - - return { - "mean_ns": mean_ns, - "variance_ns": variance_ns, - "min_ns": min(samples), - "max_ns": max(samples), - "sample_count": len(samples), - "samples_preview": samples[:12], - } - - -class MacMiner: - def __init__(self, miner_id=None, wallet=None): - self.node_url = NODE_URL - self.hw_info = detect_hardware() - self.fingerprint_data = {} - self.fingerprint_passed = False - - # Generate miner_id from hardware - if miner_id: - self.miner_id = miner_id - else: - hw_hash = hashlib.sha256( - f"{self.hw_info['hostname']}-{self.hw_info['serial'] or 'unknown'}".encode() - ).hexdigest()[:8] - arch = self.hw_info['arch'].lower().replace(' ', '_') - self.miner_id = f"{arch}-{self.hw_info['hostname'][:10]}-{hw_hash}" - - # Generate wallet address - if wallet: - 
self.wallet = wallet - else: - wallet_hash = hashlib.sha256(f"{self.miner_id}-rustchain".encode()).hexdigest()[:38] - self.wallet = f"{self.hw_info['family'].lower().replace(' ', '_')}_{wallet_hash}RTC" - - self.attestation_valid_until = 0 - self.shares_submitted = 0 - self.shares_accepted = 0 - self.last_entropy = {} - - self._print_banner() - - # Run initial fingerprint check - if FINGERPRINT_AVAILABLE: - self._run_fingerprint_checks() - - def _run_fingerprint_checks(self): - """Run hardware fingerprint checks for RIP-PoA""" - print("\n[FINGERPRINT] Running hardware fingerprint checks...") - try: - passed, results = validate_all_checks() - self.fingerprint_passed = passed - self.fingerprint_data = {"checks": results, "all_passed": passed} - if passed: - print("[FINGERPRINT] All checks PASSED - eligible for full rewards") - else: - failed = [k for k, v in results.items() if not v.get("passed")] - print(f"[FINGERPRINT] FAILED checks: {failed}") - print("[FINGERPRINT] WARNING: May receive reduced/zero rewards") - except Exception as e: - print(f"[FINGERPRINT] Error running checks: {e}") - self.fingerprint_passed = False - self.fingerprint_data = {"error": str(e), "all_passed": False} - - def _print_banner(self): - print("=" * 70) - print("RustChain Mac Miner v2.4.0 - Serial Binding + Fingerprint") - print("=" * 70) - print(f"Miner ID: {self.miner_id}") - print(f"Wallet: {self.wallet}") - print(f"Node: {self.node_url}") - print(f"Serial: {self.hw_info.get('serial', 'N/A')}") - print("-" * 70) - print(f"Hardware: {self.hw_info['family']} / {self.hw_info['arch']}") - print(f"Model: {self.hw_info['model']}") - print(f"CPU: {self.hw_info['cpu']}") - print(f"Cores: {self.hw_info['cores']}") - print(f"Memory: {self.hw_info['memory_gb']} GB") - print("-" * 70) - weight = self._get_expected_weight() - print(f"Expected Weight: {weight}x (Proof of Antiquity)") - print("=" * 70) - - def _get_expected_weight(self): - """Calculate expected PoA weight""" - arch = 
self.hw_info['arch'].lower() - family = self.hw_info['family'].lower() - - if family == 'powerpc': - if arch == 'g3': return 3.0 - if arch == 'g4': return 2.5 - if arch == 'g5': return 2.0 - elif 'apple' in family or 'silicon' in family: - if arch in ('m1', 'm2', 'm3', 'apple_silicon'): return 1.2 - elif family == 'x86_64': - if arch == 'core2': return 1.5 - return 1.0 - - return 1.0 - - def attest(self): - """Complete hardware attestation with fingerprint""" - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Attesting hardware...") - - try: - # Step 1: Get challenge - resp = requests.post(f"{self.node_url}/attest/challenge", json={}, timeout=15, verify=False) - if resp.status_code != 200: - print(f" ERROR: Challenge failed ({resp.status_code})") - return False - - challenge = resp.json() - nonce = challenge.get("nonce", "") - print(f" Got challenge nonce: {nonce[:16]}...") - - except Exception as e: - print(f" ERROR: Challenge error: {e}") - return False - - # Collect entropy - entropy = collect_entropy() - self.last_entropy = entropy - - # Re-run fingerprint checks if needed - if FINGERPRINT_AVAILABLE and not self.fingerprint_data: - self._run_fingerprint_checks() - - # Build attestation payload - commitment = hashlib.sha256( - (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() - ).hexdigest() - - attestation = { - "miner": self.wallet, - "miner_id": self.miner_id, - "nonce": nonce, - "report": { - "nonce": nonce, - "commitment": commitment, - "derived": entropy, - "entropy_score": entropy.get("variance_ns", 0.0) - }, - "device": { - "family": self.hw_info["family"], - "arch": self.hw_info["arch"], - "model": self.hw_info["model"], - "cpu": self.hw_info["cpu"], - "cores": self.hw_info["cores"], - "memory_gb": self.hw_info["memory_gb"], - "serial": self.hw_info.get("serial") # Hardware serial for v2 binding - }, - "signals": { - "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), - "hostname": self.hw_info["hostname"] - }, - # RIP-PoA 
hardware fingerprint attestation - "fingerprint": self.fingerprint_data - } - - try: - resp = requests.post(f"{self.node_url}/attest/submit", - json=attestation, timeout=30, verify=False) - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.attestation_valid_until = time.time() + 580 - print(f" SUCCESS: Attestation accepted!") - - # Show fingerprint status - if self.fingerprint_passed: - print(f" Fingerprint: PASSED") - else: - print(f" Fingerprint: FAILED (reduced rewards)") - return True - else: - print(f" WARNING: {result}") - return False - else: - print(f" ERROR: HTTP {resp.status_code}: {resp.text[:200]}") - return False - - except Exception as e: - print(f" ERROR: {e}") - return False - - def check_eligibility(self): - """Check lottery eligibility""" - try: - resp = requests.get( - f"{self.node_url}/lottery/eligibility", - params={"miner_id": self.miner_id}, - timeout=10, - verify=False - ) - if resp.status_code == 200: - return resp.json() - return {"eligible": False, "reason": f"HTTP {resp.status_code}"} - except Exception as e: - return {"eligible": False, "reason": str(e)} - - def submit_header(self, slot): - """Submit header for slot""" - try: - message = f"slot:{slot}:miner:{self.miner_id}:ts:{int(time.time())}" - message_hex = message.encode().hex() - sig_data = hashlib.sha512(f"{message}{self.wallet}".encode()).hexdigest() - - header_payload = { - "miner_id": self.miner_id, - "header": { - "slot": slot, - "miner": self.miner_id, - "timestamp": int(time.time()) - }, - "message": message_hex, - "signature": sig_data, - "pubkey": self.wallet - } - - resp = requests.post( - f"{self.node_url}/headers/ingest_signed", - json=header_payload, - timeout=15, - verify=False - ) - - self.shares_submitted += 1 - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.shares_accepted += 1 - return True, result - return False, result - return False, {"error": f"HTTP {resp.status_code}"} - - except Exception 
as e: - return False, {"error": str(e)} - - def run(self): - """Main mining loop""" - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Starting miner...") - - # Initial attestation - while not self.attest(): - print(" Retrying attestation in 30 seconds...") - time.sleep(30) - - last_slot = 0 - - while True: - try: - # Re-attest if needed - if time.time() > self.attestation_valid_until: - self.attest() - - # Check eligibility - eligibility = self.check_eligibility() - slot = eligibility.get("slot", 0) - - if eligibility.get("eligible"): - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] ELIGIBLE for slot {slot}!") - - if slot != last_slot: - success, result = self.submit_header(slot) - if success: - print(f" Header ACCEPTED! Slot {slot}") - else: - print(f" Header rejected: {result}") - last_slot = slot - else: - reason = eligibility.get("reason", "unknown") - if reason == "not_attested": - print(f"[{datetime.now().strftime('%H:%M:%S')}] Not attested - re-attesting...") - self.attest() - - # Status every 60 seconds - if int(time.time()) % 60 == 0: - print(f"[{datetime.now().strftime('%H:%M:%S')}] Slot {slot} | " - f"Submitted: {self.shares_submitted} | " - f"Accepted: {self.shares_accepted}") - - time.sleep(LOTTERY_CHECK_INTERVAL) - - except KeyboardInterrupt: - print("\n\nShutting down miner...") - break - except Exception as e: - print(f"[{datetime.now().strftime('%H:%M:%S')}] Error: {e}") - time.sleep(30) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser(description="RustChain Mac Miner v2.4.0") - parser.add_argument("--miner-id", "-m", help="Custom miner ID") - parser.add_argument("--wallet", "-w", help="Custom wallet address") - parser.add_argument("--node", "-n", default=NODE_URL, help="Node URL") - args = parser.parse_args() - - if args.node: - NODE_URL = args.node - - miner = MacMiner(miner_id=args.miner_id, wallet=args.wallet) - miner.run() +#!/usr/bin/env python3 +""" +RustChain Mac Universal Miner v2.4.0 +Supports: 
Apple Silicon (M1/M2/M3), Intel Mac, PowerPC (G4/G5) +With RIP-PoA Hardware Fingerprint Attestation + Serial Binding v2.0 +""" +import warnings +warnings.filterwarnings('ignore', message='Unverified HTTPS request') + +import os +import sys +import json +import time +import hashlib +import platform +import subprocess +import requests +import statistics +import re +from datetime import datetime + +# Import fingerprint checks +try: + from fingerprint_checks import validate_all_checks + FINGERPRINT_AVAILABLE = True +except ImportError: + FINGERPRINT_AVAILABLE = False + print("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled") + +NODE_URL = os.environ.get("RUSTCHAIN_NODE", "https://rustchain.org") +BLOCK_TIME = 600 # 10 minutes +LOTTERY_CHECK_INTERVAL = 10 # Check every 10 seconds + +def get_mac_serial(): + """Get hardware serial number for macOS systems""" + try: + # Method 1: system_profiler + result = subprocess.run( + ['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'Serial Number' in line: + return line.split(':')[1].strip() + except: + pass + + try: + # Method 2: ioreg + result = subprocess.run( + ['ioreg', '-l'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'IOPlatformSerialNumber' in line: + return line.split('"')[-2] + except: + pass + + try: + # Method 3: Hardware UUID fallback + result = subprocess.run( + ['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'Hardware UUID' in line: + return line.split(':')[1].strip()[:16] + except: + pass + + return None + + +def detect_hardware(): + """Auto-detect Mac hardware architecture""" + machine = platform.machine().lower() + system = platform.system().lower() + + hw_info = { + "family": "unknown", + "arch": "unknown", + "model": "Mac", + "cpu": "unknown", + "cores": 
os.cpu_count() or 1, + "memory_gb": 4, + "hostname": platform.node(), + "mac": "00:00:00:00:00:00", + "macs": [], + "serial": get_mac_serial() + } + + # Get MAC addresses + try: + result = subprocess.run(['ifconfig'], capture_output=True, text=True, timeout=5) + macs = re.findall(r'ether\s+([0-9a-f:]{17})', result.stdout, re.IGNORECASE) + hw_info["macs"] = macs if macs else ["00:00:00:00:00:00"] + hw_info["mac"] = macs[0] if macs else "00:00:00:00:00:00" + except: + pass + + # Get memory + try: + result = subprocess.run(['sysctl', '-n', 'hw.memsize'], + capture_output=True, text=True, timeout=5) + hw_info["memory_gb"] = int(result.stdout.strip()) // (1024**3) + except: + pass + + # Apple Silicon Detection (M1/M2/M3) + if machine == 'arm64': + hw_info["family"] = "Apple Silicon" + try: + result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + capture_output=True, text=True, timeout=5) + brand = result.stdout.strip() + hw_info["cpu"] = brand + + if 'M3' in brand: + hw_info["arch"] = "M3" + elif 'M2' in brand: + hw_info["arch"] = "M2" + elif 'M1' in brand: + hw_info["arch"] = "M1" + else: + hw_info["arch"] = "apple_silicon" + except: + hw_info["arch"] = "apple_silicon" + hw_info["cpu"] = "Apple Silicon" + + # Intel Mac Detection + elif machine == 'x86_64': + hw_info["family"] = "x86_64" + try: + result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + capture_output=True, text=True, timeout=5) + hw_info["cpu"] = result.stdout.strip() + + if 'core 2' in hw_info["cpu"].lower(): + hw_info["arch"] = "Core2" + else: + hw_info["arch"] = "modern" + except: + hw_info["arch"] = "modern" + hw_info["cpu"] = "Intel Mac" + + # PowerPC Detection (for old Macs) + elif machine in ('ppc', 'ppc64', 'powerpc', 'powerpc64'): + hw_info["family"] = "PowerPC" + try: + result = subprocess.run(['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10) + output = result.stdout.lower() + + if 'g5' in output or 'powermac11' in output: 
+ hw_info["arch"] = "G5" + hw_info["cpu"] = "PowerPC G5" + elif 'g4' in output or 'powermac3' in output or 'powerbook' in output: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC G4" + elif 'g3' in output: + hw_info["arch"] = "G3" + hw_info["cpu"] = "PowerPC G3" + else: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC" + except: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC G4" + + # Get model name + try: + result = subprocess.run(['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10) + for line in result.stdout.split('\n'): + if 'Model Name' in line or 'Model Identifier' in line: + hw_info["model"] = line.split(':')[1].strip() + break + except: + pass + + return hw_info + + +def collect_entropy(cycles=48, inner_loop=25000): + """Collect timing entropy for hardware attestation""" + samples = [] + for _ in range(cycles): + start = time.perf_counter_ns() + acc = 0 + for j in range(inner_loop): + acc ^= (j * 31) & 0xFFFFFFFF + duration = time.perf_counter_ns() - start + samples.append(duration) + + mean_ns = sum(samples) / len(samples) + variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 + + return { + "mean_ns": mean_ns, + "variance_ns": variance_ns, + "min_ns": min(samples), + "max_ns": max(samples), + "sample_count": len(samples), + "samples_preview": samples[:12], + } + + +class MacMiner: + def __init__(self, miner_id=None, wallet=None): + self.node_url = NODE_URL + self.hw_info = detect_hardware() + self.fingerprint_data = {} + self.fingerprint_passed = False + + # Generate miner_id from hardware + if miner_id: + self.miner_id = miner_id + else: + hw_hash = hashlib.sha256( + f"{self.hw_info['hostname']}-{self.hw_info['serial'] or 'unknown'}".encode() + ).hexdigest()[:8] + arch = self.hw_info['arch'].lower().replace(' ', '_') + self.miner_id = f"{arch}-{self.hw_info['hostname'][:10]}-{hw_hash}" + + # Generate wallet address + if wallet: + self.wallet = wallet + else: + wallet_hash = 
hashlib.sha256(f"{self.miner_id}-rustchain".encode()).hexdigest()[:38] + self.wallet = f"{self.hw_info['family'].lower().replace(' ', '_')}_{wallet_hash}RTC" + + self.attestation_valid_until = 0 + self.shares_submitted = 0 + self.shares_accepted = 0 + self.last_entropy = {} + + self._print_banner() + + # Run initial fingerprint check + if FINGERPRINT_AVAILABLE: + self._run_fingerprint_checks() + + def _run_fingerprint_checks(self): + """Run hardware fingerprint checks for RIP-PoA""" + print("\n[FINGERPRINT] Running hardware fingerprint checks...") + try: + passed, results = validate_all_checks() + self.fingerprint_passed = passed + self.fingerprint_data = {"checks": results, "all_passed": passed} + if passed: + print("[FINGERPRINT] All checks PASSED - eligible for full rewards") + else: + failed = [k for k, v in results.items() if not v.get("passed")] + print(f"[FINGERPRINT] FAILED checks: {failed}") + print("[FINGERPRINT] WARNING: May receive reduced/zero rewards") + except Exception as e: + print(f"[FINGERPRINT] Error running checks: {e}") + self.fingerprint_passed = False + self.fingerprint_data = {"error": str(e), "all_passed": False} + + def _print_banner(self): + print("=" * 70) + print("RustChain Mac Miner v2.4.0 - Serial Binding + Fingerprint") + print("=" * 70) + print(f"Miner ID: {self.miner_id}") + print(f"Wallet: {self.wallet}") + print(f"Node: {self.node_url}") + print(f"Serial: {self.hw_info.get('serial', 'N/A')}") + print("-" * 70) + print(f"Hardware: {self.hw_info['family']} / {self.hw_info['arch']}") + print(f"Model: {self.hw_info['model']}") + print(f"CPU: {self.hw_info['cpu']}") + print(f"Cores: {self.hw_info['cores']}") + print(f"Memory: {self.hw_info['memory_gb']} GB") + print("-" * 70) + weight = self._get_expected_weight() + print(f"Expected Weight: {weight}x (Proof of Antiquity)") + print("=" * 70) + + def _get_expected_weight(self): + """Calculate expected PoA weight""" + arch = self.hw_info['arch'].lower() + family = 
self.hw_info['family'].lower() + + if family == 'powerpc': + if arch == 'g3': return 3.0 + if arch == 'g4': return 2.5 + if arch == 'g5': return 2.0 + elif 'apple' in family or 'silicon' in family: + if arch in ('m1', 'm2', 'm3', 'apple_silicon'): return 1.2 + elif family == 'x86_64': + if arch == 'core2': return 1.5 + return 1.0 + + return 1.0 + + def attest(self): + """Complete hardware attestation with fingerprint""" + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Attesting hardware...") + + try: + # Step 1: Get challenge + resp = requests.post(f"{self.node_url}/attest/challenge", json={}, timeout=15, verify=False) + if resp.status_code != 200: + print(f" ERROR: Challenge failed ({resp.status_code})") + return False + + challenge = resp.json() + nonce = challenge.get("nonce", "") + print(f" Got challenge nonce: {nonce[:16]}...") + + except Exception as e: + print(f" ERROR: Challenge error: {e}") + return False + + # Collect entropy + entropy = collect_entropy() + self.last_entropy = entropy + + # Re-run fingerprint checks if needed + if FINGERPRINT_AVAILABLE and not self.fingerprint_data: + self._run_fingerprint_checks() + + # Build attestation payload + commitment = hashlib.sha256( + (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() + ).hexdigest() + + attestation = { + "miner": self.wallet, + "miner_id": self.miner_id, + "nonce": nonce, + "report": { + "nonce": nonce, + "commitment": commitment, + "derived": entropy, + "entropy_score": entropy.get("variance_ns", 0.0) + }, + "device": { + "family": self.hw_info["family"], + "arch": self.hw_info["arch"], + "model": self.hw_info["model"], + "cpu": self.hw_info["cpu"], + "cores": self.hw_info["cores"], + "memory_gb": self.hw_info["memory_gb"], + "serial": self.hw_info.get("serial") # Hardware serial for v2 binding + }, + "signals": { + "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), + "hostname": self.hw_info["hostname"] + }, + # RIP-PoA hardware fingerprint attestation + 
"fingerprint": self.fingerprint_data + } + + try: + resp = requests.post(f"{self.node_url}/attest/submit", + json=attestation, timeout=30, verify=False) + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.attestation_valid_until = time.time() + 580 + print(f" SUCCESS: Attestation accepted!") + + # Show fingerprint status + if self.fingerprint_passed: + print(f" Fingerprint: PASSED") + else: + print(f" Fingerprint: FAILED (reduced rewards)") + return True + else: + print(f" WARNING: {result}") + return False + else: + print(f" ERROR: HTTP {resp.status_code}: {resp.text[:200]}") + return False + + except Exception as e: + print(f" ERROR: {e}") + return False + + def check_eligibility(self): + """Check lottery eligibility""" + try: + resp = requests.get( + f"{self.node_url}/lottery/eligibility", + params={"miner_id": self.miner_id}, + timeout=10, + verify=False + ) + if resp.status_code == 200: + return resp.json() + return {"eligible": False, "reason": f"HTTP {resp.status_code}"} + except Exception as e: + return {"eligible": False, "reason": str(e)} + + def submit_header(self, slot): + """Submit header for slot""" + try: + message = f"slot:{slot}:miner:{self.miner_id}:ts:{int(time.time())}" + message_hex = message.encode().hex() + sig_data = hashlib.sha512(f"{message}{self.wallet}".encode()).hexdigest() + + header_payload = { + "miner_id": self.miner_id, + "header": { + "slot": slot, + "miner": self.miner_id, + "timestamp": int(time.time()) + }, + "message": message_hex, + "signature": sig_data, + "pubkey": self.wallet + } + + resp = requests.post( + f"{self.node_url}/headers/ingest_signed", + json=header_payload, + timeout=15, + verify=False + ) + + self.shares_submitted += 1 + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.shares_accepted += 1 + return True, result + return False, result + return False, {"error": f"HTTP {resp.status_code}"} + + except Exception as e: + return False, {"error": 
str(e)} + + def run(self): + """Main mining loop""" + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Starting miner...") + + # Initial attestation + while not self.attest(): + print(" Retrying attestation in 30 seconds...") + time.sleep(30) + + last_slot = 0 + + while True: + try: + # Re-attest if needed + if time.time() > self.attestation_valid_until: + self.attest() + + # Check eligibility + eligibility = self.check_eligibility() + slot = eligibility.get("slot", 0) + + if eligibility.get("eligible"): + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] ELIGIBLE for slot {slot}!") + + if slot != last_slot: + success, result = self.submit_header(slot) + if success: + print(f" Header ACCEPTED! Slot {slot}") + else: + print(f" Header rejected: {result}") + last_slot = slot + else: + reason = eligibility.get("reason", "unknown") + if reason == "not_attested": + print(f"[{datetime.now().strftime('%H:%M:%S')}] Not attested - re-attesting...") + self.attest() + + # Status every 60 seconds + if int(time.time()) % 60 == 0: + print(f"[{datetime.now().strftime('%H:%M:%S')}] Slot {slot} | " + f"Submitted: {self.shares_submitted} | " + f"Accepted: {self.shares_accepted}") + + time.sleep(LOTTERY_CHECK_INTERVAL) + + except KeyboardInterrupt: + print("\n\nShutting down miner...") + break + except Exception as e: + print(f"[{datetime.now().strftime('%H:%M:%S')}] Error: {e}") + time.sleep(30) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="RustChain Mac Miner v2.4.0") + parser.add_argument("--miner-id", "-m", help="Custom miner ID") + parser.add_argument("--wallet", "-w", help="Custom wallet address") + parser.add_argument("--node", "-n", default=NODE_URL, help="Node URL") + args = parser.parse_args() + + if args.node: + NODE_URL = args.node + + miner = MacMiner(miner_id=args.miner_id, wallet=args.wallet) + miner.run() diff --git a/miners/macos/rustchain_mac_miner_v2.4.py b/miners/macos/rustchain_mac_miner_v2.4.py index 
68b850ec..7c84bae7 100644 --- a/miners/macos/rustchain_mac_miner_v2.4.py +++ b/miners/macos/rustchain_mac_miner_v2.4.py @@ -1,538 +1,538 @@ -#!/usr/bin/env python3 -""" -RustChain Mac Universal Miner v2.4.0 -Supports: Apple Silicon (M1/M2/M3), Intel Mac, PowerPC (G4/G5) -With RIP-PoA Hardware Fingerprint Attestation + Serial Binding v2.0 -""" -import warnings -warnings.filterwarnings('ignore', message='Unverified HTTPS request') - -import os -import sys -import json -import time -import hashlib -import platform -import subprocess -import requests -import statistics -import re -from datetime import datetime - -# Import fingerprint checks -try: - from fingerprint_checks import validate_all_checks - FINGERPRINT_AVAILABLE = True -except ImportError: - FINGERPRINT_AVAILABLE = False - print(warning("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled")) - -# Import CPU architecture detection -try: - from cpu_architecture_detection import detect_cpu_architecture, calculate_antiquity_multiplier - CPU_DETECTION_AVAILABLE = True -except ImportError: - CPU_DETECTION_AVAILABLE = False - print(info("[INFO] cpu_architecture_detection.py not found - using basic detection")) - -NODE_URL = os.environ.get("RUSTCHAIN_NODE", "https://rustchain.org") -BLOCK_TIME = 600 # 10 minutes -LOTTERY_CHECK_INTERVAL = 10 # Check every 10 seconds - -def get_mac_serial(): - """Get hardware serial number for macOS systems""" - try: - # Method 1: system_profiler - result = subprocess.run( - ['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'Serial Number' in line: - return line.split(':')[1].strip() - except: - pass - - try: - # Method 2: ioreg - result = subprocess.run( - ['ioreg', '-l'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'IOPlatformSerialNumber' in line: - return line.split('"')[-2] - except: - pass - - try: - # Method 3: Hardware 
UUID fallback - result = subprocess.run( - ['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'Hardware UUID' in line: - return line.split(':')[1].strip()[:16] - except: - pass - - return None - - -def detect_hardware(): - """Auto-detect Mac hardware architecture""" - machine = platform.machine().lower() - system = platform.system().lower() - - hw_info = { - "family": "unknown", - "arch": "unknown", - "model": "Mac", - "cpu": "unknown", - "cores": os.cpu_count() or 1, - "memory_gb": 4, - "hostname": platform.node(), - "mac": "00:00:00:00:00:00", - "macs": [], - "serial": get_mac_serial() - } - - # Get MAC addresses - try: - result = subprocess.run(['ifconfig'], capture_output=True, text=True, timeout=5) - macs = re.findall(r'ether\s+([0-9a-f:]{17})', result.stdout, re.IGNORECASE) - hw_info["macs"] = macs if macs else ["00:00:00:00:00:00"] - hw_info["mac"] = macs[0] if macs else "00:00:00:00:00:00" - except: - pass - - # Get memory - try: - result = subprocess.run(['sysctl', '-n', 'hw.memsize'], - capture_output=True, text=True, timeout=5) - hw_info["memory_gb"] = int(result.stdout.strip()) // (1024**3) - except: - pass - - # Apple Silicon Detection (M1/M2/M3) - if machine == 'arm64': - hw_info["family"] = "Apple Silicon" - try: - result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], - capture_output=True, text=True, timeout=5) - brand = result.stdout.strip() - hw_info["cpu"] = brand - - if 'M3' in brand: - hw_info["arch"] = "M3" - elif 'M2' in brand: - hw_info["arch"] = "M2" - elif 'M1' in brand: - hw_info["arch"] = "M1" - else: - hw_info["arch"] = "apple_silicon" - except: - hw_info["arch"] = "apple_silicon" - hw_info["cpu"] = "Apple Silicon" - - # Intel Mac Detection - elif machine == 'x86_64': - hw_info["family"] = "x86_64" - try: - result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], - capture_output=True, text=True, timeout=5) - cpu_brand = 
result.stdout.strip() - hw_info["cpu"] = cpu_brand - - # Use comprehensive CPU detection if available - if CPU_DETECTION_AVAILABLE: - cpu_info = calculate_antiquity_multiplier(cpu_brand) - hw_info["arch"] = cpu_info.architecture - hw_info["cpu_vendor"] = cpu_info.vendor - hw_info["cpu_year"] = cpu_info.microarch_year - hw_info["cpu_generation"] = cpu_info.generation - hw_info["is_server"] = cpu_info.is_server - print(f"[CPU] Detected: {cpu_info.generation} ({cpu_info.architecture}, {cpu_info.microarch_year})") - else: - # Fallback: Basic detection for retro Intel architectures - cpu_lower = cpu_brand.lower() - if 'core 2' in cpu_lower or 'core(tm)2' in cpu_lower: - hw_info["arch"] = "core2" # 1.3x - elif 'xeon' in cpu_lower and ('e5-16' in cpu_lower or 'e5-26' in cpu_lower): - hw_info["arch"] = "ivy_bridge" # Xeon E5 v2 = Ivy Bridge-E - elif 'i7-3' in cpu_lower or 'i5-3' in cpu_lower or 'i3-3' in cpu_lower: - hw_info["arch"] = "ivy_bridge" - elif 'i7-2' in cpu_lower or 'i5-2' in cpu_lower or 'i3-2' in cpu_lower: - hw_info["arch"] = "sandy_bridge" - elif 'i7-9' in cpu_lower and '900' in cpu_lower: - hw_info["arch"] = "nehalem" - elif 'i7-4' in cpu_lower or 'i5-4' in cpu_lower: - hw_info["arch"] = "haswell" - elif 'pentium' in cpu_lower: - hw_info["arch"] = "pentium4" - else: - hw_info["arch"] = "modern" - except: - hw_info["arch"] = "modern" - hw_info["cpu"] = "Intel Mac" - - # PowerPC Detection (for old Macs) - elif machine in ('ppc', 'ppc64', 'powerpc', 'powerpc64'): - hw_info["family"] = "PowerPC" - try: - result = subprocess.run(['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10) - output = result.stdout.lower() - - if 'g5' in output or 'powermac11' in output: - hw_info["arch"] = "G5" - hw_info["cpu"] = "PowerPC G5" - elif 'g4' in output or 'powermac3' in output or 'powerbook' in output: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC G4" - elif 'g3' in output: - hw_info["arch"] = "G3" - hw_info["cpu"] = "PowerPC G3" - 
else: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC" - except: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC G4" - - # Get model name - try: - result = subprocess.run(['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10) - for line in result.stdout.split('\n'): - if 'Model Name' in line or 'Model Identifier' in line: - hw_info["model"] = line.split(':')[1].strip() - break - except: - pass - - return hw_info - - -def collect_entropy(cycles=48, inner_loop=25000): - """Collect timing entropy for hardware attestation""" - samples = [] - for _ in range(cycles): - start = time.perf_counter_ns() - acc = 0 - for j in range(inner_loop): - acc ^= (j * 31) & 0xFFFFFFFF - duration = time.perf_counter_ns() - start - samples.append(duration) - - mean_ns = sum(samples) / len(samples) - variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 - - return { - "mean_ns": mean_ns, - "variance_ns": variance_ns, - "min_ns": min(samples), - "max_ns": max(samples), - "sample_count": len(samples), - "samples_preview": samples[:12], - } - - -class MacMiner: - def __init__(self, miner_id=None, wallet=None): - self.node_url = NODE_URL - self.hw_info = detect_hardware() - self.fingerprint_data = {} - self.fingerprint_passed = False - - # Generate miner_id from hardware - if miner_id: - self.miner_id = miner_id - else: - hw_hash = hashlib.sha256( - f"{self.hw_info['hostname']}-{self.hw_info['serial'] or 'unknown'}".encode() - ).hexdigest()[:8] - arch = self.hw_info['arch'].lower().replace(' ', '_') - self.miner_id = f"{arch}-{self.hw_info['hostname'][:10]}-{hw_hash}" - - # Generate wallet address - if wallet: - self.wallet = wallet - else: - wallet_hash = hashlib.sha256(f"{self.miner_id}-rustchain".encode()).hexdigest()[:38] - self.wallet = f"{self.hw_info['family'].lower().replace(' ', '_')}_{wallet_hash}RTC" - - self.attestation_valid_until = 0 - self.shares_submitted = 0 - self.shares_accepted = 0 - self.last_entropy = {} - - 
self._print_banner() - - # Run initial fingerprint check - if FINGERPRINT_AVAILABLE: - self._run_fingerprint_checks() - - def _run_fingerprint_checks(self): - """Run hardware fingerprint checks for RIP-PoA""" - print(info("\n[FINGERPRINT] Running hardware fingerprint checks...")) - try: - passed, results = validate_all_checks() - self.fingerprint_passed = passed - self.fingerprint_data = {"checks": results, "all_passed": passed} - if passed: - print(success("[FINGERPRINT] All checks PASSED - eligible for full rewards")) - else: - failed = [k for k, v in results.items() if not v.get("passed")] - print(warning(f"[FINGERPRINT] FAILED checks: {failed}")) - print(warning("[FINGERPRINT] WARNING: May receive reduced/zero rewards")) - except Exception as e: - print(error(f"[FINGERPRINT] Error running checks: {e}")) - self.fingerprint_passed = False - self.fingerprint_data = {"error": str(e), "all_passed": False} - - def _print_banner(self): - print("=" * 70) - print("RustChain Mac Miner v2.4.0 - Serial Binding + Fingerprint") - print("=" * 70) - print(f"Miner ID: {self.miner_id}") - print(f"Wallet: {self.wallet}") - print(f"Node: {self.node_url}") - print(f"Serial: {self.hw_info.get('serial', 'N/A')}") - print("-" * 70) - print(f"Hardware: {self.hw_info['family']} / {self.hw_info['arch']}") - print(f"Model: {self.hw_info['model']}") - print(f"CPU: {self.hw_info['cpu']}") - print(f"Cores: {self.hw_info['cores']}") - print(f"Memory: {self.hw_info['memory_gb']} GB") - print("-" * 70) - weight = self._get_expected_weight() - print(f"Expected Weight: {weight}x (Proof of Antiquity)") - print("=" * 70) - - def _get_expected_weight(self): - """Calculate expected PoA weight""" - arch = self.hw_info['arch'].lower() - family = self.hw_info['family'].lower() - - if family == 'powerpc': - if arch == 'g3': return 3.0 - if arch == 'g4': return 2.5 - if arch == 'g5': return 2.0 - elif 'apple' in family or 'silicon' in family: - if arch in ('m1', 'm2', 'm3', 'apple_silicon'): return 1.2 - 
elif family == 'x86_64': - if arch == 'core2': return 1.5 - return 1.0 - - return 1.0 - - def attest(self): - """Complete hardware attestation with fingerprint""" - print(info(f"\n[{datetime.now().strftime('%H:%M:%S')}] Attesting hardware...")) - - try: - # Step 1: Get challenge - resp = requests.post(f"{self.node_url}/attest/challenge", json={}, timeout=15, verify=False) - if resp.status_code != 200: - print(error(f" ERROR: Challenge failed ({resp.status_code})")) - return False - - challenge = resp.json() - nonce = challenge.get("nonce", "") - print(success(f" Got challenge nonce: {nonce[:16]}...")) - - except Exception as e: - print(error(f" ERROR: Challenge error: {e}")) - return False - - # Collect entropy - entropy = collect_entropy() - self.last_entropy = entropy - - # Re-run fingerprint checks if needed - if FINGERPRINT_AVAILABLE and not self.fingerprint_data: - self._run_fingerprint_checks() - - # Build attestation payload - commitment = hashlib.sha256( - (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() - ).hexdigest() - - attestation = { - "miner": self.wallet, - "miner_id": self.miner_id, - "nonce": nonce, - "report": { - "nonce": nonce, - "commitment": commitment, - "derived": entropy, - "entropy_score": entropy.get("variance_ns", 0.0) - }, - "device": { - "family": self.hw_info["family"], - "arch": self.hw_info["arch"], - "model": self.hw_info["model"], - "cpu": self.hw_info["cpu"], - "cores": self.hw_info["cores"], - "memory_gb": self.hw_info["memory_gb"], - "serial": self.hw_info.get("serial") # Hardware serial for v2 binding - }, - "signals": { - "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), - "hostname": self.hw_info["hostname"] - }, - # RIP-PoA hardware fingerprint attestation - "fingerprint": self.fingerprint_data - } - - try: - resp = requests.post(f"{self.node_url}/attest/submit", - json=attestation, timeout=30, verify=False) - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - 
self.attestation_valid_until = time.time() + 580 - print(success(f" SUCCESS: Attestation accepted!")) - - # Show fingerprint status - if self.fingerprint_passed: - print(success(f" Fingerprint: PASSED")) - else: - print(warning(f" Fingerprint: FAILED (reduced rewards)")) - return True - else: - print(warning(f" WARNING: {result}")) - return False - else: - print(error(f" ERROR: HTTP {resp.status_code}: {resp.text[:200]}")) - return False - - except Exception as e: - print(error(f" ERROR: {e}")) - return False - - def check_eligibility(self): - """Check lottery eligibility""" - try: - resp = requests.get( - f"{self.node_url}/lottery/eligibility", - params={"miner_id": self.miner_id}, - timeout=10, - verify=False - ) - if resp.status_code == 200: - return resp.json() - return {"eligible": False, "reason": f"HTTP {resp.status_code}"} - except Exception as e: - return {"eligible": False, "reason": str(e)} - - def submit_header(self, slot): - """Submit header for slot""" - try: - message = f"slot:{slot}:miner:{self.miner_id}:ts:{int(time.time())}" - message_hex = message.encode().hex() - sig_data = hashlib.sha512(f"{message}{self.wallet}".encode()).hexdigest() - - header_payload = { - "miner_id": self.miner_id, - "header": { - "slot": slot, - "miner": self.miner_id, - "timestamp": int(time.time()) - }, - "message": message_hex, - "signature": sig_data, - "pubkey": self.wallet - } - - resp = requests.post( - f"{self.node_url}/headers/ingest_signed", - json=header_payload, - timeout=15, - verify=False - ) - - self.shares_submitted += 1 - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.shares_accepted += 1 - return True, result - return False, result - return False, {"error": f"HTTP {resp.status_code}"} - - except Exception as e: - return False, {"error": str(e)} - - def run(self): - """Main mining loop""" - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Starting miner...") - - # Initial attestation - while not self.attest(): - print(" 
Retrying attestation in 30 seconds...") - time.sleep(30) - - last_slot = 0 - - while True: - try: - # Re-attest if needed - if time.time() > self.attestation_valid_until: - self.attest() - - # Check eligibility - eligibility = self.check_eligibility() - slot = eligibility.get("slot", 0) - - if eligibility.get("eligible"): - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] ELIGIBLE for slot {slot}!") - - if slot != last_slot: - success, result = self.submit_header(slot) - if success: - print(f" Header ACCEPTED! Slot {slot}") - else: - print(f" Header rejected: {result}") - last_slot = slot - else: - reason = eligibility.get("reason", "unknown") - if reason == "not_attested": - print(f"[{datetime.now().strftime('%H:%M:%S')}] Not attested - re-attesting...") - self.attest() - - # Status every 60 seconds - if int(time.time()) % 60 == 0: - print(f"[{datetime.now().strftime('%H:%M:%S')}] Slot {slot} | " - f"Submitted: {self.shares_submitted} | " - f"Accepted: {self.shares_accepted}") - - time.sleep(LOTTERY_CHECK_INTERVAL) - - except KeyboardInterrupt: - print("\n\nShutting down miner...") - break - except Exception as e: - print(f"[{datetime.now().strftime('%H:%M:%S')}] Error: {e}") - time.sleep(30) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser(description="RustChain Mac Miner v2.4.0") - parser.add_argument("--version", "-v", action="version", version="RustChain Mac Miner v2.4.0") - parser.add_argument("--miner-id", "-m", help="Custom miner ID") - parser.add_argument("--wallet", "-w", help="Custom wallet address") - parser.add_argument("--node", "-n", default=NODE_URL, help="Node URL") - args = parser.parse_args() - - if args.node: - NODE_URL = args.node - - miner = MacMiner(miner_id=args.miner_id, wallet=args.wallet) - miner.run() +#!/usr/bin/env python3 +""" +RustChain Mac Universal Miner v2.4.0 +Supports: Apple Silicon (M1/M2/M3), Intel Mac, PowerPC (G4/G5) +With RIP-PoA Hardware Fingerprint Attestation + Serial Binding v2.0 
+""" +import warnings +warnings.filterwarnings('ignore', message='Unverified HTTPS request') + +import os +import sys +import json +import time +import hashlib +import platform +import subprocess +import requests +import statistics +import re +from datetime import datetime + +# Import fingerprint checks +try: + from fingerprint_checks import validate_all_checks + FINGERPRINT_AVAILABLE = True +except ImportError: + FINGERPRINT_AVAILABLE = False + print(warning("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled")) + +# Import CPU architecture detection +try: + from cpu_architecture_detection import detect_cpu_architecture, calculate_antiquity_multiplier + CPU_DETECTION_AVAILABLE = True +except ImportError: + CPU_DETECTION_AVAILABLE = False + print(info("[INFO] cpu_architecture_detection.py not found - using basic detection")) + +NODE_URL = os.environ.get("RUSTCHAIN_NODE", "https://rustchain.org") +BLOCK_TIME = 600 # 10 minutes +LOTTERY_CHECK_INTERVAL = 10 # Check every 10 seconds + +def get_mac_serial(): + """Get hardware serial number for macOS systems""" + try: + # Method 1: system_profiler + result = subprocess.run( + ['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'Serial Number' in line: + return line.split(':')[1].strip() + except: + pass + + try: + # Method 2: ioreg + result = subprocess.run( + ['ioreg', '-l'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'IOPlatformSerialNumber' in line: + return line.split('"')[-2] + except: + pass + + try: + # Method 3: Hardware UUID fallback + result = subprocess.run( + ['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'Hardware UUID' in line: + return line.split(':')[1].strip()[:16] + except: + pass + + return None + + +def detect_hardware(): + """Auto-detect Mac hardware 
architecture""" + machine = platform.machine().lower() + system = platform.system().lower() + + hw_info = { + "family": "unknown", + "arch": "unknown", + "model": "Mac", + "cpu": "unknown", + "cores": os.cpu_count() or 1, + "memory_gb": 4, + "hostname": platform.node(), + "mac": "00:00:00:00:00:00", + "macs": [], + "serial": get_mac_serial() + } + + # Get MAC addresses + try: + result = subprocess.run(['ifconfig'], capture_output=True, text=True, timeout=5) + macs = re.findall(r'ether\s+([0-9a-f:]{17})', result.stdout, re.IGNORECASE) + hw_info["macs"] = macs if macs else ["00:00:00:00:00:00"] + hw_info["mac"] = macs[0] if macs else "00:00:00:00:00:00" + except: + pass + + # Get memory + try: + result = subprocess.run(['sysctl', '-n', 'hw.memsize'], + capture_output=True, text=True, timeout=5) + hw_info["memory_gb"] = int(result.stdout.strip()) // (1024**3) + except: + pass + + # Apple Silicon Detection (M1/M2/M3) + if machine == 'arm64': + hw_info["family"] = "Apple Silicon" + try: + result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + capture_output=True, text=True, timeout=5) + brand = result.stdout.strip() + hw_info["cpu"] = brand + + if 'M3' in brand: + hw_info["arch"] = "M3" + elif 'M2' in brand: + hw_info["arch"] = "M2" + elif 'M1' in brand: + hw_info["arch"] = "M1" + else: + hw_info["arch"] = "apple_silicon" + except: + hw_info["arch"] = "apple_silicon" + hw_info["cpu"] = "Apple Silicon" + + # Intel Mac Detection + elif machine == 'x86_64': + hw_info["family"] = "x86_64" + try: + result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + capture_output=True, text=True, timeout=5) + cpu_brand = result.stdout.strip() + hw_info["cpu"] = cpu_brand + + # Use comprehensive CPU detection if available + if CPU_DETECTION_AVAILABLE: + cpu_info = calculate_antiquity_multiplier(cpu_brand) + hw_info["arch"] = cpu_info.architecture + hw_info["cpu_vendor"] = cpu_info.vendor + hw_info["cpu_year"] = cpu_info.microarch_year + 
hw_info["cpu_generation"] = cpu_info.generation + hw_info["is_server"] = cpu_info.is_server + print(f"[CPU] Detected: {cpu_info.generation} ({cpu_info.architecture}, {cpu_info.microarch_year})") + else: + # Fallback: Basic detection for retro Intel architectures + cpu_lower = cpu_brand.lower() + if 'core 2' in cpu_lower or 'core(tm)2' in cpu_lower: + hw_info["arch"] = "core2" # 1.3x + elif 'xeon' in cpu_lower and ('e5-16' in cpu_lower or 'e5-26' in cpu_lower): + hw_info["arch"] = "ivy_bridge" # Xeon E5 v2 = Ivy Bridge-E + elif 'i7-3' in cpu_lower or 'i5-3' in cpu_lower or 'i3-3' in cpu_lower: + hw_info["arch"] = "ivy_bridge" + elif 'i7-2' in cpu_lower or 'i5-2' in cpu_lower or 'i3-2' in cpu_lower: + hw_info["arch"] = "sandy_bridge" + elif 'i7-9' in cpu_lower and '900' in cpu_lower: + hw_info["arch"] = "nehalem" + elif 'i7-4' in cpu_lower or 'i5-4' in cpu_lower: + hw_info["arch"] = "haswell" + elif 'pentium' in cpu_lower: + hw_info["arch"] = "pentium4" + else: + hw_info["arch"] = "modern" + except: + hw_info["arch"] = "modern" + hw_info["cpu"] = "Intel Mac" + + # PowerPC Detection (for old Macs) + elif machine in ('ppc', 'ppc64', 'powerpc', 'powerpc64'): + hw_info["family"] = "PowerPC" + try: + result = subprocess.run(['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10) + output = result.stdout.lower() + + if 'g5' in output or 'powermac11' in output: + hw_info["arch"] = "G5" + hw_info["cpu"] = "PowerPC G5" + elif 'g4' in output or 'powermac3' in output or 'powerbook' in output: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC G4" + elif 'g3' in output: + hw_info["arch"] = "G3" + hw_info["cpu"] = "PowerPC G3" + else: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC" + except: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC G4" + + # Get model name + try: + result = subprocess.run(['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10) + for line in result.stdout.split('\n'): + if 'Model 
Name' in line or 'Model Identifier' in line: + hw_info["model"] = line.split(':')[1].strip() + break + except: + pass + + return hw_info + + +def collect_entropy(cycles=48, inner_loop=25000): + """Collect timing entropy for hardware attestation""" + samples = [] + for _ in range(cycles): + start = time.perf_counter_ns() + acc = 0 + for j in range(inner_loop): + acc ^= (j * 31) & 0xFFFFFFFF + duration = time.perf_counter_ns() - start + samples.append(duration) + + mean_ns = sum(samples) / len(samples) + variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 + + return { + "mean_ns": mean_ns, + "variance_ns": variance_ns, + "min_ns": min(samples), + "max_ns": max(samples), + "sample_count": len(samples), + "samples_preview": samples[:12], + } + + +class MacMiner: + def __init__(self, miner_id=None, wallet=None): + self.node_url = NODE_URL + self.hw_info = detect_hardware() + self.fingerprint_data = {} + self.fingerprint_passed = False + + # Generate miner_id from hardware + if miner_id: + self.miner_id = miner_id + else: + hw_hash = hashlib.sha256( + f"{self.hw_info['hostname']}-{self.hw_info['serial'] or 'unknown'}".encode() + ).hexdigest()[:8] + arch = self.hw_info['arch'].lower().replace(' ', '_') + self.miner_id = f"{arch}-{self.hw_info['hostname'][:10]}-{hw_hash}" + + # Generate wallet address + if wallet: + self.wallet = wallet + else: + wallet_hash = hashlib.sha256(f"{self.miner_id}-rustchain".encode()).hexdigest()[:38] + self.wallet = f"{self.hw_info['family'].lower().replace(' ', '_')}_{wallet_hash}RTC" + + self.attestation_valid_until = 0 + self.shares_submitted = 0 + self.shares_accepted = 0 + self.last_entropy = {} + + self._print_banner() + + # Run initial fingerprint check + if FINGERPRINT_AVAILABLE: + self._run_fingerprint_checks() + + def _run_fingerprint_checks(self): + """Run hardware fingerprint checks for RIP-PoA""" + print(info("\n[FINGERPRINT] Running hardware fingerprint checks...")) + try: + passed, results = 
validate_all_checks() + self.fingerprint_passed = passed + self.fingerprint_data = {"checks": results, "all_passed": passed} + if passed: + print(success("[FINGERPRINT] All checks PASSED - eligible for full rewards")) + else: + failed = [k for k, v in results.items() if not v.get("passed")] + print(warning(f"[FINGERPRINT] FAILED checks: {failed}")) + print(warning("[FINGERPRINT] WARNING: May receive reduced/zero rewards")) + except Exception as e: + print(error(f"[FINGERPRINT] Error running checks: {e}")) + self.fingerprint_passed = False + self.fingerprint_data = {"error": str(e), "all_passed": False} + + def _print_banner(self): + print("=" * 70) + print("RustChain Mac Miner v2.4.0 - Serial Binding + Fingerprint") + print("=" * 70) + print(f"Miner ID: {self.miner_id}") + print(f"Wallet: {self.wallet}") + print(f"Node: {self.node_url}") + print(f"Serial: {self.hw_info.get('serial', 'N/A')}") + print("-" * 70) + print(f"Hardware: {self.hw_info['family']} / {self.hw_info['arch']}") + print(f"Model: {self.hw_info['model']}") + print(f"CPU: {self.hw_info['cpu']}") + print(f"Cores: {self.hw_info['cores']}") + print(f"Memory: {self.hw_info['memory_gb']} GB") + print("-" * 70) + weight = self._get_expected_weight() + print(f"Expected Weight: {weight}x (Proof of Antiquity)") + print("=" * 70) + + def _get_expected_weight(self): + """Calculate expected PoA weight""" + arch = self.hw_info['arch'].lower() + family = self.hw_info['family'].lower() + + if family == 'powerpc': + if arch == 'g3': return 3.0 + if arch == 'g4': return 2.5 + if arch == 'g5': return 2.0 + elif 'apple' in family or 'silicon' in family: + if arch in ('m1', 'm2', 'm3', 'apple_silicon'): return 1.2 + elif family == 'x86_64': + if arch == 'core2': return 1.5 + return 1.0 + + return 1.0 + + def attest(self): + """Complete hardware attestation with fingerprint""" + print(info(f"\n[{datetime.now().strftime('%H:%M:%S')}] Attesting hardware...")) + + try: + # Step 1: Get challenge + resp = 
requests.post(f"{self.node_url}/attest/challenge", json={}, timeout=15, verify=False) + if resp.status_code != 200: + print(error(f" ERROR: Challenge failed ({resp.status_code})")) + return False + + challenge = resp.json() + nonce = challenge.get("nonce", "") + print(success(f" Got challenge nonce: {nonce[:16]}...")) + + except Exception as e: + print(error(f" ERROR: Challenge error: {e}")) + return False + + # Collect entropy + entropy = collect_entropy() + self.last_entropy = entropy + + # Re-run fingerprint checks if needed + if FINGERPRINT_AVAILABLE and not self.fingerprint_data: + self._run_fingerprint_checks() + + # Build attestation payload + commitment = hashlib.sha256( + (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() + ).hexdigest() + + attestation = { + "miner": self.wallet, + "miner_id": self.miner_id, + "nonce": nonce, + "report": { + "nonce": nonce, + "commitment": commitment, + "derived": entropy, + "entropy_score": entropy.get("variance_ns", 0.0) + }, + "device": { + "family": self.hw_info["family"], + "arch": self.hw_info["arch"], + "model": self.hw_info["model"], + "cpu": self.hw_info["cpu"], + "cores": self.hw_info["cores"], + "memory_gb": self.hw_info["memory_gb"], + "serial": self.hw_info.get("serial") # Hardware serial for v2 binding + }, + "signals": { + "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), + "hostname": self.hw_info["hostname"] + }, + # RIP-PoA hardware fingerprint attestation + "fingerprint": self.fingerprint_data + } + + try: + resp = requests.post(f"{self.node_url}/attest/submit", + json=attestation, timeout=30, verify=False) + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.attestation_valid_until = time.time() + 580 + print(success(f" SUCCESS: Attestation accepted!")) + + # Show fingerprint status + if self.fingerprint_passed: + print(success(f" Fingerprint: PASSED")) + else: + print(warning(f" Fingerprint: FAILED (reduced rewards)")) + return True + else: + 
print(warning(f" WARNING: {result}")) + return False + else: + print(error(f" ERROR: HTTP {resp.status_code}: {resp.text[:200]}")) + return False + + except Exception as e: + print(error(f" ERROR: {e}")) + return False + + def check_eligibility(self): + """Check lottery eligibility""" + try: + resp = requests.get( + f"{self.node_url}/lottery/eligibility", + params={"miner_id": self.miner_id}, + timeout=10, + verify=False + ) + if resp.status_code == 200: + return resp.json() + return {"eligible": False, "reason": f"HTTP {resp.status_code}"} + except Exception as e: + return {"eligible": False, "reason": str(e)} + + def submit_header(self, slot): + """Submit header for slot""" + try: + message = f"slot:{slot}:miner:{self.miner_id}:ts:{int(time.time())}" + message_hex = message.encode().hex() + sig_data = hashlib.sha512(f"{message}{self.wallet}".encode()).hexdigest() + + header_payload = { + "miner_id": self.miner_id, + "header": { + "slot": slot, + "miner": self.miner_id, + "timestamp": int(time.time()) + }, + "message": message_hex, + "signature": sig_data, + "pubkey": self.wallet + } + + resp = requests.post( + f"{self.node_url}/headers/ingest_signed", + json=header_payload, + timeout=15, + verify=False + ) + + self.shares_submitted += 1 + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.shares_accepted += 1 + return True, result + return False, result + return False, {"error": f"HTTP {resp.status_code}"} + + except Exception as e: + return False, {"error": str(e)} + + def run(self): + """Main mining loop""" + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Starting miner...") + + # Initial attestation + while not self.attest(): + print(" Retrying attestation in 30 seconds...") + time.sleep(30) + + last_slot = 0 + + while True: + try: + # Re-attest if needed + if time.time() > self.attestation_valid_until: + self.attest() + + # Check eligibility + eligibility = self.check_eligibility() + slot = eligibility.get("slot", 0) + + if 
eligibility.get("eligible"): + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] ELIGIBLE for slot {slot}!") + + if slot != last_slot: + success, result = self.submit_header(slot) + if success: + print(f" Header ACCEPTED! Slot {slot}") + else: + print(f" Header rejected: {result}") + last_slot = slot + else: + reason = eligibility.get("reason", "unknown") + if reason == "not_attested": + print(f"[{datetime.now().strftime('%H:%M:%S')}] Not attested - re-attesting...") + self.attest() + + # Status every 60 seconds + if int(time.time()) % 60 == 0: + print(f"[{datetime.now().strftime('%H:%M:%S')}] Slot {slot} | " + f"Submitted: {self.shares_submitted} | " + f"Accepted: {self.shares_accepted}") + + time.sleep(LOTTERY_CHECK_INTERVAL) + + except KeyboardInterrupt: + print("\n\nShutting down miner...") + break + except Exception as e: + print(f"[{datetime.now().strftime('%H:%M:%S')}] Error: {e}") + time.sleep(30) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="RustChain Mac Miner v2.4.0") + parser.add_argument("--version", "-v", action="version", version="RustChain Mac Miner v2.4.0") + parser.add_argument("--miner-id", "-m", help="Custom miner ID") + parser.add_argument("--wallet", "-w", help="Custom wallet address") + parser.add_argument("--node", "-n", default=NODE_URL, help="Node URL") + args = parser.parse_args() + + if args.node: + NODE_URL = args.node + + miner = MacMiner(miner_id=args.miner_id, wallet=args.wallet) + miner.run() diff --git a/miners/macos/rustchain_mac_miner_v2.5.py b/miners/macos/rustchain_mac_miner_v2.5.py index 2dd7d728..191c084c 100644 --- a/miners/macos/rustchain_mac_miner_v2.5.py +++ b/miners/macos/rustchain_mac_miner_v2.5.py @@ -1,680 +1,680 @@ -#!/usr/bin/env python3 -""" -RustChain Mac Universal Miner v2.5.0 -Supports: Apple Silicon (M1/M2/M3), Intel Mac, PowerPC (G4/G5) -With RIP-PoA Hardware Fingerprint Attestation + Serial Binding v2.0 -+ Embedded TLS Proxy Fallback for Legacy Macs 
(Tiger/Leopard) - -New in v2.5: - - Auto-detect TLS capability: try HTTPS direct, fall back to HTTP proxy - - Proxy auto-discovery on LAN (192.168.0.160:8089) - - Python 3.7+ compatible (no walrus, no f-string =) - - Persistent launchd/cron integration helpers - - Sleep-resistant: re-attest on wake automatically -""" -import warnings -warnings.filterwarnings('ignore', message='Unverified HTTPS request') - -import os -import sys -import json -import time -import hashlib -import platform -import subprocess -import statistics -import re -import socket -from datetime import datetime - -# Color helper stubs (no-op if terminal doesn't support ANSI) -def info(msg): return msg -def warning(msg): return msg -def success(msg): return msg -def error(msg): return msg - -# Attempt to import requests; provide instructions if missing -try: - import requests -except ImportError: - print("[ERROR] 'requests' module not found.") - print(" Install with: pip3 install requests --user") - print(" Or: python3 -m pip install requests --user") - sys.exit(1) - -# Import fingerprint checks -try: - from fingerprint_checks import validate_all_checks - FINGERPRINT_AVAILABLE = True -except ImportError: - FINGERPRINT_AVAILABLE = False - print(warning("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled")) - -# Import CPU architecture detection -try: - from cpu_architecture_detection import detect_cpu_architecture, calculate_antiquity_multiplier - CPU_DETECTION_AVAILABLE = True -except ImportError: - CPU_DETECTION_AVAILABLE = False - -MINER_VERSION = "2.5.0" -NODE_URL = os.environ.get("RUSTCHAIN_NODE", "https://50.28.86.131") -PROXY_URL = os.environ.get("RUSTCHAIN_PROXY", "http://192.168.0.160:8089") -BLOCK_TIME = 600 # 10 minutes -LOTTERY_CHECK_INTERVAL = 10 -ATTESTATION_TTL = 580 # Re-attest 20s before expiry - - -# ── Transport Layer (HTTPS direct or HTTP proxy) ──────────────────── - -class NodeTransport: - """Handles communication with the RustChain node. 
- - Tries HTTPS directly first. If TLS fails (old Python/OpenSSL on - Tiger/Leopard), falls back to the HTTP proxy on the NAS. - """ - - def __init__(self, node_url, proxy_url): - self.node_url = node_url.rstrip("/") - self.proxy_url = proxy_url.rstrip("/") if proxy_url else None - self.use_proxy = False - self._probe_transport() - - def _probe_transport(self): - """Test if we can reach the node directly via HTTPS.""" - try: - r = requests.get( - self.node_url + "/health", - timeout=10, verify=False - ) - if r.status_code == 200: - print(success("[TRANSPORT] Direct HTTPS to node: OK")) - self.use_proxy = False - return - except requests.exceptions.SSLError: - print(warning("[TRANSPORT] TLS failed (legacy OpenSSL?) - trying proxy...")) - except Exception as e: - print(warning("[TRANSPORT] Direct connection failed: {} - trying proxy...".format(e))) - - # Try the proxy - if self.proxy_url: - try: - r = requests.get( - self.proxy_url + "/health", - timeout=10 - ) - if r.status_code == 200: - print(success("[TRANSPORT] HTTP proxy at {}: OK".format(self.proxy_url))) - self.use_proxy = True - return - except Exception as e: - print(warning("[TRANSPORT] Proxy {} also failed: {}".format(self.proxy_url, e))) - - # Last resort: try direct without verify (may work on some old systems) - print(warning("[TRANSPORT] Falling back to direct HTTPS (verify=False)")) - self.use_proxy = False - - @property - def base_url(self): - if self.use_proxy: - return self.proxy_url - return self.node_url - - def get(self, path, **kwargs): - """GET request through whichever transport works.""" - kwargs.setdefault("timeout", 15) - kwargs.setdefault("verify", False) - url = self.base_url + path - return requests.get(url, **kwargs) - - def post(self, path, **kwargs): - """POST request through whichever transport works.""" - kwargs.setdefault("timeout", 15) - kwargs.setdefault("verify", False) - url = self.base_url + path - return requests.post(url, **kwargs) - - -# ── Hardware Detection 
────────────────────────────────────────────── - -def get_mac_serial(): - """Get hardware serial number for macOS systems.""" - try: - result = subprocess.run( - ['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'Serial Number' in line: - return line.split(':')[1].strip() - except Exception: - pass - - try: - result = subprocess.run( - ['ioreg', '-l'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'IOPlatformSerialNumber' in line: - return line.split('"')[-2] - except Exception: - pass - - try: - result = subprocess.run( - ['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10 - ) - for line in result.stdout.split('\n'): - if 'Hardware UUID' in line: - return line.split(':')[1].strip()[:16] - except Exception: - pass - - return None - - -def detect_hardware(): - """Auto-detect Mac hardware architecture.""" - machine = platform.machine().lower() - - hw_info = { - "family": "unknown", - "arch": "unknown", - "model": "Mac", - "cpu": "unknown", - "cores": os.cpu_count() or 1, - "memory_gb": 4, - "hostname": platform.node(), - "mac": "00:00:00:00:00:00", - "macs": [], - "serial": get_mac_serial() - } - - # Get MAC addresses - try: - result = subprocess.run(['ifconfig'], capture_output=True, text=True, timeout=5) - macs = re.findall(r'ether\s+([0-9a-f:]{17})', result.stdout, re.IGNORECASE) - hw_info["macs"] = macs if macs else ["00:00:00:00:00:00"] - hw_info["mac"] = macs[0] if macs else "00:00:00:00:00:00" - except Exception: - pass - - # Get memory - try: - result = subprocess.run(['sysctl', '-n', 'hw.memsize'], - capture_output=True, text=True, timeout=5) - hw_info["memory_gb"] = int(result.stdout.strip()) // (1024**3) - except Exception: - pass - - # Apple Silicon Detection (M1/M2/M3/M4) - if machine == 'arm64': - hw_info["family"] = "Apple Silicon" - try: - result = subprocess.run(['sysctl', '-n', 
'machdep.cpu.brand_string'], - capture_output=True, text=True, timeout=5) - brand = result.stdout.strip() - hw_info["cpu"] = brand - - if 'M4' in brand: - hw_info["arch"] = "M4" - elif 'M3' in brand: - hw_info["arch"] = "M3" - elif 'M2' in brand: - hw_info["arch"] = "M2" - elif 'M1' in brand: - hw_info["arch"] = "M1" - else: - hw_info["arch"] = "apple_silicon" - except Exception: - hw_info["arch"] = "apple_silicon" - hw_info["cpu"] = "Apple Silicon" - - # Intel Mac Detection - elif machine == 'x86_64': - hw_info["family"] = "x86_64" - try: - result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], - capture_output=True, text=True, timeout=5) - cpu_brand = result.stdout.strip() - hw_info["cpu"] = cpu_brand - - if CPU_DETECTION_AVAILABLE: - cpu_info = calculate_antiquity_multiplier(cpu_brand) - hw_info["arch"] = cpu_info.architecture - hw_info["cpu_vendor"] = cpu_info.vendor - hw_info["cpu_year"] = cpu_info.microarch_year - hw_info["cpu_generation"] = cpu_info.generation - hw_info["is_server"] = cpu_info.is_server - else: - cpu_lower = cpu_brand.lower() - if 'core 2' in cpu_lower or 'core(tm)2' in cpu_lower: - hw_info["arch"] = "core2" - elif 'xeon' in cpu_lower and ('e5-16' in cpu_lower or 'e5-26' in cpu_lower): - hw_info["arch"] = "ivy_bridge" - elif 'i7-3' in cpu_lower or 'i5-3' in cpu_lower or 'i3-3' in cpu_lower: - hw_info["arch"] = "ivy_bridge" - elif 'i7-2' in cpu_lower or 'i5-2' in cpu_lower or 'i3-2' in cpu_lower: - hw_info["arch"] = "sandy_bridge" - elif 'i7-9' in cpu_lower and '900' in cpu_lower: - hw_info["arch"] = "nehalem" - elif 'i7-4' in cpu_lower or 'i5-4' in cpu_lower: - hw_info["arch"] = "haswell" - elif 'pentium' in cpu_lower: - hw_info["arch"] = "pentium4" - else: - hw_info["arch"] = "modern" - except Exception: - hw_info["arch"] = "modern" - hw_info["cpu"] = "Intel Mac" - - # PowerPC Detection (for vintage Macs) - elif machine in ('ppc', 'ppc64', 'powerpc', 'powerpc64', 'Power Macintosh'): - hw_info["family"] = "PowerPC" - try: - 
result = subprocess.run(['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10) - output = result.stdout.lower() - - if 'g5' in output or 'powermac11' in output: - hw_info["arch"] = "G5" - hw_info["cpu"] = "PowerPC G5" - elif 'g4' in output or 'powermac3' in output or 'powerbook' in output: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC G4" - elif 'g3' in output: - hw_info["arch"] = "G3" - hw_info["cpu"] = "PowerPC G3" - else: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC" - except Exception: - hw_info["arch"] = "G4" - hw_info["cpu"] = "PowerPC G4" - - # Get model name - try: - result = subprocess.run(['system_profiler', 'SPHardwareDataType'], - capture_output=True, text=True, timeout=10) - for line in result.stdout.split('\n'): - if 'Model Name' in line or 'Model Identifier' in line: - hw_info["model"] = line.split(':')[1].strip() - break - except Exception: - pass - - return hw_info - - -def collect_entropy(cycles=48, inner_loop=25000): - """Collect timing entropy for hardware attestation.""" - samples = [] - for _ in range(cycles): - start = time.perf_counter_ns() - acc = 0 - for j in range(inner_loop): - acc ^= (j * 31) & 0xFFFFFFFF - duration = time.perf_counter_ns() - start - samples.append(duration) - - mean_ns = sum(samples) / len(samples) - variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 - - return { - "mean_ns": mean_ns, - "variance_ns": variance_ns, - "min_ns": min(samples), - "max_ns": max(samples), - "sample_count": len(samples), - "samples_preview": samples[:12], - } - - -# ── Miner Class ───────────────────────────────────────────────────── - -class MacMiner: - def __init__(self, miner_id=None, wallet=None, node_url=None, proxy_url=None): - self.hw_info = detect_hardware() - self.fingerprint_data = {} - self.fingerprint_passed = False - - # Generate miner_id from hardware - if miner_id: - self.miner_id = miner_id - else: - hw_hash = hashlib.sha256( - "{}-{}".format( - 
self.hw_info['hostname'], - self.hw_info['serial'] or 'unknown' - ).encode() - ).hexdigest()[:8] - arch = self.hw_info['arch'].lower().replace(' ', '_') - self.miner_id = "{}-{}-{}".format(arch, self.hw_info['hostname'][:10], hw_hash) - - # Generate wallet address - if wallet: - self.wallet = wallet - else: - wallet_hash = hashlib.sha256( - "{}-rustchain".format(self.miner_id).encode() - ).hexdigest()[:38] - family = self.hw_info['family'].lower().replace(' ', '_') - self.wallet = "{}_{}RTC".format(family, wallet_hash) - - # Set up transport (HTTPS direct or HTTP proxy) - self.transport = NodeTransport( - node_url or NODE_URL, - proxy_url or PROXY_URL - ) - - self.attestation_valid_until = 0 - self.shares_submitted = 0 - self.shares_accepted = 0 - self.last_entropy = {} - self._last_system_time = time.monotonic() - - self._print_banner() - - # Run initial fingerprint check - if FINGERPRINT_AVAILABLE: - self._run_fingerprint_checks() - - def _run_fingerprint_checks(self): - """Run hardware fingerprint checks for RIP-PoA.""" - print(info("\n[FINGERPRINT] Running hardware fingerprint checks...")) - try: - passed, results = validate_all_checks() - self.fingerprint_passed = passed - self.fingerprint_data = {"checks": results, "all_passed": passed} - if passed: - print(success("[FINGERPRINT] All checks PASSED - eligible for full rewards")) - else: - failed = [k for k, v in results.items() if not v.get("passed")] - print(warning("[FINGERPRINT] FAILED checks: {}".format(failed))) - print(warning("[FINGERPRINT] WARNING: May receive reduced/zero rewards")) - except Exception as e: - print(error("[FINGERPRINT] Error running checks: {}".format(e))) - self.fingerprint_passed = False - self.fingerprint_data = {"error": str(e), "all_passed": False} - - def _print_banner(self): - print("=" * 70) - print("RustChain Mac Miner v{} - Serial Binding + Fingerprint".format(MINER_VERSION)) - print("=" * 70) - print("Miner ID: {}".format(self.miner_id)) - print("Wallet: 
{}".format(self.wallet)) - print("Transport: {}".format( - "PROXY ({})".format(self.transport.proxy_url) if self.transport.use_proxy - else "DIRECT ({})".format(self.transport.node_url) - )) - print("Serial: {}".format(self.hw_info.get('serial', 'N/A'))) - print("-" * 70) - print("Hardware: {} / {}".format(self.hw_info['family'], self.hw_info['arch'])) - print("Model: {}".format(self.hw_info['model'])) - print("CPU: {}".format(self.hw_info['cpu'])) - print("Cores: {}".format(self.hw_info['cores'])) - print("Memory: {} GB".format(self.hw_info['memory_gb'])) - print("-" * 70) - weight = self._get_expected_weight() - print("Expected Weight: {}x (Proof of Antiquity)".format(weight)) - print("=" * 70) - - def _get_expected_weight(self): - """Calculate expected PoA weight.""" - arch = self.hw_info['arch'].lower() - family = self.hw_info['family'].lower() - - if family == 'powerpc': - if arch == 'g3': return 3.0 - if arch == 'g4': return 2.5 - if arch == 'g5': return 2.0 - elif 'apple' in family or 'silicon' in family: - if arch in ('m1', 'm2', 'm3', 'm4', 'apple_silicon'): - return 1.2 - elif family == 'x86_64': - if arch == 'core2': return 1.5 - return 1.0 - - return 1.0 - - def _detect_sleep_wake(self): - """Detect if the machine slept (large time jump).""" - now = time.monotonic() - gap = now - self._last_system_time - self._last_system_time = now - # If more than 2x the check interval elapsed, we probably slept - if gap > LOTTERY_CHECK_INTERVAL * 3: - return True - return False - - def attest(self): - """Complete hardware attestation with fingerprint.""" - ts = datetime.now().strftime('%H:%M:%S') - print(info("\n[{}] Attesting hardware...".format(ts))) - - try: - resp = self.transport.post("/attest/challenge", json={}, timeout=15) - if resp.status_code != 200: - print(error(" ERROR: Challenge failed ({})".format(resp.status_code))) - return False - - challenge = resp.json() - nonce = challenge.get("nonce", "") - print(success(" Got challenge nonce: 
{}...".format(nonce[:16]))) - - except Exception as e: - print(error(" ERROR: Challenge error: {}".format(e))) - return False - - # Collect entropy - entropy = collect_entropy() - self.last_entropy = entropy - - # Re-run fingerprint checks if needed - if FINGERPRINT_AVAILABLE and not self.fingerprint_data: - self._run_fingerprint_checks() - - # Build attestation payload - commitment = hashlib.sha256( - (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() - ).hexdigest() - - attestation = { - "miner": self.wallet, - "miner_id": self.miner_id, - "nonce": nonce, - "report": { - "nonce": nonce, - "commitment": commitment, - "derived": entropy, - "entropy_score": entropy.get("variance_ns", 0.0) - }, - "device": { - "family": self.hw_info["family"], - "arch": self.hw_info["arch"], - "model": self.hw_info["model"], - "cpu": self.hw_info["cpu"], - "cores": self.hw_info["cores"], - "memory_gb": self.hw_info["memory_gb"], - "serial": self.hw_info.get("serial") - }, - "signals": { - "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), - "hostname": self.hw_info["hostname"] - }, - "fingerprint": self.fingerprint_data, - "miner_version": MINER_VERSION, - } - - try: - resp = self.transport.post("/attest/submit", json=attestation, timeout=30) - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.attestation_valid_until = time.time() + ATTESTATION_TTL - print(success(" SUCCESS: Attestation accepted!")) - if self.fingerprint_passed: - print(success(" Fingerprint: PASSED")) - else: - print(warning(" Fingerprint: FAILED (reduced rewards)")) - return True - else: - print(warning(" WARNING: {}".format(result))) - return False - else: - print(error(" ERROR: HTTP {}: {}".format(resp.status_code, resp.text[:200]))) - return False - - except Exception as e: - print(error(" ERROR: {}".format(e))) - return False - - def check_eligibility(self): - """Check lottery eligibility.""" - try: - resp = self.transport.get( - "/lottery/eligibility", - 
params={"miner_id": self.miner_id}, - timeout=10, - ) - if resp.status_code == 200: - return resp.json() - return {"eligible": False, "reason": "HTTP {}".format(resp.status_code)} - except Exception as e: - return {"eligible": False, "reason": str(e)} - - def submit_header(self, slot): - """Submit header for slot.""" - try: - message = "slot:{}:miner:{}:ts:{}".format(slot, self.miner_id, int(time.time())) - message_hex = message.encode().hex() - sig_data = hashlib.sha512( - "{}{}".format(message, self.wallet).encode() - ).hexdigest() - - header_payload = { - "miner_id": self.miner_id, - "header": { - "slot": slot, - "miner": self.miner_id, - "timestamp": int(time.time()) - }, - "message": message_hex, - "signature": sig_data, - "pubkey": self.wallet - } - - resp = self.transport.post( - "/headers/ingest_signed", - json=header_payload, - timeout=15, - ) - - self.shares_submitted += 1 - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.shares_accepted += 1 - return True, result - return False, result - return False, {"error": "HTTP {}".format(resp.status_code)} - - except Exception as e: - return False, {"error": str(e)} - - def run(self): - """Main mining loop with sleep-wake detection.""" - ts = datetime.now().strftime('%H:%M:%S') - print("\n[{}] Starting miner...".format(ts)) - - # Initial attestation - while not self.attest(): - print(" Retrying attestation in 30 seconds...") - time.sleep(30) - - last_slot = 0 - status_counter = 0 - - while True: - try: - # Detect sleep/wake — force re-attest - if self._detect_sleep_wake(): - ts = datetime.now().strftime('%H:%M:%S') - print("\n[{}] Sleep/wake detected - re-attesting...".format(ts)) - self.attestation_valid_until = 0 - - # Re-attest if expired - if time.time() > self.attestation_valid_until: - self.attest() - - # Check eligibility - eligibility = self.check_eligibility() - slot = eligibility.get("slot", 0) - - if eligibility.get("eligible"): - ts = 
datetime.now().strftime('%H:%M:%S') - print("\n[{}] ELIGIBLE for slot {}!".format(ts, slot)) - - if slot != last_slot: - ok, result = self.submit_header(slot) - if ok: - print(" Header ACCEPTED! Slot {}".format(slot)) - else: - print(" Header rejected: {}".format(result)) - last_slot = slot - else: - reason = eligibility.get("reason", "unknown") - if reason == "not_attested": - ts = datetime.now().strftime('%H:%M:%S') - print("[{}] Not attested - re-attesting...".format(ts)) - self.attest() - - # Status every ~60 seconds - status_counter += 1 - if status_counter >= (60 // LOTTERY_CHECK_INTERVAL): - ts = datetime.now().strftime('%H:%M:%S') - print("[{}] Slot {} | Submitted: {} | Accepted: {}".format( - ts, slot, self.shares_submitted, self.shares_accepted - )) - status_counter = 0 - - time.sleep(LOTTERY_CHECK_INTERVAL) - - except KeyboardInterrupt: - print("\n\nShutting down miner...") - break - except Exception as e: - ts = datetime.now().strftime('%H:%M:%S') - print("[{}] Error: {}".format(ts, e)) - time.sleep(30) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser(description="RustChain Mac Miner v{}".format(MINER_VERSION)) - parser.add_argument("--version", "-v", action="version", - version="rustchain-mac-miner {}".format(MINER_VERSION)) - parser.add_argument("--miner-id", "-m", help="Custom miner ID") - parser.add_argument("--wallet", "-w", help="Custom wallet address") - parser.add_argument("--node", "-n", default=NODE_URL, help="Node URL (default: {})".format(NODE_URL)) - parser.add_argument("--proxy", "-p", default=PROXY_URL, - help="HTTP proxy URL for legacy Macs (default: {})".format(PROXY_URL)) - parser.add_argument("--no-proxy", action="store_true", - help="Disable proxy fallback (HTTPS only)") - args = parser.parse_args() - - node = args.node - proxy = None if args.no_proxy else args.proxy - - miner = MacMiner( - miner_id=args.miner_id, - wallet=args.wallet, - node_url=node, - proxy_url=proxy, - ) - miner.run() 
+#!/usr/bin/env python3 +""" +RustChain Mac Universal Miner v2.5.0 +Supports: Apple Silicon (M1/M2/M3), Intel Mac, PowerPC (G4/G5) +With RIP-PoA Hardware Fingerprint Attestation + Serial Binding v2.0 ++ Embedded TLS Proxy Fallback for Legacy Macs (Tiger/Leopard) + +New in v2.5: + - Auto-detect TLS capability: try HTTPS direct, fall back to HTTP proxy + - Proxy auto-discovery on LAN (192.168.0.160:8089) + - Python 3.7+ compatible (no walrus, no f-string =) + - Persistent launchd/cron integration helpers + - Sleep-resistant: re-attest on wake automatically +""" +import warnings +warnings.filterwarnings('ignore', message='Unverified HTTPS request') + +import os +import sys +import json +import time +import hashlib +import platform +import subprocess +import statistics +import re +import socket +from datetime import datetime + +# Color helper stubs (no-op if terminal doesn't support ANSI) +def info(msg): return msg +def warning(msg): return msg +def success(msg): return msg +def error(msg): return msg + +# Attempt to import requests; provide instructions if missing +try: + import requests +except ImportError: + print("[ERROR] 'requests' module not found.") + print(" Install with: pip3 install requests --user") + print(" Or: python3 -m pip install requests --user") + sys.exit(1) + +# Import fingerprint checks +try: + from fingerprint_checks import validate_all_checks + FINGERPRINT_AVAILABLE = True +except ImportError: + FINGERPRINT_AVAILABLE = False + print(warning("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled")) + +# Import CPU architecture detection +try: + from cpu_architecture_detection import detect_cpu_architecture, calculate_antiquity_multiplier + CPU_DETECTION_AVAILABLE = True +except ImportError: + CPU_DETECTION_AVAILABLE = False + +MINER_VERSION = "2.5.0" +NODE_URL = os.environ.get("RUSTCHAIN_NODE", "https://50.28.86.131") +PROXY_URL = os.environ.get("RUSTCHAIN_PROXY", "http://192.168.0.160:8089") +BLOCK_TIME = 600 # 10 minutes 
+LOTTERY_CHECK_INTERVAL = 10 +ATTESTATION_TTL = 580 # Re-attest 20s before expiry + + +# ── Transport Layer (HTTPS direct or HTTP proxy) ──────────────────── + +class NodeTransport: + """Handles communication with the RustChain node. + + Tries HTTPS directly first. If TLS fails (old Python/OpenSSL on + Tiger/Leopard), falls back to the HTTP proxy on the NAS. + """ + + def __init__(self, node_url, proxy_url): + self.node_url = node_url.rstrip("/") + self.proxy_url = proxy_url.rstrip("/") if proxy_url else None + self.use_proxy = False + self._probe_transport() + + def _probe_transport(self): + """Test if we can reach the node directly via HTTPS.""" + try: + r = requests.get( + self.node_url + "/health", + timeout=10, verify=False + ) + if r.status_code == 200: + print(success("[TRANSPORT] Direct HTTPS to node: OK")) + self.use_proxy = False + return + except requests.exceptions.SSLError: + print(warning("[TRANSPORT] TLS failed (legacy OpenSSL?) - trying proxy...")) + except Exception as e: + print(warning("[TRANSPORT] Direct connection failed: {} - trying proxy...".format(e))) + + # Try the proxy + if self.proxy_url: + try: + r = requests.get( + self.proxy_url + "/health", + timeout=10 + ) + if r.status_code == 200: + print(success("[TRANSPORT] HTTP proxy at {}: OK".format(self.proxy_url))) + self.use_proxy = True + return + except Exception as e: + print(warning("[TRANSPORT] Proxy {} also failed: {}".format(self.proxy_url, e))) + + # Last resort: try direct without verify (may work on some old systems) + print(warning("[TRANSPORT] Falling back to direct HTTPS (verify=False)")) + self.use_proxy = False + + @property + def base_url(self): + if self.use_proxy: + return self.proxy_url + return self.node_url + + def get(self, path, **kwargs): + """GET request through whichever transport works.""" + kwargs.setdefault("timeout", 15) + kwargs.setdefault("verify", False) + url = self.base_url + path + return requests.get(url, **kwargs) + + def post(self, path, **kwargs): + 
"""POST request through whichever transport works.""" + kwargs.setdefault("timeout", 15) + kwargs.setdefault("verify", False) + url = self.base_url + path + return requests.post(url, **kwargs) + + +# ── Hardware Detection ────────────────────────────────────────────── + +def get_mac_serial(): + """Get hardware serial number for macOS systems.""" + try: + result = subprocess.run( + ['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'Serial Number' in line: + return line.split(':')[1].strip() + except Exception: + pass + + try: + result = subprocess.run( + ['ioreg', '-l'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'IOPlatformSerialNumber' in line: + return line.split('"')[-2] + except Exception: + pass + + try: + result = subprocess.run( + ['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10 + ) + for line in result.stdout.split('\n'): + if 'Hardware UUID' in line: + return line.split(':')[1].strip()[:16] + except Exception: + pass + + return None + + +def detect_hardware(): + """Auto-detect Mac hardware architecture.""" + machine = platform.machine().lower() + + hw_info = { + "family": "unknown", + "arch": "unknown", + "model": "Mac", + "cpu": "unknown", + "cores": os.cpu_count() or 1, + "memory_gb": 4, + "hostname": platform.node(), + "mac": "00:00:00:00:00:00", + "macs": [], + "serial": get_mac_serial() + } + + # Get MAC addresses + try: + result = subprocess.run(['ifconfig'], capture_output=True, text=True, timeout=5) + macs = re.findall(r'ether\s+([0-9a-f:]{17})', result.stdout, re.IGNORECASE) + hw_info["macs"] = macs if macs else ["00:00:00:00:00:00"] + hw_info["mac"] = macs[0] if macs else "00:00:00:00:00:00" + except Exception: + pass + + # Get memory + try: + result = subprocess.run(['sysctl', '-n', 'hw.memsize'], + capture_output=True, text=True, timeout=5) + hw_info["memory_gb"] = 
int(result.stdout.strip()) // (1024**3) + except Exception: + pass + + # Apple Silicon Detection (M1/M2/M3/M4) + if machine == 'arm64': + hw_info["family"] = "Apple Silicon" + try: + result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + capture_output=True, text=True, timeout=5) + brand = result.stdout.strip() + hw_info["cpu"] = brand + + if 'M4' in brand: + hw_info["arch"] = "M4" + elif 'M3' in brand: + hw_info["arch"] = "M3" + elif 'M2' in brand: + hw_info["arch"] = "M2" + elif 'M1' in brand: + hw_info["arch"] = "M1" + else: + hw_info["arch"] = "apple_silicon" + except Exception: + hw_info["arch"] = "apple_silicon" + hw_info["cpu"] = "Apple Silicon" + + # Intel Mac Detection + elif machine == 'x86_64': + hw_info["family"] = "x86_64" + try: + result = subprocess.run(['sysctl', '-n', 'machdep.cpu.brand_string'], + capture_output=True, text=True, timeout=5) + cpu_brand = result.stdout.strip() + hw_info["cpu"] = cpu_brand + + if CPU_DETECTION_AVAILABLE: + cpu_info = calculate_antiquity_multiplier(cpu_brand) + hw_info["arch"] = cpu_info.architecture + hw_info["cpu_vendor"] = cpu_info.vendor + hw_info["cpu_year"] = cpu_info.microarch_year + hw_info["cpu_generation"] = cpu_info.generation + hw_info["is_server"] = cpu_info.is_server + else: + cpu_lower = cpu_brand.lower() + if 'core 2' in cpu_lower or 'core(tm)2' in cpu_lower: + hw_info["arch"] = "core2" + elif 'xeon' in cpu_lower and ('e5-16' in cpu_lower or 'e5-26' in cpu_lower): + hw_info["arch"] = "ivy_bridge" + elif 'i7-3' in cpu_lower or 'i5-3' in cpu_lower or 'i3-3' in cpu_lower: + hw_info["arch"] = "ivy_bridge" + elif 'i7-2' in cpu_lower or 'i5-2' in cpu_lower or 'i3-2' in cpu_lower: + hw_info["arch"] = "sandy_bridge" + elif 'i7-9' in cpu_lower and '900' in cpu_lower: + hw_info["arch"] = "nehalem" + elif 'i7-4' in cpu_lower or 'i5-4' in cpu_lower: + hw_info["arch"] = "haswell" + elif 'pentium' in cpu_lower: + hw_info["arch"] = "pentium4" + else: + hw_info["arch"] = "modern" + except Exception: + 
hw_info["arch"] = "modern" + hw_info["cpu"] = "Intel Mac" + + # PowerPC Detection (for vintage Macs) + elif machine in ('ppc', 'ppc64', 'powerpc', 'powerpc64', 'Power Macintosh'): + hw_info["family"] = "PowerPC" + try: + result = subprocess.run(['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10) + output = result.stdout.lower() + + if 'g5' in output or 'powermac11' in output: + hw_info["arch"] = "G5" + hw_info["cpu"] = "PowerPC G5" + elif 'g4' in output or 'powermac3' in output or 'powerbook' in output: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC G4" + elif 'g3' in output: + hw_info["arch"] = "G3" + hw_info["cpu"] = "PowerPC G3" + else: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC" + except Exception: + hw_info["arch"] = "G4" + hw_info["cpu"] = "PowerPC G4" + + # Get model name + try: + result = subprocess.run(['system_profiler', 'SPHardwareDataType'], + capture_output=True, text=True, timeout=10) + for line in result.stdout.split('\n'): + if 'Model Name' in line or 'Model Identifier' in line: + hw_info["model"] = line.split(':')[1].strip() + break + except Exception: + pass + + return hw_info + + +def collect_entropy(cycles=48, inner_loop=25000): + """Collect timing entropy for hardware attestation.""" + samples = [] + for _ in range(cycles): + start = time.perf_counter_ns() + acc = 0 + for j in range(inner_loop): + acc ^= (j * 31) & 0xFFFFFFFF + duration = time.perf_counter_ns() - start + samples.append(duration) + + mean_ns = sum(samples) / len(samples) + variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 + + return { + "mean_ns": mean_ns, + "variance_ns": variance_ns, + "min_ns": min(samples), + "max_ns": max(samples), + "sample_count": len(samples), + "samples_preview": samples[:12], + } + + +# ── Miner Class ───────────────────────────────────────────────────── + +class MacMiner: + def __init__(self, miner_id=None, wallet=None, node_url=None, proxy_url=None): + self.hw_info = 
detect_hardware() + self.fingerprint_data = {} + self.fingerprint_passed = False + + # Generate miner_id from hardware + if miner_id: + self.miner_id = miner_id + else: + hw_hash = hashlib.sha256( + "{}-{}".format( + self.hw_info['hostname'], + self.hw_info['serial'] or 'unknown' + ).encode() + ).hexdigest()[:8] + arch = self.hw_info['arch'].lower().replace(' ', '_') + self.miner_id = "{}-{}-{}".format(arch, self.hw_info['hostname'][:10], hw_hash) + + # Generate wallet address + if wallet: + self.wallet = wallet + else: + wallet_hash = hashlib.sha256( + "{}-rustchain".format(self.miner_id).encode() + ).hexdigest()[:38] + family = self.hw_info['family'].lower().replace(' ', '_') + self.wallet = "{}_{}RTC".format(family, wallet_hash) + + # Set up transport (HTTPS direct or HTTP proxy) + self.transport = NodeTransport( + node_url or NODE_URL, + proxy_url or PROXY_URL + ) + + self.attestation_valid_until = 0 + self.shares_submitted = 0 + self.shares_accepted = 0 + self.last_entropy = {} + self._last_system_time = time.monotonic() + + self._print_banner() + + # Run initial fingerprint check + if FINGERPRINT_AVAILABLE: + self._run_fingerprint_checks() + + def _run_fingerprint_checks(self): + """Run hardware fingerprint checks for RIP-PoA.""" + print(info("\n[FINGERPRINT] Running hardware fingerprint checks...")) + try: + passed, results = validate_all_checks() + self.fingerprint_passed = passed + self.fingerprint_data = {"checks": results, "all_passed": passed} + if passed: + print(success("[FINGERPRINT] All checks PASSED - eligible for full rewards")) + else: + failed = [k for k, v in results.items() if not v.get("passed")] + print(warning("[FINGERPRINT] FAILED checks: {}".format(failed))) + print(warning("[FINGERPRINT] WARNING: May receive reduced/zero rewards")) + except Exception as e: + print(error("[FINGERPRINT] Error running checks: {}".format(e))) + self.fingerprint_passed = False + self.fingerprint_data = {"error": str(e), "all_passed": False} + + def 
_print_banner(self): + print("=" * 70) + print("RustChain Mac Miner v{} - Serial Binding + Fingerprint".format(MINER_VERSION)) + print("=" * 70) + print("Miner ID: {}".format(self.miner_id)) + print("Wallet: {}".format(self.wallet)) + print("Transport: {}".format( + "PROXY ({})".format(self.transport.proxy_url) if self.transport.use_proxy + else "DIRECT ({})".format(self.transport.node_url) + )) + print("Serial: {}".format(self.hw_info.get('serial', 'N/A'))) + print("-" * 70) + print("Hardware: {} / {}".format(self.hw_info['family'], self.hw_info['arch'])) + print("Model: {}".format(self.hw_info['model'])) + print("CPU: {}".format(self.hw_info['cpu'])) + print("Cores: {}".format(self.hw_info['cores'])) + print("Memory: {} GB".format(self.hw_info['memory_gb'])) + print("-" * 70) + weight = self._get_expected_weight() + print("Expected Weight: {}x (Proof of Antiquity)".format(weight)) + print("=" * 70) + + def _get_expected_weight(self): + """Calculate expected PoA weight.""" + arch = self.hw_info['arch'].lower() + family = self.hw_info['family'].lower() + + if family == 'powerpc': + if arch == 'g3': return 3.0 + if arch == 'g4': return 2.5 + if arch == 'g5': return 2.0 + elif 'apple' in family or 'silicon' in family: + if arch in ('m1', 'm2', 'm3', 'm4', 'apple_silicon'): + return 1.2 + elif family == 'x86_64': + if arch == 'core2': return 1.5 + return 1.0 + + return 1.0 + + def _detect_sleep_wake(self): + """Detect if the machine slept (large time jump).""" + now = time.monotonic() + gap = now - self._last_system_time + self._last_system_time = now + # If more than 2x the check interval elapsed, we probably slept + if gap > LOTTERY_CHECK_INTERVAL * 3: + return True + return False + + def attest(self): + """Complete hardware attestation with fingerprint.""" + ts = datetime.now().strftime('%H:%M:%S') + print(info("\n[{}] Attesting hardware...".format(ts))) + + try: + resp = self.transport.post("/attest/challenge", json={}, timeout=15) + if resp.status_code != 200: + 
print(error(" ERROR: Challenge failed ({})".format(resp.status_code))) + return False + + challenge = resp.json() + nonce = challenge.get("nonce", "") + print(success(" Got challenge nonce: {}...".format(nonce[:16]))) + + except Exception as e: + print(error(" ERROR: Challenge error: {}".format(e))) + return False + + # Collect entropy + entropy = collect_entropy() + self.last_entropy = entropy + + # Re-run fingerprint checks if needed + if FINGERPRINT_AVAILABLE and not self.fingerprint_data: + self._run_fingerprint_checks() + + # Build attestation payload + commitment = hashlib.sha256( + (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() + ).hexdigest() + + attestation = { + "miner": self.wallet, + "miner_id": self.miner_id, + "nonce": nonce, + "report": { + "nonce": nonce, + "commitment": commitment, + "derived": entropy, + "entropy_score": entropy.get("variance_ns", 0.0) + }, + "device": { + "family": self.hw_info["family"], + "arch": self.hw_info["arch"], + "model": self.hw_info["model"], + "cpu": self.hw_info["cpu"], + "cores": self.hw_info["cores"], + "memory_gb": self.hw_info["memory_gb"], + "serial": self.hw_info.get("serial") + }, + "signals": { + "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), + "hostname": self.hw_info["hostname"] + }, + "fingerprint": self.fingerprint_data, + "miner_version": MINER_VERSION, + } + + try: + resp = self.transport.post("/attest/submit", json=attestation, timeout=30) + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.attestation_valid_until = time.time() + ATTESTATION_TTL + print(success(" SUCCESS: Attestation accepted!")) + if self.fingerprint_passed: + print(success(" Fingerprint: PASSED")) + else: + print(warning(" Fingerprint: FAILED (reduced rewards)")) + return True + else: + print(warning(" WARNING: {}".format(result))) + return False + else: + print(error(" ERROR: HTTP {}: {}".format(resp.status_code, resp.text[:200]))) + return False + + except Exception 
as e: + print(error(" ERROR: {}".format(e))) + return False + + def check_eligibility(self): + """Check lottery eligibility.""" + try: + resp = self.transport.get( + "/lottery/eligibility", + params={"miner_id": self.miner_id}, + timeout=10, + ) + if resp.status_code == 200: + return resp.json() + return {"eligible": False, "reason": "HTTP {}".format(resp.status_code)} + except Exception as e: + return {"eligible": False, "reason": str(e)} + + def submit_header(self, slot): + """Submit header for slot.""" + try: + message = "slot:{}:miner:{}:ts:{}".format(slot, self.miner_id, int(time.time())) + message_hex = message.encode().hex() + sig_data = hashlib.sha512( + "{}{}".format(message, self.wallet).encode() + ).hexdigest() + + header_payload = { + "miner_id": self.miner_id, + "header": { + "slot": slot, + "miner": self.miner_id, + "timestamp": int(time.time()) + }, + "message": message_hex, + "signature": sig_data, + "pubkey": self.wallet + } + + resp = self.transport.post( + "/headers/ingest_signed", + json=header_payload, + timeout=15, + ) + + self.shares_submitted += 1 + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.shares_accepted += 1 + return True, result + return False, result + return False, {"error": "HTTP {}".format(resp.status_code)} + + except Exception as e: + return False, {"error": str(e)} + + def run(self): + """Main mining loop with sleep-wake detection.""" + ts = datetime.now().strftime('%H:%M:%S') + print("\n[{}] Starting miner...".format(ts)) + + # Initial attestation + while not self.attest(): + print(" Retrying attestation in 30 seconds...") + time.sleep(30) + + last_slot = 0 + status_counter = 0 + + while True: + try: + # Detect sleep/wake — force re-attest + if self._detect_sleep_wake(): + ts = datetime.now().strftime('%H:%M:%S') + print("\n[{}] Sleep/wake detected - re-attesting...".format(ts)) + self.attestation_valid_until = 0 + + # Re-attest if expired + if time.time() > self.attestation_valid_until: + 
self.attest() + + # Check eligibility + eligibility = self.check_eligibility() + slot = eligibility.get("slot", 0) + + if eligibility.get("eligible"): + ts = datetime.now().strftime('%H:%M:%S') + print("\n[{}] ELIGIBLE for slot {}!".format(ts, slot)) + + if slot != last_slot: + ok, result = self.submit_header(slot) + if ok: + print(" Header ACCEPTED! Slot {}".format(slot)) + else: + print(" Header rejected: {}".format(result)) + last_slot = slot + else: + reason = eligibility.get("reason", "unknown") + if reason == "not_attested": + ts = datetime.now().strftime('%H:%M:%S') + print("[{}] Not attested - re-attesting...".format(ts)) + self.attest() + + # Status every ~60 seconds + status_counter += 1 + if status_counter >= (60 // LOTTERY_CHECK_INTERVAL): + ts = datetime.now().strftime('%H:%M:%S') + print("[{}] Slot {} | Submitted: {} | Accepted: {}".format( + ts, slot, self.shares_submitted, self.shares_accepted + )) + status_counter = 0 + + time.sleep(LOTTERY_CHECK_INTERVAL) + + except KeyboardInterrupt: + print("\n\nShutting down miner...") + break + except Exception as e: + ts = datetime.now().strftime('%H:%M:%S') + print("[{}] Error: {}".format(ts, e)) + time.sleep(30) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="RustChain Mac Miner v{}".format(MINER_VERSION)) + parser.add_argument("--version", "-v", action="version", + version="rustchain-mac-miner {}".format(MINER_VERSION)) + parser.add_argument("--miner-id", "-m", help="Custom miner ID") + parser.add_argument("--wallet", "-w", help="Custom wallet address") + parser.add_argument("--node", "-n", default=NODE_URL, help="Node URL (default: {})".format(NODE_URL)) + parser.add_argument("--proxy", "-p", default=PROXY_URL, + help="HTTP proxy URL for legacy Macs (default: {})".format(PROXY_URL)) + parser.add_argument("--no-proxy", action="store_true", + help="Disable proxy fallback (HTTPS only)") + args = parser.parse_args() + + node = args.node + proxy = None if 
args.no_proxy else args.proxy + + miner = MacMiner( + miner_id=args.miner_id, + wallet=args.wallet, + node_url=node, + proxy_url=proxy, + ) + miner.run() diff --git a/miners/power8/fingerprint_checks_power8.py b/miners/power8/fingerprint_checks_power8.py index 6b0abcbd..de643f5c 100644 --- a/miners/power8/fingerprint_checks_power8.py +++ b/miners/power8/fingerprint_checks_power8.py @@ -1,499 +1,499 @@ -#!/usr/bin/env python3 -""" -RIP-PoA Hardware Fingerprint Validation - POWER8 Optimized -=========================================================== -7 Required Checks for RTC Reward Approval -ALL MUST PASS for antiquity multiplier rewards - -POWER8 Modifications: -- Larger buffer sizes for cache timing (POWER8 has huge caches) -- Random access patterns to defeat aggressive prefetching -- Adjusted thresholds for server-class CPUs -""" - -import hashlib -import os -import platform -import random -import statistics -import subprocess -import time -from typing import Dict, List, Optional, Tuple - - -def check_clock_drift(samples: int = 200) -> Tuple[bool, Dict]: - """Check 1: Clock-Skew & Oscillator Drift""" - intervals = [] - reference_ops = 5000 - - for i in range(samples): - data = "drift_{}".format(i).encode() - start = time.perf_counter_ns() - for _ in range(reference_ops): - hashlib.sha256(data).digest() - elapsed = time.perf_counter_ns() - start - intervals.append(elapsed) - if i % 50 == 0: - time.sleep(0.001) - - mean_ns = statistics.mean(intervals) - stdev_ns = statistics.stdev(intervals) - cv = stdev_ns / mean_ns if mean_ns > 0 else 0 - - drift_pairs = [intervals[i] - intervals[i-1] for i in range(1, len(intervals))] - drift_stdev = statistics.stdev(drift_pairs) if len(drift_pairs) > 1 else 0 - - data = { - "mean_ns": int(mean_ns), - "stdev_ns": int(stdev_ns), - "cv": round(cv, 6), - "drift_stdev": int(drift_stdev), - } - - valid = True - if cv < 0.0001: - valid = False - data["fail_reason"] = "synthetic_timing" - elif drift_stdev == 0: - valid = False - 
data["fail_reason"] = "no_drift" - - return valid, data - - -def check_cache_timing_power8(iterations: int = 50) -> Tuple[bool, Dict]: - """ - Check 2: Cache Timing Fingerprint - POWER8 Optimized - - POWER8 S824 cache sizes: - - L1: 32KB per core (instruction) + 64KB per core (data) - - L2: 512KB per core - - L3: 8MB per core pair (shared) - - L4 (off-chip eDRAM): 128MB per chip (optional) - - Uses random access pattern to defeat POWER8's aggressive prefetching. - """ - # Much larger buffers for POWER8's huge caches - l1_size = 32 * 1024 # 32KB - fits in L1 - l2_size = 1 * 1024 * 1024 # 1MB - exceeds L1, fits L2 - l3_size = 16 * 1024 * 1024 # 16MB - exceeds L2, hits L3 - - def measure_random_access_time(buffer_size: int, accesses: int = 2000) -> float: - """Random access defeats prefetching, reveals true cache latency""" - buf = bytearray(buffer_size) - # Initialize - for i in range(0, buffer_size, 64): - buf[i] = i % 256 - - # Generate random indices ahead of time - indices = [random.randint(0, buffer_size - 1) for _ in range(accesses)] - - # Measure random access - start = time.perf_counter_ns() - acc = 0 - for idx in indices: - acc ^= buf[idx] - elapsed = time.perf_counter_ns() - start - return elapsed / accesses, acc - - l1_times = [] - l2_times = [] - l3_times = [] - - for _ in range(iterations): - t1, _ = measure_random_access_time(l1_size) - l1_times.append(t1) - t2, _ = measure_random_access_time(l2_size) - l2_times.append(t2) - t3, _ = measure_random_access_time(l3_size) - l3_times.append(t3) - - l1_avg = statistics.mean(l1_times) - l2_avg = statistics.mean(l2_times) - l3_avg = statistics.mean(l3_times) - - l1_stdev = statistics.stdev(l1_times) if len(l1_times) > 1 else 0 - l2_stdev = statistics.stdev(l2_times) if len(l2_times) > 1 else 0 - l3_stdev = statistics.stdev(l3_times) if len(l3_times) > 1 else 0 - - l2_l1_ratio = l2_avg / l1_avg if l1_avg > 0 else 0 - l3_l2_ratio = l3_avg / l2_avg if l2_avg > 0 else 0 - - data = { - "l1_ns": round(l1_avg, 2), - 
"l2_ns": round(l2_avg, 2), - "l3_ns": round(l3_avg, 2), - "l1_stdev": round(l1_stdev, 2), - "l2_stdev": round(l2_stdev, 2), - "l3_stdev": round(l3_stdev, 2), - "l2_l1_ratio": round(l2_l1_ratio, 3), - "l3_l2_ratio": round(l3_l2_ratio, 3), - "platform": "power8_optimized", - } - - valid = True - # For POWER8, any positive variance indicates real cache hierarchy - # VMs/emulators have flat latency profiles - total_variance = l1_stdev + l2_stdev + l3_stdev - if total_variance < 1.0: # No variance at all = synthetic - valid = False - data["fail_reason"] = "no_timing_variance" - elif l1_avg == 0 or l2_avg == 0 or l3_avg == 0: - valid = False - data["fail_reason"] = "zero_latency" - # POWER8's excellent prefetching might show small ratios, but should still have variance - elif l2_l1_ratio < 0.95 and l3_l2_ratio < 0.95 and total_variance < 5.0: - valid = False - data["fail_reason"] = "no_cache_hierarchy" - - return valid, data - - -def check_simd_identity() -> Tuple[bool, Dict]: - """Check 3: SIMD Unit Identity (SSE/AVX/AltiVec/NEON/VSX)""" - flags = [] - arch = platform.machine().lower() - - try: - with open("/proc/cpuinfo", "r") as f: - for line in f: - if "flags" in line.lower() or "features" in line.lower(): - parts = line.split(":") - if len(parts) > 1: - flags = parts[1].strip().split() - break - except: - pass - - # POWER8-specific: check for VSX/AltiVec - if not flags and ("ppc" in arch or "power" in arch): - try: - result = subprocess.run( - ["grep", "-i", "vsx\|altivec\|dfp", "/proc/cpuinfo"], - capture_output=True, text=True, timeout=5 - ) - if result.stdout: - flags = ["vsx", "altivec", "dfp", "power8"] - except: - # For POWER8, these are always present - flags = ["vsx", "altivec", "dfp", "power8"] - - has_sse = any("sse" in f.lower() for f in flags) - has_avx = any("avx" in f.lower() for f in flags) - has_altivec = any("altivec" in f.lower() for f in flags) or "ppc" in arch - has_vsx = any("vsx" in f.lower() for f in flags) or "power" in arch - has_neon = 
any("neon" in f.lower() for f in flags) or "arm" in arch - - data = { - "arch": arch, - "simd_flags_count": len(flags), - "has_sse": has_sse, - "has_avx": has_avx, - "has_altivec": has_altivec, - "has_vsx": has_vsx, - "has_neon": has_neon, - "sample_flags": flags[:10] if flags else [], - } - - # POWER8 always has AltiVec and VSX - valid = has_sse or has_avx or has_altivec or has_vsx or has_neon or len(flags) > 0 - if not valid: - data["fail_reason"] = "no_simd_detected" - - return valid, data - - -def check_thermal_drift(samples: int = 50) -> Tuple[bool, Dict]: - """Check 4: Thermal Drift Entropy""" - cold_times = [] - for i in range(samples): - start = time.perf_counter_ns() - for _ in range(10000): - hashlib.sha256("cold_{}".format(i).encode()).digest() - cold_times.append(time.perf_counter_ns() - start) - - # Warm up the CPU - for _ in range(100): - for __ in range(50000): - hashlib.sha256(b"warmup").digest() - - hot_times = [] - for i in range(samples): - start = time.perf_counter_ns() - for _ in range(10000): - hashlib.sha256("hot_{}".format(i).encode()).digest() - hot_times.append(time.perf_counter_ns() - start) - - cold_avg = statistics.mean(cold_times) - hot_avg = statistics.mean(hot_times) - cold_stdev = statistics.stdev(cold_times) - hot_stdev = statistics.stdev(hot_times) - drift_ratio = hot_avg / cold_avg if cold_avg > 0 else 0 - - data = { - "cold_avg_ns": int(cold_avg), - "hot_avg_ns": int(hot_avg), - "cold_stdev": int(cold_stdev), - "hot_stdev": int(hot_stdev), - "drift_ratio": round(drift_ratio, 4), - } - - valid = True - if cold_stdev == 0 and hot_stdev == 0: - valid = False - data["fail_reason"] = "no_thermal_variance" - - return valid, data - - -def check_instruction_jitter(samples: int = 100) -> Tuple[bool, Dict]: - """Check 5: Instruction Path Jitter""" - def measure_int_ops(count: int = 10000) -> float: - start = time.perf_counter_ns() - x = 1 - for i in range(count): - x = (x * 7 + 13) % 65537 - return time.perf_counter_ns() - start - - def 
measure_fp_ops(count: int = 10000) -> float: - start = time.perf_counter_ns() - x = 1.5 - for i in range(count): - x = (x * 1.414 + 0.5) % 1000.0 - return time.perf_counter_ns() - start - - def measure_branch_ops(count: int = 10000) -> float: - start = time.perf_counter_ns() - x = 0 - for i in range(count): - if i % 2 == 0: - x += 1 - else: - x -= 1 - return time.perf_counter_ns() - start - - int_times = [measure_int_ops() for _ in range(samples)] - fp_times = [measure_fp_ops() for _ in range(samples)] - branch_times = [measure_branch_ops() for _ in range(samples)] - - int_avg = statistics.mean(int_times) - fp_avg = statistics.mean(fp_times) - branch_avg = statistics.mean(branch_times) - - int_stdev = statistics.stdev(int_times) - fp_stdev = statistics.stdev(fp_times) - branch_stdev = statistics.stdev(branch_times) - - data = { - "int_avg_ns": int(int_avg), - "fp_avg_ns": int(fp_avg), - "branch_avg_ns": int(branch_avg), - "int_stdev": int(int_stdev), - "fp_stdev": int(fp_stdev), - "branch_stdev": int(branch_stdev), - } - - valid = True - if int_stdev == 0 and fp_stdev == 0 and branch_stdev == 0: - valid = False - data["fail_reason"] = "no_jitter" - - return valid, data - - -def check_anti_emulation() -> Tuple[bool, Dict]: - """Check 6: Anti-Emulation Behavioral Checks - - Detects: - - x86 hypervisors (VMware, VirtualBox, KVM, QEMU, Xen, Hyper-V) - - IBM LPAR/PowerVM (POWER systems virtualization) - - Container environments (Docker, Kubernetes) - - For POWER systems: - - LPAR = virtualized (blocked) - even if full-system LPAR - - PowerNV/Petitboot = bare metal (allowed) - """ - vm_indicators = [] - - # x86 VM paths - vm_paths = [ - "/sys/class/dmi/id/product_name", - "/sys/class/dmi/id/sys_vendor", - "/proc/scsi/scsi", - ] - - vm_strings = ["vmware", "virtualbox", "kvm", "qemu", "xen", "hyperv", "parallels"] - - for path in vm_paths: - try: - with open(path, "r") as f: - content = f.read().lower() - for vm in vm_strings: - if vm in content: - 
vm_indicators.append("{}:{}".format(path, vm)) - except: - pass - - for key in ["KUBERNETES", "DOCKER", "VIRTUAL", "container"]: - if key in os.environ: - vm_indicators.append("ENV:{}".format(key)) - - try: - with open("/proc/cpuinfo", "r") as f: - if "hypervisor" in f.read().lower(): - vm_indicators.append("cpuinfo:hypervisor") - except: - pass - - # === IBM POWER LPAR Detection === - # LPAR = Logical Partition under PowerVM hypervisor (virtualized) - # PowerNV/Petitboot = OPAL firmware, bare metal (not virtualized) - arch = platform.machine().lower() - if "ppc64" in arch or "powerpc" in arch: - # Check for LPAR config (exists only under PowerVM hypervisor) - if os.path.exists("/proc/ppc64/lparcfg"): - vm_indicators.append("power:lpar_detected") - # Read LPAR details for logging - try: - with open("/proc/ppc64/lparcfg", "r") as f: - for line in f: - if line.startswith("partition_id="): - vm_indicators.append("power:lpar_partition_id=" + line.split("=")[1].strip()) - elif line.startswith("NumLpars="): - vm_indicators.append("power:num_lpars=" + line.split("=")[1].strip()) - except: - pass - - # Check for partition name (another LPAR indicator) - if os.path.exists("/proc/device-tree/ibm,partition-name"): - try: - with open("/proc/device-tree/ibm,partition-name", "rb") as f: - partition_name = f.read().decode().strip().rstrip('\x00') - if partition_name: - vm_indicators.append("power:partition_name=" + partition_name) - except: - pass - - # PowerNV (bare metal) detection - this is the ALLOWED mode - # PowerNV systems don't have lparcfg - is_powernv = not os.path.exists("/proc/ppc64/lparcfg") - if is_powernv: - # Double-check with dmesg for OPAL - try: - result = subprocess.run( - ["dmesg"], - capture_output=True, text=True, timeout=5 - ) - if "OPAL" in result.stdout or "powernv" in result.stdout.lower(): - # This is bare metal PowerNV - NOT a VM indicator - pass # Don't add to vm_indicators - except: - pass - - data = { - "vm_indicators": vm_indicators, - 
"indicator_count": len(vm_indicators), - "is_likely_vm": len(vm_indicators) > 0, - "arch": arch, - } - - valid = len(vm_indicators) == 0 - if not valid: - data["fail_reason"] = "vm_detected" - - return valid, data - - -def check_power8_hardware() -> Tuple[bool, Dict]: - """Check 7: POWER8 Hardware Verification""" - arch = platform.machine().lower() - - data = { - "arch": arch, - "is_power8": False, - "cpu_model": "", - "smt_threads": 0, - } - - # Check if actually POWER8 - if "ppc64" not in arch and "powerpc" not in arch: - data["fail_reason"] = "not_powerpc" - return True, data # Pass for non-PPC (they'll use other checks) - - # Get CPU info - try: - with open("/proc/cpuinfo", "r") as f: - content = f.read() - if "POWER8" in content: - data["is_power8"] = True - # Extract CPU model - for line in content.split("\n"): - if line.startswith("cpu"): - data["cpu_model"] = line.split(":")[-1].strip() - break - except: - pass - - # Check SMT threads (POWER8 has SMT8 = 128 threads for 16 cores) - try: - result = subprocess.run(["nproc"], capture_output=True, text=True, timeout=5) - data["smt_threads"] = int(result.stdout.strip()) - except: - pass - - # POWER8 S824 should have 128 threads (16 cores x 8 SMT) - valid = True - if data["is_power8"] and data["smt_threads"] < 64: - # If claiming POWER8 but not enough threads, suspicious - valid = False - data["fail_reason"] = "insufficient_threads_for_power8" - - return valid, data - - -def validate_all_checks(include_rom_check: bool = False) -> Tuple[bool, Dict]: - """Run all fingerprint checks - POWER8 optimized version.""" - results = {} - all_passed = True - - checks = [ - ("clock_drift", "Clock-Skew & Oscillator Drift", check_clock_drift), - ("cache_timing", "Cache Timing Fingerprint (POWER8)", check_cache_timing_power8), - ("simd_identity", "SIMD Unit Identity", check_simd_identity), - ("thermal_drift", "Thermal Drift Entropy", check_thermal_drift), - ("instruction_jitter", "Instruction Path Jitter", 
check_instruction_jitter), - ("anti_emulation", "Anti-Emulation Checks", check_anti_emulation), - ("power8_verify", "POWER8 Hardware Verification", check_power8_hardware), - ] - - print(f"Running {len(checks)} Hardware Fingerprint Checks (POWER8 Optimized)...") - print("=" * 50) - - total_checks = len(checks) - for i, (key, name, func) in enumerate(checks, 1): - print(f"\n[{i}/{total_checks}] {name}...") - try: - passed, data = func() - except Exception as e: - passed = False - data = {"error": str(e)} - results[key] = {"passed": passed, "data": data} - if not passed: - all_passed = False - print(" Result: {}".format("PASS" if passed else "FAIL")) - - print("\n" + "=" * 50) - print("OVERALL RESULT: {}".format("ALL CHECKS PASSED" if all_passed else "FAILED")) - - if not all_passed: - failed = [k for k, v in results.items() if not v["passed"]] - print("Failed checks: {}".format(failed)) - - return all_passed, results - - -if __name__ == "__main__": - import json - passed, results = validate_all_checks() - print("\n\nDetailed Results:") - print(json.dumps(results, indent=2, default=str)) +#!/usr/bin/env python3 +""" +RIP-PoA Hardware Fingerprint Validation - POWER8 Optimized +=========================================================== +7 Required Checks for RTC Reward Approval +ALL MUST PASS for antiquity multiplier rewards + +POWER8 Modifications: +- Larger buffer sizes for cache timing (POWER8 has huge caches) +- Random access patterns to defeat aggressive prefetching +- Adjusted thresholds for server-class CPUs +""" + +import hashlib +import os +import platform +import random +import statistics +import subprocess +import time +from typing import Dict, List, Optional, Tuple + + +def check_clock_drift(samples: int = 200) -> Tuple[bool, Dict]: + """Check 1: Clock-Skew & Oscillator Drift""" + intervals = [] + reference_ops = 5000 + + for i in range(samples): + data = "drift_{}".format(i).encode() + start = time.perf_counter_ns() + for _ in range(reference_ops): + 
hashlib.sha256(data).digest() + elapsed = time.perf_counter_ns() - start + intervals.append(elapsed) + if i % 50 == 0: + time.sleep(0.001) + + mean_ns = statistics.mean(intervals) + stdev_ns = statistics.stdev(intervals) + cv = stdev_ns / mean_ns if mean_ns > 0 else 0 + + drift_pairs = [intervals[i] - intervals[i-1] for i in range(1, len(intervals))] + drift_stdev = statistics.stdev(drift_pairs) if len(drift_pairs) > 1 else 0 + + data = { + "mean_ns": int(mean_ns), + "stdev_ns": int(stdev_ns), + "cv": round(cv, 6), + "drift_stdev": int(drift_stdev), + } + + valid = True + if cv < 0.0001: + valid = False + data["fail_reason"] = "synthetic_timing" + elif drift_stdev == 0: + valid = False + data["fail_reason"] = "no_drift" + + return valid, data + + +def check_cache_timing_power8(iterations: int = 50) -> Tuple[bool, Dict]: + """ + Check 2: Cache Timing Fingerprint - POWER8 Optimized + + POWER8 S824 cache sizes: + - L1: 32KB per core (instruction) + 64KB per core (data) + - L2: 512KB per core + - L3: 8MB per core pair (shared) + - L4 (off-chip eDRAM): 128MB per chip (optional) + + Uses random access pattern to defeat POWER8's aggressive prefetching. 
+ """ + # Much larger buffers for POWER8's huge caches + l1_size = 32 * 1024 # 32KB - fits in L1 + l2_size = 1 * 1024 * 1024 # 1MB - exceeds L1, fits L2 + l3_size = 16 * 1024 * 1024 # 16MB - exceeds L2, hits L3 + + def measure_random_access_time(buffer_size: int, accesses: int = 2000) -> float: + """Random access defeats prefetching, reveals true cache latency""" + buf = bytearray(buffer_size) + # Initialize + for i in range(0, buffer_size, 64): + buf[i] = i % 256 + + # Generate random indices ahead of time + indices = [random.randint(0, buffer_size - 1) for _ in range(accesses)] + + # Measure random access + start = time.perf_counter_ns() + acc = 0 + for idx in indices: + acc ^= buf[idx] + elapsed = time.perf_counter_ns() - start + return elapsed / accesses, acc + + l1_times = [] + l2_times = [] + l3_times = [] + + for _ in range(iterations): + t1, _ = measure_random_access_time(l1_size) + l1_times.append(t1) + t2, _ = measure_random_access_time(l2_size) + l2_times.append(t2) + t3, _ = measure_random_access_time(l3_size) + l3_times.append(t3) + + l1_avg = statistics.mean(l1_times) + l2_avg = statistics.mean(l2_times) + l3_avg = statistics.mean(l3_times) + + l1_stdev = statistics.stdev(l1_times) if len(l1_times) > 1 else 0 + l2_stdev = statistics.stdev(l2_times) if len(l2_times) > 1 else 0 + l3_stdev = statistics.stdev(l3_times) if len(l3_times) > 1 else 0 + + l2_l1_ratio = l2_avg / l1_avg if l1_avg > 0 else 0 + l3_l2_ratio = l3_avg / l2_avg if l2_avg > 0 else 0 + + data = { + "l1_ns": round(l1_avg, 2), + "l2_ns": round(l2_avg, 2), + "l3_ns": round(l3_avg, 2), + "l1_stdev": round(l1_stdev, 2), + "l2_stdev": round(l2_stdev, 2), + "l3_stdev": round(l3_stdev, 2), + "l2_l1_ratio": round(l2_l1_ratio, 3), + "l3_l2_ratio": round(l3_l2_ratio, 3), + "platform": "power8_optimized", + } + + valid = True + # For POWER8, any positive variance indicates real cache hierarchy + # VMs/emulators have flat latency profiles + total_variance = l1_stdev + l2_stdev + l3_stdev + if 
total_variance < 1.0: # No variance at all = synthetic + valid = False + data["fail_reason"] = "no_timing_variance" + elif l1_avg == 0 or l2_avg == 0 or l3_avg == 0: + valid = False + data["fail_reason"] = "zero_latency" + # POWER8's excellent prefetching might show small ratios, but should still have variance + elif l2_l1_ratio < 0.95 and l3_l2_ratio < 0.95 and total_variance < 5.0: + valid = False + data["fail_reason"] = "no_cache_hierarchy" + + return valid, data + + +def check_simd_identity() -> Tuple[bool, Dict]: + """Check 3: SIMD Unit Identity (SSE/AVX/AltiVec/NEON/VSX)""" + flags = [] + arch = platform.machine().lower() + + try: + with open("/proc/cpuinfo", "r") as f: + for line in f: + if "flags" in line.lower() or "features" in line.lower(): + parts = line.split(":") + if len(parts) > 1: + flags = parts[1].strip().split() + break + except: + pass + + # POWER8-specific: check for VSX/AltiVec + if not flags and ("ppc" in arch or "power" in arch): + try: + result = subprocess.run( + ["grep", "-i", "vsx\|altivec\|dfp", "/proc/cpuinfo"], + capture_output=True, text=True, timeout=5 + ) + if result.stdout: + flags = ["vsx", "altivec", "dfp", "power8"] + except: + # For POWER8, these are always present + flags = ["vsx", "altivec", "dfp", "power8"] + + has_sse = any("sse" in f.lower() for f in flags) + has_avx = any("avx" in f.lower() for f in flags) + has_altivec = any("altivec" in f.lower() for f in flags) or "ppc" in arch + has_vsx = any("vsx" in f.lower() for f in flags) or "power" in arch + has_neon = any("neon" in f.lower() for f in flags) or "arm" in arch + + data = { + "arch": arch, + "simd_flags_count": len(flags), + "has_sse": has_sse, + "has_avx": has_avx, + "has_altivec": has_altivec, + "has_vsx": has_vsx, + "has_neon": has_neon, + "sample_flags": flags[:10] if flags else [], + } + + # POWER8 always has AltiVec and VSX + valid = has_sse or has_avx or has_altivec or has_vsx or has_neon or len(flags) > 0 + if not valid: + data["fail_reason"] = 
"no_simd_detected" + + return valid, data + + +def check_thermal_drift(samples: int = 50) -> Tuple[bool, Dict]: + """Check 4: Thermal Drift Entropy""" + cold_times = [] + for i in range(samples): + start = time.perf_counter_ns() + for _ in range(10000): + hashlib.sha256("cold_{}".format(i).encode()).digest() + cold_times.append(time.perf_counter_ns() - start) + + # Warm up the CPU + for _ in range(100): + for __ in range(50000): + hashlib.sha256(b"warmup").digest() + + hot_times = [] + for i in range(samples): + start = time.perf_counter_ns() + for _ in range(10000): + hashlib.sha256("hot_{}".format(i).encode()).digest() + hot_times.append(time.perf_counter_ns() - start) + + cold_avg = statistics.mean(cold_times) + hot_avg = statistics.mean(hot_times) + cold_stdev = statistics.stdev(cold_times) + hot_stdev = statistics.stdev(hot_times) + drift_ratio = hot_avg / cold_avg if cold_avg > 0 else 0 + + data = { + "cold_avg_ns": int(cold_avg), + "hot_avg_ns": int(hot_avg), + "cold_stdev": int(cold_stdev), + "hot_stdev": int(hot_stdev), + "drift_ratio": round(drift_ratio, 4), + } + + valid = True + if cold_stdev == 0 and hot_stdev == 0: + valid = False + data["fail_reason"] = "no_thermal_variance" + + return valid, data + + +def check_instruction_jitter(samples: int = 100) -> Tuple[bool, Dict]: + """Check 5: Instruction Path Jitter""" + def measure_int_ops(count: int = 10000) -> float: + start = time.perf_counter_ns() + x = 1 + for i in range(count): + x = (x * 7 + 13) % 65537 + return time.perf_counter_ns() - start + + def measure_fp_ops(count: int = 10000) -> float: + start = time.perf_counter_ns() + x = 1.5 + for i in range(count): + x = (x * 1.414 + 0.5) % 1000.0 + return time.perf_counter_ns() - start + + def measure_branch_ops(count: int = 10000) -> float: + start = time.perf_counter_ns() + x = 0 + for i in range(count): + if i % 2 == 0: + x += 1 + else: + x -= 1 + return time.perf_counter_ns() - start + + int_times = [measure_int_ops() for _ in range(samples)] + 
fp_times = [measure_fp_ops() for _ in range(samples)] + branch_times = [measure_branch_ops() for _ in range(samples)] + + int_avg = statistics.mean(int_times) + fp_avg = statistics.mean(fp_times) + branch_avg = statistics.mean(branch_times) + + int_stdev = statistics.stdev(int_times) + fp_stdev = statistics.stdev(fp_times) + branch_stdev = statistics.stdev(branch_times) + + data = { + "int_avg_ns": int(int_avg), + "fp_avg_ns": int(fp_avg), + "branch_avg_ns": int(branch_avg), + "int_stdev": int(int_stdev), + "fp_stdev": int(fp_stdev), + "branch_stdev": int(branch_stdev), + } + + valid = True + if int_stdev == 0 and fp_stdev == 0 and branch_stdev == 0: + valid = False + data["fail_reason"] = "no_jitter" + + return valid, data + + +def check_anti_emulation() -> Tuple[bool, Dict]: + """Check 6: Anti-Emulation Behavioral Checks + + Detects: + - x86 hypervisors (VMware, VirtualBox, KVM, QEMU, Xen, Hyper-V) + - IBM LPAR/PowerVM (POWER systems virtualization) + - Container environments (Docker, Kubernetes) + + For POWER systems: + - LPAR = virtualized (blocked) - even if full-system LPAR + - PowerNV/Petitboot = bare metal (allowed) + """ + vm_indicators = [] + + # x86 VM paths + vm_paths = [ + "/sys/class/dmi/id/product_name", + "/sys/class/dmi/id/sys_vendor", + "/proc/scsi/scsi", + ] + + vm_strings = ["vmware", "virtualbox", "kvm", "qemu", "xen", "hyperv", "parallels"] + + for path in vm_paths: + try: + with open(path, "r") as f: + content = f.read().lower() + for vm in vm_strings: + if vm in content: + vm_indicators.append("{}:{}".format(path, vm)) + except: + pass + + for key in ["KUBERNETES", "DOCKER", "VIRTUAL", "container"]: + if key in os.environ: + vm_indicators.append("ENV:{}".format(key)) + + try: + with open("/proc/cpuinfo", "r") as f: + if "hypervisor" in f.read().lower(): + vm_indicators.append("cpuinfo:hypervisor") + except: + pass + + # === IBM POWER LPAR Detection === + # LPAR = Logical Partition under PowerVM hypervisor (virtualized) + # PowerNV/Petitboot 
= OPAL firmware, bare metal (not virtualized) + arch = platform.machine().lower() + if "ppc64" in arch or "powerpc" in arch: + # Check for LPAR config (exists only under PowerVM hypervisor) + if os.path.exists("/proc/ppc64/lparcfg"): + vm_indicators.append("power:lpar_detected") + # Read LPAR details for logging + try: + with open("/proc/ppc64/lparcfg", "r") as f: + for line in f: + if line.startswith("partition_id="): + vm_indicators.append("power:lpar_partition_id=" + line.split("=")[1].strip()) + elif line.startswith("NumLpars="): + vm_indicators.append("power:num_lpars=" + line.split("=")[1].strip()) + except: + pass + + # Check for partition name (another LPAR indicator) + if os.path.exists("/proc/device-tree/ibm,partition-name"): + try: + with open("/proc/device-tree/ibm,partition-name", "rb") as f: + partition_name = f.read().decode().strip().rstrip('\x00') + if partition_name: + vm_indicators.append("power:partition_name=" + partition_name) + except: + pass + + # PowerNV (bare metal) detection - this is the ALLOWED mode + # PowerNV systems don't have lparcfg + is_powernv = not os.path.exists("/proc/ppc64/lparcfg") + if is_powernv: + # Double-check with dmesg for OPAL + try: + result = subprocess.run( + ["dmesg"], + capture_output=True, text=True, timeout=5 + ) + if "OPAL" in result.stdout or "powernv" in result.stdout.lower(): + # This is bare metal PowerNV - NOT a VM indicator + pass # Don't add to vm_indicators + except: + pass + + data = { + "vm_indicators": vm_indicators, + "indicator_count": len(vm_indicators), + "is_likely_vm": len(vm_indicators) > 0, + "arch": arch, + } + + valid = len(vm_indicators) == 0 + if not valid: + data["fail_reason"] = "vm_detected" + + return valid, data + + +def check_power8_hardware() -> Tuple[bool, Dict]: + """Check 7: POWER8 Hardware Verification""" + arch = platform.machine().lower() + + data = { + "arch": arch, + "is_power8": False, + "cpu_model": "", + "smt_threads": 0, + } + + # Check if actually POWER8 + if "ppc64" 
not in arch and "powerpc" not in arch: + data["fail_reason"] = "not_powerpc" + return True, data # Pass for non-PPC (they'll use other checks) + + # Get CPU info + try: + with open("/proc/cpuinfo", "r") as f: + content = f.read() + if "POWER8" in content: + data["is_power8"] = True + # Extract CPU model + for line in content.split("\n"): + if line.startswith("cpu"): + data["cpu_model"] = line.split(":")[-1].strip() + break + except: + pass + + # Check SMT threads (POWER8 has SMT8 = 128 threads for 16 cores) + try: + result = subprocess.run(["nproc"], capture_output=True, text=True, timeout=5) + data["smt_threads"] = int(result.stdout.strip()) + except: + pass + + # POWER8 S824 should have 128 threads (16 cores x 8 SMT) + valid = True + if data["is_power8"] and data["smt_threads"] < 64: + # If claiming POWER8 but not enough threads, suspicious + valid = False + data["fail_reason"] = "insufficient_threads_for_power8" + + return valid, data + + +def validate_all_checks(include_rom_check: bool = False) -> Tuple[bool, Dict]: + """Run all fingerprint checks - POWER8 optimized version.""" + results = {} + all_passed = True + + checks = [ + ("clock_drift", "Clock-Skew & Oscillator Drift", check_clock_drift), + ("cache_timing", "Cache Timing Fingerprint (POWER8)", check_cache_timing_power8), + ("simd_identity", "SIMD Unit Identity", check_simd_identity), + ("thermal_drift", "Thermal Drift Entropy", check_thermal_drift), + ("instruction_jitter", "Instruction Path Jitter", check_instruction_jitter), + ("anti_emulation", "Anti-Emulation Checks", check_anti_emulation), + ("power8_verify", "POWER8 Hardware Verification", check_power8_hardware), + ] + + print(f"Running {len(checks)} Hardware Fingerprint Checks (POWER8 Optimized)...") + print("=" * 50) + + total_checks = len(checks) + for i, (key, name, func) in enumerate(checks, 1): + print(f"\n[{i}/{total_checks}] {name}...") + try: + passed, data = func() + except Exception as e: + passed = False + data = {"error": str(e)} + 
results[key] = {"passed": passed, "data": data} + if not passed: + all_passed = False + print(" Result: {}".format("PASS" if passed else "FAIL")) + + print("\n" + "=" * 50) + print("OVERALL RESULT: {}".format("ALL CHECKS PASSED" if all_passed else "FAILED")) + + if not all_passed: + failed = [k for k, v in results.items() if not v["passed"]] + print("Failed checks: {}".format(failed)) + + return all_passed, results + + +if __name__ == "__main__": + import json + passed, results = validate_all_checks() + print("\n\nDetailed Results:") + print(json.dumps(results, indent=2, default=str)) diff --git a/miners/power8/rustchain_power8_miner.py b/miners/power8/rustchain_power8_miner.py index 35310a8f..2dc935a6 100644 --- a/miners/power8/rustchain_power8_miner.py +++ b/miners/power8/rustchain_power8_miner.py @@ -1,409 +1,409 @@ -#!/usr/bin/env python3 -""" -RustChain POWER8 S824 Miner -With RIP-PoA Hardware Fingerprint Attestation -""" -import os, sys, json, time, hashlib, uuid, requests, socket, subprocess, platform, statistics, re, warnings -from datetime import datetime - -# Suppress SSL warnings for self-signed cert -warnings.filterwarnings('ignore', message='Unverified HTTPS request') - -# Import fingerprint checks -try: - from fingerprint_checks import validate_all_checks - FINGERPRINT_AVAILABLE = True -except ImportError: - FINGERPRINT_AVAILABLE = False - print("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled") - -NODE_URL = "https://rustchain.org" # Use HTTPS via nginx -BLOCK_TIME = 600 # 10 minutes - -WALLET_FILE = os.path.expanduser("~/rustchain/power8_wallet.txt") - -class LocalMiner: - def __init__(self, wallet=None): - self.node_url = NODE_URL - self.wallet = wallet or self._load_or_gen_wallet() - self.hw_info = {} - self.enrolled = False - self.attestation_valid_until = 0 - self.last_entropy = {} - self.fingerprint_data = {} - self.fingerprint_passed = False - - print("="*70) - print("RustChain POWER8 S824 Miner") - print("IBM Power 
System S824 - Dual 8-core POWER8") - print("RIP-PoA Hardware Fingerprint Enabled") - print("="*70) - print(f"Node: {self.node_url}") - print(f"Wallet: {self.wallet}") - print("="*70) - - # Run initial fingerprint check - if FINGERPRINT_AVAILABLE: - self._run_fingerprint_checks() - - def _load_or_gen_wallet(self): - """Load wallet from file or generate new one (persist on first run)""" - if os.path.exists(WALLET_FILE): - with open(WALLET_FILE, 'r') as f: - wallet = f.read().strip() - if wallet: - print(f"[WALLET] Loaded existing wallet from {WALLET_FILE}") - return wallet - # Generate new wallet - wallet = self._gen_wallet() - # Save it - os.makedirs(os.path.dirname(WALLET_FILE), exist_ok=True) - with open(WALLET_FILE, 'w') as f: - f.write(wallet) - print(f"[WALLET] Generated and saved new wallet to {WALLET_FILE}") - return wallet - - def _run_fingerprint_checks(self): - """Run 6 hardware fingerprint checks for RIP-PoA""" - print("\n[FINGERPRINT] Running 6 hardware fingerprint checks...") - try: - passed, results = validate_all_checks() - self.fingerprint_passed = passed - self.fingerprint_data = {"checks": results, "all_passed": passed} - if passed: - print("[FINGERPRINT] All checks PASSED - eligible for full rewards") - else: - failed = [k for k, v in results.items() if not v.get("passed")] - print(f"[FINGERPRINT] FAILED checks: {failed}") - print("[FINGERPRINT] WARNING: May receive reduced/zero rewards") - except Exception as e: - print(f"[FINGERPRINT] Error running checks: {e}") - self.fingerprint_passed = False - self.fingerprint_data = {"error": str(e), "all_passed": False} - - def _gen_wallet(self): - data = f"power8-s824-{uuid.uuid4().hex}-{time.time()}" - return hashlib.sha256(data.encode()).hexdigest()[:38] + "RTC" - - def _run_cmd(self, cmd): - try: - return subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - text=True, timeout=10, shell=True).stdout.strip() - except: - return "" - - def _get_mac_addresses(self): - """Return list of real 
MAC addresses present on the system.""" - macs = [] - try: - output = subprocess.run( - ["ip", "-o", "link"], - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - text=True, - timeout=5, - ).stdout.splitlines() - for line in output: - m = re.search(r"link/(?:ether|loopback)\s+([0-9a-f:]{17})", line, re.IGNORECASE) - if m: - mac = m.group(1).lower() - if mac != "00:00:00:00:00:00": - macs.append(mac) - except Exception: - pass - - if not macs: - try: - output = subprocess.run( - ["ifconfig", "-a"], - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - text=True, - timeout=5, - ).stdout.splitlines() - for line in output: - m = re.search(r"(?:ether|HWaddr)\s+([0-9a-f:]{17})", line, re.IGNORECASE) - if m: - mac = m.group(1).lower() - if mac != "00:00:00:00:00:00": - macs.append(mac) - except Exception: - pass - - return macs or ["00:00:00:00:00:01"] - - def _collect_entropy(self, cycles: int = 48, inner_loop: int = 25000): - """ - Collect simple timing entropy by measuring tight CPU loops. - Returns summary statistics the node can score. 
- """ - samples = [] - for _ in range(cycles): - start = time.perf_counter_ns() - acc = 0 - for j in range(inner_loop): - acc ^= (j * 31) & 0xFFFFFFFF - duration = time.perf_counter_ns() - start - samples.append(duration) - - mean_ns = sum(samples) / len(samples) - variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 - - return { - "mean_ns": mean_ns, - "variance_ns": variance_ns, - "min_ns": min(samples), - "max_ns": max(samples), - "sample_count": len(samples), - "samples_preview": samples[:12], - } - - def _get_hw_info(self): - """Collect hardware info for POWER8""" - hw = { - "platform": platform.system(), - "machine": platform.machine(), - "hostname": socket.gethostname(), - "family": "PowerPC", - "arch": "power8" # Server-class POWER8 - } - - # Get CPU info for POWER8 - cpu = self._run_cmd("lscpu | grep 'Model name' | cut -d: -f2 | xargs") - if not cpu: - cpu = self._run_cmd("cat /proc/cpuinfo | grep 'cpu' | head -1 | cut -d: -f2 | xargs") - hw["cpu"] = cpu or "IBM POWER8" - - # Get cores (POWER8 has 16 cores, 128 threads with SMT8) - cores = self._run_cmd("nproc") - hw["cores"] = int(cores) if cores else 128 - - # Get memory (576GB on S824) - mem = self._run_cmd("free -g | grep Mem | awk '{print $2}'") - hw["memory_gb"] = int(mem) if mem else 576 - - # Get MACs - macs = self._get_mac_addresses() - hw["macs"] = macs - hw["mac"] = macs[0] - - self.hw_info = hw - return hw - - def attest(self): - """Hardware attestation""" - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Attesting...") - - self._get_hw_info() - - try: - resp = requests.post(f"{self.node_url}/attest/challenge", json={}, timeout=10, verify=False) - if resp.status_code != 200: - print(f"[FAIL] Challenge failed: {resp.status_code}") - return False - - challenge = resp.json() - nonce = challenge.get("nonce") - print(f"[OK] Got challenge nonce") - - except Exception as e: - print(f"[FAIL] Challenge error: {e}") - return False - - # Collect entropy just before signing the report 
- entropy = self._collect_entropy() - self.last_entropy = entropy - - # Re-run fingerprint checks if needed - if FINGERPRINT_AVAILABLE and not self.fingerprint_data: - self._run_fingerprint_checks() - - # Submit attestation with fingerprint data - attestation = { - "miner": self.wallet, - "miner_id": f"power8-s824-{self.hw_info['hostname']}", - "nonce": nonce, - "report": { - "nonce": nonce, - "commitment": hashlib.sha256( - (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() - ).hexdigest(), - "derived": entropy, - "entropy_score": entropy.get("variance_ns", 0.0) - }, - "device": { - "device_family": self.hw_info["family"], - "device_arch": self.hw_info["arch"], - "device_model": "IBM POWER8 S824 (8286-42A)", - "family": self.hw_info["family"], - "arch": self.hw_info["arch"], - "model": "IBM POWER8 S824 (8286-42A)", - "cpu": self.hw_info["cpu"], - "cores": self.hw_info["cores"], - "memory_gb": self.hw_info["memory_gb"] - }, - "signals": { - "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), - "hostname": self.hw_info["hostname"] - }, - # RIP-PoA hardware fingerprint attestation - "fingerprint": self.fingerprint_data - } - - try: - resp = requests.post(f"{self.node_url}/attest/submit", - json=attestation, timeout=30, verify=False) - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.attestation_valid_until = time.time() + 580 - print(f"[PASS] Attestation accepted!") - print(f" CPU: {self.hw_info['cpu']}") - print(f" Arch: {self.hw_info.get('machine', 'ppc64le')}/{self.hw_info.get('arch', 'power8')}") - - if self.fingerprint_passed: - print(f" Fingerprint: PASSED") - else: - print(f" Fingerprint: FAILED") - if self.fingerprint_data: - checks = self.fingerprint_data.get("checks", {}) - failed_checks = [] - for name, check in checks.items(): - if not check.get("passed"): - reason = check.get("data", {}).get("fail_reason", "unknown") - failed_checks.append(f"{name}:{reason}") - if failed_checks: - print(f" Failed: 
{', '.join(failed_checks)}") - - return True - else: - print(f"[FAIL] {result.get('error', 'Unknown error')}") - else: - error_data = resp.json() if resp.headers.get('content-type') == 'application/json' else {} - print(f"[FAIL] HTTP {resp.status_code}: {error_data.get('error', resp.text[:200])}") - - except Exception as e: - print(f"[FAIL] Error: {e}") - - return False - - def enroll(self): - """Epoch enrollment""" - print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Enrolling for epoch...") - - # First attest - if not self.attest(): - return False - - try: - # Get challenge - resp = requests.post(f"{self.node_url}/epoch/enroll", json={ - "miner_id": f"power8-s824-{self.hw_info['hostname']}", - "miner_pubkey": self.wallet, # Testnet: wallet as pubkey - "signature": "0" * 128 # Testnet: mock signature - }, timeout=10, verify=False) - - if resp.status_code == 200: - result = resp.json() - if result.get("ok"): - self.enrolled = True - weight = result.get('weight', 1.0) - hw_weight = result.get('hw_weight', 1.0) - fingerprint_failed = result.get('fingerprint_failed', False) - - print(f"[OK] Enrolled!") - print(f" Epoch: {result.get('epoch')}") - print(f" Weight: {weight}x") - - if fingerprint_failed or weight < 0.001: - print("") - print("=" * 60) - print("[!] 
VM/CONTAINER DETECTED - MINIMAL REWARDS") - print("=" * 60) - print(" Your fingerprint check failed, indicating you are") - print(" running in a virtual machine or container.") - print("") - print(" Hardware weight would be: {:.1f}x".format(hw_weight)) - print(" Actual weight assigned: {:.9f}x".format(weight)) - print("") - print(" VMs/containers CAN mine, but earn ~1 billionth") - print(" of what real hardware earns per epoch.") - print(" Run on real hardware for meaningful rewards.") - print("=" * 60) - print("") - - return True - else: - print(f"[FAIL] {result}") - else: - error_data = resp.json() if resp.headers.get('content-type') == 'application/json' else {} - print(f"[FAIL] HTTP {resp.status_code}: {error_data.get('error', resp.text[:200])}") - - except Exception as e: - print(f"[FAIL] Error: {e}") - - return False - - def check_balance(self): - """Check balance""" - try: - resp = requests.get(f"{self.node_url}/balance/{self.wallet}", timeout=10, verify=False) - if resp.status_code == 200: - result = resp.json() - balance = result.get('balance_rtc', 0) - print(f"\n[BALANCE] {balance} RTC") - return balance - except: - pass - return 0 - - def mine(self): - """Start mining""" - print(f"\n[START] Mining...") - print(f"Block time: {BLOCK_TIME//60} minutes") - print(f"Press Ctrl+C to stop\n") - - # Save wallet - wallet_file = os.path.expanduser("~/rustchain/power8_wallet.txt") - with open(wallet_file, "w") as f: - f.write(self.wallet) - print(f"[SAVE] Wallet saved to: {wallet_file}\n") - - cycle = 0 - - try: - while True: - cycle += 1 - print(f"\n{'='*70}") - print(f"Cycle #{cycle} - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") - print(f"{'='*70}") - - if self.enroll(): - print(f"[MINING] Mining for {BLOCK_TIME//60} minutes...") - - for i in range(BLOCK_TIME // 30): - time.sleep(30) - elapsed = (i + 1) * 30 - remaining = BLOCK_TIME - elapsed - print(f" [{elapsed}s elapsed, {remaining}s remaining...]") - - self.check_balance() - - else: - print("[RETRY] 
Enrollment failed. Retrying in 60s...") - time.sleep(60) - - except KeyboardInterrupt: - print(f"\n\n[STOP] Mining stopped") - print(f" Wallet: {self.wallet}") - self.check_balance() - -if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser() - parser.add_argument("--wallet", help="Wallet address") - args = parser.parse_args() - - miner = LocalMiner(wallet=args.wallet) - miner.mine() +#!/usr/bin/env python3 +""" +RustChain POWER8 S824 Miner +With RIP-PoA Hardware Fingerprint Attestation +""" +import os, sys, json, time, hashlib, uuid, requests, socket, subprocess, platform, statistics, re, warnings +from datetime import datetime + +# Suppress SSL warnings for self-signed cert +warnings.filterwarnings('ignore', message='Unverified HTTPS request') + +# Import fingerprint checks +try: + from fingerprint_checks import validate_all_checks + FINGERPRINT_AVAILABLE = True +except ImportError: + FINGERPRINT_AVAILABLE = False + print("[WARN] fingerprint_checks.py not found - fingerprint attestation disabled") + +NODE_URL = "https://rustchain.org" # Use HTTPS via nginx +BLOCK_TIME = 600 # 10 minutes + +WALLET_FILE = os.path.expanduser("~/rustchain/power8_wallet.txt") + +class LocalMiner: + def __init__(self, wallet=None): + self.node_url = NODE_URL + self.wallet = wallet or self._load_or_gen_wallet() + self.hw_info = {} + self.enrolled = False + self.attestation_valid_until = 0 + self.last_entropy = {} + self.fingerprint_data = {} + self.fingerprint_passed = False + + print("="*70) + print("RustChain POWER8 S824 Miner") + print("IBM Power System S824 - Dual 8-core POWER8") + print("RIP-PoA Hardware Fingerprint Enabled") + print("="*70) + print(f"Node: {self.node_url}") + print(f"Wallet: {self.wallet}") + print("="*70) + + # Run initial fingerprint check + if FINGERPRINT_AVAILABLE: + self._run_fingerprint_checks() + + def _load_or_gen_wallet(self): + """Load wallet from file or generate new one (persist on first run)""" + if 
os.path.exists(WALLET_FILE): + with open(WALLET_FILE, 'r') as f: + wallet = f.read().strip() + if wallet: + print(f"[WALLET] Loaded existing wallet from {WALLET_FILE}") + return wallet + # Generate new wallet + wallet = self._gen_wallet() + # Save it + os.makedirs(os.path.dirname(WALLET_FILE), exist_ok=True) + with open(WALLET_FILE, 'w') as f: + f.write(wallet) + print(f"[WALLET] Generated and saved new wallet to {WALLET_FILE}") + return wallet + + def _run_fingerprint_checks(self): + """Run 6 hardware fingerprint checks for RIP-PoA""" + print("\n[FINGERPRINT] Running 6 hardware fingerprint checks...") + try: + passed, results = validate_all_checks() + self.fingerprint_passed = passed + self.fingerprint_data = {"checks": results, "all_passed": passed} + if passed: + print("[FINGERPRINT] All checks PASSED - eligible for full rewards") + else: + failed = [k for k, v in results.items() if not v.get("passed")] + print(f"[FINGERPRINT] FAILED checks: {failed}") + print("[FINGERPRINT] WARNING: May receive reduced/zero rewards") + except Exception as e: + print(f"[FINGERPRINT] Error running checks: {e}") + self.fingerprint_passed = False + self.fingerprint_data = {"error": str(e), "all_passed": False} + + def _gen_wallet(self): + data = f"power8-s824-{uuid.uuid4().hex}-{time.time()}" + return hashlib.sha256(data.encode()).hexdigest()[:38] + "RTC" + + def _run_cmd(self, cmd): + try: + return subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + text=True, timeout=10, shell=True).stdout.strip() + except: + return "" + + def _get_mac_addresses(self): + """Return list of real MAC addresses present on the system.""" + macs = [] + try: + output = subprocess.run( + ["ip", "-o", "link"], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + text=True, + timeout=5, + ).stdout.splitlines() + for line in output: + m = re.search(r"link/(?:ether|loopback)\s+([0-9a-f:]{17})", line, re.IGNORECASE) + if m: + mac = m.group(1).lower() + if mac != "00:00:00:00:00:00": + 
macs.append(mac) + except Exception: + pass + + if not macs: + try: + output = subprocess.run( + ["ifconfig", "-a"], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + text=True, + timeout=5, + ).stdout.splitlines() + for line in output: + m = re.search(r"(?:ether|HWaddr)\s+([0-9a-f:]{17})", line, re.IGNORECASE) + if m: + mac = m.group(1).lower() + if mac != "00:00:00:00:00:00": + macs.append(mac) + except Exception: + pass + + return macs or ["00:00:00:00:00:01"] + + def _collect_entropy(self, cycles: int = 48, inner_loop: int = 25000): + """ + Collect simple timing entropy by measuring tight CPU loops. + Returns summary statistics the node can score. + """ + samples = [] + for _ in range(cycles): + start = time.perf_counter_ns() + acc = 0 + for j in range(inner_loop): + acc ^= (j * 31) & 0xFFFFFFFF + duration = time.perf_counter_ns() - start + samples.append(duration) + + mean_ns = sum(samples) / len(samples) + variance_ns = statistics.pvariance(samples) if len(samples) > 1 else 0.0 + + return { + "mean_ns": mean_ns, + "variance_ns": variance_ns, + "min_ns": min(samples), + "max_ns": max(samples), + "sample_count": len(samples), + "samples_preview": samples[:12], + } + + def _get_hw_info(self): + """Collect hardware info for POWER8""" + hw = { + "platform": platform.system(), + "machine": platform.machine(), + "hostname": socket.gethostname(), + "family": "PowerPC", + "arch": "power8" # Server-class POWER8 + } + + # Get CPU info for POWER8 + cpu = self._run_cmd("lscpu | grep 'Model name' | cut -d: -f2 | xargs") + if not cpu: + cpu = self._run_cmd("cat /proc/cpuinfo | grep 'cpu' | head -1 | cut -d: -f2 | xargs") + hw["cpu"] = cpu or "IBM POWER8" + + # Get cores (POWER8 has 16 cores, 128 threads with SMT8) + cores = self._run_cmd("nproc") + hw["cores"] = int(cores) if cores else 128 + + # Get memory (576GB on S824) + mem = self._run_cmd("free -g | grep Mem | awk '{print $2}'") + hw["memory_gb"] = int(mem) if mem else 576 + + # Get MACs + macs = 
self._get_mac_addresses() + hw["macs"] = macs + hw["mac"] = macs[0] + + self.hw_info = hw + return hw + + def attest(self): + """Hardware attestation""" + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Attesting...") + + self._get_hw_info() + + try: + resp = requests.post(f"{self.node_url}/attest/challenge", json={}, timeout=10, verify=False) + if resp.status_code != 200: + print(f"[FAIL] Challenge failed: {resp.status_code}") + return False + + challenge = resp.json() + nonce = challenge.get("nonce") + print(f"[OK] Got challenge nonce") + + except Exception as e: + print(f"[FAIL] Challenge error: {e}") + return False + + # Collect entropy just before signing the report + entropy = self._collect_entropy() + self.last_entropy = entropy + + # Re-run fingerprint checks if needed + if FINGERPRINT_AVAILABLE and not self.fingerprint_data: + self._run_fingerprint_checks() + + # Submit attestation with fingerprint data + attestation = { + "miner": self.wallet, + "miner_id": f"power8-s824-{self.hw_info['hostname']}", + "nonce": nonce, + "report": { + "nonce": nonce, + "commitment": hashlib.sha256( + (nonce + self.wallet + json.dumps(entropy, sort_keys=True)).encode() + ).hexdigest(), + "derived": entropy, + "entropy_score": entropy.get("variance_ns", 0.0) + }, + "device": { + "device_family": self.hw_info["family"], + "device_arch": self.hw_info["arch"], + "device_model": "IBM POWER8 S824 (8286-42A)", + "family": self.hw_info["family"], + "arch": self.hw_info["arch"], + "model": "IBM POWER8 S824 (8286-42A)", + "cpu": self.hw_info["cpu"], + "cores": self.hw_info["cores"], + "memory_gb": self.hw_info["memory_gb"] + }, + "signals": { + "macs": self.hw_info.get("macs", [self.hw_info["mac"]]), + "hostname": self.hw_info["hostname"] + }, + # RIP-PoA hardware fingerprint attestation + "fingerprint": self.fingerprint_data + } + + try: + resp = requests.post(f"{self.node_url}/attest/submit", + json=attestation, timeout=30, verify=False) + + if resp.status_code == 200: + result = 
resp.json() + if result.get("ok"): + self.attestation_valid_until = time.time() + 580 + print(f"[PASS] Attestation accepted!") + print(f" CPU: {self.hw_info['cpu']}") + print(f" Arch: {self.hw_info.get('machine', 'ppc64le')}/{self.hw_info.get('arch', 'power8')}") + + if self.fingerprint_passed: + print(f" Fingerprint: PASSED") + else: + print(f" Fingerprint: FAILED") + if self.fingerprint_data: + checks = self.fingerprint_data.get("checks", {}) + failed_checks = [] + for name, check in checks.items(): + if not check.get("passed"): + reason = check.get("data", {}).get("fail_reason", "unknown") + failed_checks.append(f"{name}:{reason}") + if failed_checks: + print(f" Failed: {', '.join(failed_checks)}") + + return True + else: + print(f"[FAIL] {result.get('error', 'Unknown error')}") + else: + error_data = resp.json() if resp.headers.get('content-type') == 'application/json' else {} + print(f"[FAIL] HTTP {resp.status_code}: {error_data.get('error', resp.text[:200])}") + + except Exception as e: + print(f"[FAIL] Error: {e}") + + return False + + def enroll(self): + """Epoch enrollment""" + print(f"\n[{datetime.now().strftime('%H:%M:%S')}] Enrolling for epoch...") + + # First attest + if not self.attest(): + return False + + try: + # Get challenge + resp = requests.post(f"{self.node_url}/epoch/enroll", json={ + "miner_id": f"power8-s824-{self.hw_info['hostname']}", + "miner_pubkey": self.wallet, # Testnet: wallet as pubkey + "signature": "0" * 128 # Testnet: mock signature + }, timeout=10, verify=False) + + if resp.status_code == 200: + result = resp.json() + if result.get("ok"): + self.enrolled = True + weight = result.get('weight', 1.0) + hw_weight = result.get('hw_weight', 1.0) + fingerprint_failed = result.get('fingerprint_failed', False) + + print(f"[OK] Enrolled!") + print(f" Epoch: {result.get('epoch')}") + print(f" Weight: {weight}x") + + if fingerprint_failed or weight < 0.001: + print("") + print("=" * 60) + print("[!] 
VM/CONTAINER DETECTED - MINIMAL REWARDS") + print("=" * 60) + print(" Your fingerprint check failed, indicating you are") + print(" running in a virtual machine or container.") + print("") + print(" Hardware weight would be: {:.1f}x".format(hw_weight)) + print(" Actual weight assigned: {:.9f}x".format(weight)) + print("") + print(" VMs/containers CAN mine, but earn ~1 billionth") + print(" of what real hardware earns per epoch.") + print(" Run on real hardware for meaningful rewards.") + print("=" * 60) + print("") + + return True + else: + print(f"[FAIL] {result}") + else: + error_data = resp.json() if resp.headers.get('content-type') == 'application/json' else {} + print(f"[FAIL] HTTP {resp.status_code}: {error_data.get('error', resp.text[:200])}") + + except Exception as e: + print(f"[FAIL] Error: {e}") + + return False + + def check_balance(self): + """Check balance""" + try: + resp = requests.get(f"{self.node_url}/balance/{self.wallet}", timeout=10, verify=False) + if resp.status_code == 200: + result = resp.json() + balance = result.get('balance_rtc', 0) + print(f"\n[BALANCE] {balance} RTC") + return balance + except: + pass + return 0 + + def mine(self): + """Start mining""" + print(f"\n[START] Mining...") + print(f"Block time: {BLOCK_TIME//60} minutes") + print(f"Press Ctrl+C to stop\n") + + # Save wallet + wallet_file = os.path.expanduser("~/rustchain/power8_wallet.txt") + with open(wallet_file, "w") as f: + f.write(self.wallet) + print(f"[SAVE] Wallet saved to: {wallet_file}\n") + + cycle = 0 + + try: + while True: + cycle += 1 + print(f"\n{'='*70}") + print(f"Cycle #{cycle} - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}") + print(f"{'='*70}") + + if self.enroll(): + print(f"[MINING] Mining for {BLOCK_TIME//60} minutes...") + + for i in range(BLOCK_TIME // 30): + time.sleep(30) + elapsed = (i + 1) * 30 + remaining = BLOCK_TIME - elapsed + print(f" [{elapsed}s elapsed, {remaining}s remaining...]") + + self.check_balance() + + else: + print("[RETRY] 
Enrollment failed. Retrying in 60s...") + time.sleep(60) + + except KeyboardInterrupt: + print(f"\n\n[STOP] Mining stopped") + print(f" Wallet: {self.wallet}") + self.check_balance() + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("--wallet", help="Wallet address") + args = parser.parse_args() + + miner = LocalMiner(wallet=args.wallet) + miner.mine() diff --git a/node/beacon_x402.py b/node/beacon_x402.py index 8b681109..c63d910f 100644 --- a/node/beacon_x402.py +++ b/node/beacon_x402.py @@ -1,364 +1,364 @@ -""" -Beacon Atlas x402 Integration Module -Adds Coinbase wallet support for beacon agents and x402 payments on contracts. - -Usage in beacon_chat.py: - import beacon_x402 - beacon_x402.init_app(app, get_db) -""" - -import json -import logging -import os -import sqlite3 -import time - -from flask import g, jsonify, request -from functools import wraps - -log = logging.getLogger("beacon.x402") - -# --- Optional imports (graceful degradation) --- -try: - import sys - sys.path.insert(0, "/root/shared") - from x402_config import ( - BEACON_TREASURY, FACILITATOR_URL, X402_NETWORK, USDC_BASE, - PRICE_BEACON_CONTRACT, PRICE_RELAY_REGISTER, PRICE_REPUTATION_EXPORT, - is_free, has_cdp_credentials, create_agentkit_wallet, SWAP_INFO, - ) - X402_CONFIG_OK = True -except ImportError: - log.warning("x402_config not found — x402 features disabled") - X402_CONFIG_OK = False - - -# --------------------------------------------------------------------------- -# Database setup -# --------------------------------------------------------------------------- - -X402_BEACON_SCHEMA = """ -CREATE TABLE IF NOT EXISTS x402_beacon_payments ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - payer_address TEXT NOT NULL, - payer_agent_id TEXT, - action TEXT NOT NULL, - amount_usdc TEXT NOT NULL, - tx_hash TEXT, - contract_id TEXT, - created_at REAL NOT NULL -); - -CREATE TABLE IF NOT EXISTS beacon_wallets ( - agent_id TEXT PRIMARY KEY, - 
coinbase_address TEXT, - created_at REAL NOT NULL -); -""" - -RELAY_MIGRATION_SQL = [ - "ALTER TABLE relay_agents ADD COLUMN coinbase_address TEXT DEFAULT NULL", -] - - -def _run_migrations(db_path): - """Run x402 migrations on the beacon database.""" - conn = sqlite3.connect(db_path) - conn.row_factory = sqlite3.Row - conn.executescript(X402_BEACON_SCHEMA) - - # Add coinbase_address to relay_agents if missing - cursor = conn.execute("PRAGMA table_info(relay_agents)") - existing_cols = {row[1] if isinstance(row, tuple) else row["name"] - for row in cursor.fetchall()} - - for sql in RELAY_MIGRATION_SQL: - col_name = sql.split("ADD COLUMN ")[1].split()[0] - if col_name not in existing_cols: - try: - conn.execute(sql) - log.info(f"Migration: added column {col_name} to relay_agents") - except sqlite3.OperationalError: - pass - conn.commit() - conn.close() - - -# --------------------------------------------------------------------------- -# CORS helper (match beacon_chat.py pattern) -# --------------------------------------------------------------------------- - -def _cors_json(data, status=200): - """Return JSON response with CORS headers (matching beacon_chat.py pattern).""" - resp = jsonify(data) if not isinstance(data, str) else data - if hasattr(resp, 'headers'): - resp.headers["Access-Control-Allow-Origin"] = "*" - resp.headers["Access-Control-Allow-Headers"] = "Content-Type, Authorization, X-PAYMENT" - resp.headers["Access-Control-Allow-Methods"] = "GET, POST, PATCH, OPTIONS" - return resp, status - - -# --------------------------------------------------------------------------- -# x402 payment check -# --------------------------------------------------------------------------- - -def _check_x402_payment(price_str, action_name): - """ - Check for x402 payment. Returns (passed, response_or_none). - When price is "0", always passes. 
- """ - if not X402_CONFIG_OK or is_free(price_str): - return True, None - - payment_header = request.headers.get("X-PAYMENT", "") - if not payment_header: - return False, _cors_json({ - "error": "Payment Required", - "x402": { - "version": "1", - "network": X402_NETWORK, - "facilitator": FACILITATOR_URL, - "payTo": BEACON_TREASURY, - "maxAmountRequired": price_str, - "asset": USDC_BASE, - "resource": request.url, - "description": f"Beacon Atlas: {action_name}", - } - }, 402) - - # Log payment - try: - db = g.get("db") - if db: - db.execute( - "INSERT INTO x402_beacon_payments (payer_address, action, amount_usdc, created_at) " - "VALUES (?, ?, ?, ?)", - ("unknown", action_name, price_str, time.time()), - ) - db.commit() - except Exception as e: - log.debug(f"Payment logging failed: {e}") - - return True, None - - -# --------------------------------------------------------------------------- -# Route registration -# --------------------------------------------------------------------------- - -def init_app(app, get_db_func): - """Register x402 routes on the Beacon Atlas Flask app.""" - - # Determine DB path from the app's existing config - db_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "beacon_atlas.db" - ) - - # Run migrations - try: - _run_migrations(db_path) - log.info("Beacon x402 migrations complete") - except Exception as e: - log.error(f"Beacon x402 migration failed: {e}") - - # --------------------------------------------------------------- - # Wallet Management — Native Agents - # --------------------------------------------------------------- - - @app.route("/api/agents//wallet", methods=["POST", "OPTIONS"]) - def set_agent_wallet(agent_id): - """Set Coinbase wallet for a native beacon agent (admin only).""" - if request.method == "OPTIONS": - return _cors_json({"ok": True}) - - # Simple admin check — require admin key in header - admin_key = request.headers.get("X-Admin-Key", "") - expected = os.environ.get("BEACON_ADMIN_KEY", 
"beacon_admin_2025") - if admin_key != expected: - return _cors_json({"error": "Unauthorized — admin key required"}, 401) - - data = request.get_json(silent=True) or {} - address = data.get("coinbase_address", "").strip() - if not address or not address.startswith("0x") or len(address) != 42: - return _cors_json({"error": "Invalid Base address"}, 400) - - db = get_db_func() - db.execute( - """INSERT INTO beacon_wallets (agent_id, coinbase_address, created_at) - VALUES (?, ?, ?) - ON CONFLICT(agent_id) DO UPDATE SET coinbase_address = excluded.coinbase_address""", - (agent_id, address, time.time()), - ) - db.commit() - - return _cors_json({ - "ok": True, - "agent_id": agent_id, - "coinbase_address": address, - "network": "Base (eip155:8453)", - }) - - @app.route("/api/agents//wallet", methods=["GET", "OPTIONS"]) - def get_agent_wallet(agent_id): - """Get a beacon agent's Coinbase wallet info.""" - if request.method == "OPTIONS": - return _cors_json({"ok": True}) - - db = get_db_func() - - # Check beacon_wallets table (native agents) - row = db.execute( - "SELECT coinbase_address FROM beacon_wallets WHERE agent_id = ?", - (agent_id,), - ).fetchone() - - if row and row["coinbase_address"]: - return _cors_json({ - "agent_id": agent_id, - "coinbase_address": row["coinbase_address"], - "source": "native", - "network": "Base (eip155:8453)", - "swap_info": SWAP_INFO if X402_CONFIG_OK else None, - }) - - # Check relay_agents table - try: - relay = db.execute( - "SELECT coinbase_address FROM relay_agents WHERE agent_id = ?", - (agent_id,), - ).fetchone() - if relay and relay.get("coinbase_address"): - return _cors_json({ - "agent_id": agent_id, - "coinbase_address": relay["coinbase_address"], - "source": "relay", - "network": "Base (eip155:8453)", - "swap_info": SWAP_INFO if X402_CONFIG_OK else None, - }) - except (sqlite3.OperationalError, KeyError): - pass # Column may not exist yet - - return _cors_json({ - "agent_id": agent_id, - "coinbase_address": None, - "hint": "POST 
/api/agents//wallet with admin key to set wallet", - }) - - # --------------------------------------------------------------- - # Premium Endpoints (x402 paywalled) - # --------------------------------------------------------------- - - @app.route("/api/premium/reputation", methods=["GET", "OPTIONS"]) - def premium_reputation(): - """Full reputation export for all agents.""" - if request.method == "OPTIONS": - return _cors_json({"ok": True}) - - passed, err_resp = _check_x402_payment( - PRICE_REPUTATION_EXPORT if X402_CONFIG_OK else "0", - "reputation_export", - ) - if not passed: - return err_resp - - db = get_db_func() - try: - rows = db.execute( - "SELECT * FROM reputation ORDER BY score DESC" - ).fetchall() - reputation = [dict(r) for r in rows] - except sqlite3.OperationalError: - reputation = [] - - return _cors_json({ - "total": len(reputation), - "reputation": reputation, - "exported_at": time.time(), - }) - - @app.route("/api/premium/contracts/export", methods=["GET", "OPTIONS"]) - def premium_contracts_export(): - """Full contracts export with payment status.""" - if request.method == "OPTIONS": - return _cors_json({"ok": True}) - - passed, err_resp = _check_x402_payment( - PRICE_BEACON_CONTRACT if X402_CONFIG_OK else "0", - "contracts_export", - ) - if not passed: - return err_resp - - db = get_db_func() - rows = db.execute( - "SELECT * FROM contracts ORDER BY created_at DESC" - ).fetchall() - - contracts = [] - for r in rows: - d = dict(r) - # Check if contract has wallet info - for field in ("from_agent", "to_agent"): - agent_id = d.get(field, "") - wallet_row = db.execute( - "SELECT coinbase_address FROM beacon_wallets WHERE agent_id = ?", - (agent_id,), - ).fetchone() - d[f"{field}_wallet"] = wallet_row["coinbase_address"] if wallet_row else None - contracts.append(d) - - return _cors_json({ - "total": len(contracts), - "contracts": contracts, - "exported_at": time.time(), - }) - - # --------------------------------------------------------------- - # 
x402 Payment History - # --------------------------------------------------------------- - - @app.route("/api/x402/payments", methods=["GET", "OPTIONS"]) - def x402_beacon_payments(): - """View x402 payment history for beacon.""" - if request.method == "OPTIONS": - return _cors_json({"ok": True}) - - db = get_db_func() - try: - rows = db.execute( - "SELECT * FROM x402_beacon_payments ORDER BY created_at DESC LIMIT 50" - ).fetchall() - except sqlite3.OperationalError: - rows = [] - - return _cors_json({ - "payments": [dict(r) for r in rows], - "total": len(rows), - }) - - # --------------------------------------------------------------- - # x402 Status - # --------------------------------------------------------------- - - @app.route("/api/x402/status", methods=["GET", "OPTIONS"]) - def x402_beacon_status(): - """Public endpoint showing x402 integration status for Beacon Atlas.""" - if request.method == "OPTIONS": - return _cors_json({"ok": True}) - - return _cors_json({ - "x402_enabled": X402_CONFIG_OK, - "cdp_configured": has_cdp_credentials() if X402_CONFIG_OK else False, - "network": "Base (eip155:8453)", - "facilitator": FACILITATOR_URL if X402_CONFIG_OK else None, - "pricing_mode": "free" if not X402_CONFIG_OK or is_free( - PRICE_BEACON_CONTRACT if X402_CONFIG_OK else "0" - ) else "paid", - "swap_info": SWAP_INFO if X402_CONFIG_OK else None, - "premium_endpoints": [ - "/api/premium/reputation", - "/api/premium/contracts/export", - ], - }) - - log.info("Beacon Atlas x402 module initialized") +""" +Beacon Atlas x402 Integration Module +Adds Coinbase wallet support for beacon agents and x402 payments on contracts. 
+ +Usage in beacon_chat.py: + import beacon_x402 + beacon_x402.init_app(app, get_db) +""" + +import json +import logging +import os +import sqlite3 +import time + +from flask import g, jsonify, request +from functools import wraps + +log = logging.getLogger("beacon.x402") + +# --- Optional imports (graceful degradation) --- +try: + import sys + sys.path.insert(0, "/root/shared") + from x402_config import ( + BEACON_TREASURY, FACILITATOR_URL, X402_NETWORK, USDC_BASE, + PRICE_BEACON_CONTRACT, PRICE_RELAY_REGISTER, PRICE_REPUTATION_EXPORT, + is_free, has_cdp_credentials, create_agentkit_wallet, SWAP_INFO, + ) + X402_CONFIG_OK = True +except ImportError: + log.warning("x402_config not found — x402 features disabled") + X402_CONFIG_OK = False + + +# --------------------------------------------------------------------------- +# Database setup +# --------------------------------------------------------------------------- + +X402_BEACON_SCHEMA = """ +CREATE TABLE IF NOT EXISTS x402_beacon_payments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + payer_address TEXT NOT NULL, + payer_agent_id TEXT, + action TEXT NOT NULL, + amount_usdc TEXT NOT NULL, + tx_hash TEXT, + contract_id TEXT, + created_at REAL NOT NULL +); + +CREATE TABLE IF NOT EXISTS beacon_wallets ( + agent_id TEXT PRIMARY KEY, + coinbase_address TEXT, + created_at REAL NOT NULL +); +""" + +RELAY_MIGRATION_SQL = [ + "ALTER TABLE relay_agents ADD COLUMN coinbase_address TEXT DEFAULT NULL", +] + + +def _run_migrations(db_path): + """Run x402 migrations on the beacon database.""" + conn = sqlite3.connect(db_path) + conn.row_factory = sqlite3.Row + conn.executescript(X402_BEACON_SCHEMA) + + # Add coinbase_address to relay_agents if missing + cursor = conn.execute("PRAGMA table_info(relay_agents)") + existing_cols = {row[1] if isinstance(row, tuple) else row["name"] + for row in cursor.fetchall()} + + for sql in RELAY_MIGRATION_SQL: + col_name = sql.split("ADD COLUMN ")[1].split()[0] + if col_name not in existing_cols: + 
try: + conn.execute(sql) + log.info(f"Migration: added column {col_name} to relay_agents") + except sqlite3.OperationalError: + pass + conn.commit() + conn.close() + + +# --------------------------------------------------------------------------- +# CORS helper (match beacon_chat.py pattern) +# --------------------------------------------------------------------------- + +def _cors_json(data, status=200): + """Return JSON response with CORS headers (matching beacon_chat.py pattern).""" + resp = jsonify(data) if not isinstance(data, str) else data + if hasattr(resp, 'headers'): + resp.headers["Access-Control-Allow-Origin"] = "*" + resp.headers["Access-Control-Allow-Headers"] = "Content-Type, Authorization, X-PAYMENT" + resp.headers["Access-Control-Allow-Methods"] = "GET, POST, PATCH, OPTIONS" + return resp, status + + +# --------------------------------------------------------------------------- +# x402 payment check +# --------------------------------------------------------------------------- + +def _check_x402_payment(price_str, action_name): + """ + Check for x402 payment. Returns (passed, response_or_none). + When price is "0", always passes. 
+ """ + if not X402_CONFIG_OK or is_free(price_str): + return True, None + + payment_header = request.headers.get("X-PAYMENT", "") + if not payment_header: + return False, _cors_json({ + "error": "Payment Required", + "x402": { + "version": "1", + "network": X402_NETWORK, + "facilitator": FACILITATOR_URL, + "payTo": BEACON_TREASURY, + "maxAmountRequired": price_str, + "asset": USDC_BASE, + "resource": request.url, + "description": f"Beacon Atlas: {action_name}", + } + }, 402) + + # Log payment + try: + db = g.get("db") + if db: + db.execute( + "INSERT INTO x402_beacon_payments (payer_address, action, amount_usdc, created_at) " + "VALUES (?, ?, ?, ?)", + ("unknown", action_name, price_str, time.time()), + ) + db.commit() + except Exception as e: + log.debug(f"Payment logging failed: {e}") + + return True, None + + +# --------------------------------------------------------------------------- +# Route registration +# --------------------------------------------------------------------------- + +def init_app(app, get_db_func): + """Register x402 routes on the Beacon Atlas Flask app.""" + + # Determine DB path from the app's existing config + db_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "beacon_atlas.db" + ) + + # Run migrations + try: + _run_migrations(db_path) + log.info("Beacon x402 migrations complete") + except Exception as e: + log.error(f"Beacon x402 migration failed: {e}") + + # --------------------------------------------------------------- + # Wallet Management — Native Agents + # --------------------------------------------------------------- + + @app.route("/api/agents//wallet", methods=["POST", "OPTIONS"]) + def set_agent_wallet(agent_id): + """Set Coinbase wallet for a native beacon agent (admin only).""" + if request.method == "OPTIONS": + return _cors_json({"ok": True}) + + # Simple admin check — require admin key in header + admin_key = request.headers.get("X-Admin-Key", "") + expected = os.environ.get("BEACON_ADMIN_KEY", 
"beacon_admin_2025") + if admin_key != expected: + return _cors_json({"error": "Unauthorized — admin key required"}, 401) + + data = request.get_json(silent=True) or {} + address = data.get("coinbase_address", "").strip() + if not address or not address.startswith("0x") or len(address) != 42: + return _cors_json({"error": "Invalid Base address"}, 400) + + db = get_db_func() + db.execute( + """INSERT INTO beacon_wallets (agent_id, coinbase_address, created_at) + VALUES (?, ?, ?) + ON CONFLICT(agent_id) DO UPDATE SET coinbase_address = excluded.coinbase_address""", + (agent_id, address, time.time()), + ) + db.commit() + + return _cors_json({ + "ok": True, + "agent_id": agent_id, + "coinbase_address": address, + "network": "Base (eip155:8453)", + }) + + @app.route("/api/agents//wallet", methods=["GET", "OPTIONS"]) + def get_agent_wallet(agent_id): + """Get a beacon agent's Coinbase wallet info.""" + if request.method == "OPTIONS": + return _cors_json({"ok": True}) + + db = get_db_func() + + # Check beacon_wallets table (native agents) + row = db.execute( + "SELECT coinbase_address FROM beacon_wallets WHERE agent_id = ?", + (agent_id,), + ).fetchone() + + if row and row["coinbase_address"]: + return _cors_json({ + "agent_id": agent_id, + "coinbase_address": row["coinbase_address"], + "source": "native", + "network": "Base (eip155:8453)", + "swap_info": SWAP_INFO if X402_CONFIG_OK else None, + }) + + # Check relay_agents table + try: + relay = db.execute( + "SELECT coinbase_address FROM relay_agents WHERE agent_id = ?", + (agent_id,), + ).fetchone() + if relay and relay.get("coinbase_address"): + return _cors_json({ + "agent_id": agent_id, + "coinbase_address": relay["coinbase_address"], + "source": "relay", + "network": "Base (eip155:8453)", + "swap_info": SWAP_INFO if X402_CONFIG_OK else None, + }) + except (sqlite3.OperationalError, KeyError): + pass # Column may not exist yet + + return _cors_json({ + "agent_id": agent_id, + "coinbase_address": None, + "hint": "POST 
/api/agents/<agent_id>/wallet with admin key to set wallet", + }) + + # --------------------------------------------------------------- + # Premium Endpoints (x402 paywalled) + # --------------------------------------------------------------- + + @app.route("/api/premium/reputation", methods=["GET", "OPTIONS"]) + def premium_reputation(): + """Full reputation export for all agents.""" + if request.method == "OPTIONS": + return _cors_json({"ok": True}) + + passed, err_resp = _check_x402_payment( + PRICE_REPUTATION_EXPORT if X402_CONFIG_OK else "0", + "reputation_export", + ) + if not passed: + return err_resp + + db = get_db_func() + try: + rows = db.execute( + "SELECT * FROM reputation ORDER BY score DESC" + ).fetchall() + reputation = [dict(r) for r in rows] + except sqlite3.OperationalError: + reputation = [] + + return _cors_json({ + "total": len(reputation), + "reputation": reputation, + "exported_at": time.time(), + }) + + @app.route("/api/premium/contracts/export", methods=["GET", "OPTIONS"]) + def premium_contracts_export(): + """Full contracts export with payment status.""" + if request.method == "OPTIONS": + return _cors_json({"ok": True}) + + passed, err_resp = _check_x402_payment( + PRICE_BEACON_CONTRACT if X402_CONFIG_OK else "0", + "contracts_export", + ) + if not passed: + return err_resp + + db = get_db_func() + rows = db.execute( + "SELECT * FROM contracts ORDER BY created_at DESC" + ).fetchall() + + contracts = [] + for r in rows: + d = dict(r) + # Check if contract has wallet info + for field in ("from_agent", "to_agent"): + agent_id = d.get(field, "") + wallet_row = db.execute( + "SELECT coinbase_address FROM beacon_wallets WHERE agent_id = ?", + (agent_id,), + ).fetchone() + d[f"{field}_wallet"] = wallet_row["coinbase_address"] if wallet_row else None + contracts.append(d) + + return _cors_json({ + "total": len(contracts), + "contracts": contracts, + "exported_at": time.time(), + }) + + # --------------------------------------------------------------- + # 
x402 Payment History + # --------------------------------------------------------------- + + @app.route("/api/x402/payments", methods=["GET", "OPTIONS"]) + def x402_beacon_payments(): + """View x402 payment history for beacon.""" + if request.method == "OPTIONS": + return _cors_json({"ok": True}) + + db = get_db_func() + try: + rows = db.execute( + "SELECT * FROM x402_beacon_payments ORDER BY created_at DESC LIMIT 50" + ).fetchall() + except sqlite3.OperationalError: + rows = [] + + return _cors_json({ + "payments": [dict(r) for r in rows], + "total": len(rows), + }) + + # --------------------------------------------------------------- + # x402 Status + # --------------------------------------------------------------- + + @app.route("/api/x402/status", methods=["GET", "OPTIONS"]) + def x402_beacon_status(): + """Public endpoint showing x402 integration status for Beacon Atlas.""" + if request.method == "OPTIONS": + return _cors_json({"ok": True}) + + return _cors_json({ + "x402_enabled": X402_CONFIG_OK, + "cdp_configured": has_cdp_credentials() if X402_CONFIG_OK else False, + "network": "Base (eip155:8453)", + "facilitator": FACILITATOR_URL if X402_CONFIG_OK else None, + "pricing_mode": "free" if not X402_CONFIG_OK or is_free( + PRICE_BEACON_CONTRACT if X402_CONFIG_OK else "0" + ) else "paid", + "swap_info": SWAP_INFO if X402_CONFIG_OK else None, + "premium_endpoints": [ + "/api/premium/reputation", + "/api/premium/contracts/export", + ], + }) + + log.info("Beacon Atlas x402 module initialized") diff --git a/node/rip_200_round_robin_1cpu1vote_v2.py b/node/rip_200_round_robin_1cpu1vote_v2.py index 7ba96702..e562abe4 100644 --- a/node/rip_200_round_robin_1cpu1vote_v2.py +++ b/node/rip_200_round_robin_1cpu1vote_v2.py @@ -1,426 +1,426 @@ -#!/usr/bin/env python3 -""" -RIP-200 v2: Round-Robin Consensus (1 CPU = 1 Vote) -================================================== - -Updated Antiquity Multiplier System: -- PowerPC: High multipliers (2.0-2.5x) - true vintage -- 
Intel Mac (2006-2019): Sliding scale based on age (1.0-1.5x) -- Server x86 (5+ years): Medium multiplier (0.5-1.0x) -- Modern x86 (<5 years): Starts at 0.1x, earns 15%/year loyalty bonus -- Apple Silicon: 1.2x (modern but premium hardware) -""" - -import sqlite3 -import time -from typing import List, Tuple, Dict -from datetime import datetime - -# Genesis timestamp -GENESIS_TIMESTAMP = 1728000000 # Oct 4, 2024 00:00:00 UTC -BLOCK_TIME = 600 # 10 minutes -ATTESTATION_TTL = 600 # 10 minutes -CURRENT_YEAR = 2025 - -# ============================================================================= -# ANTIQUITY MULTIPLIER SYSTEM v2 -# ============================================================================= - -# Base multipliers by architecture class -BASE_MULTIPLIERS = { - # PowerPC - True Vintage (pre-2006) - "g4": 2.5, # PowerPC G4 (2001-2005) - Most valuable - "g5": 2.0, # PowerPC G5 (2003-2006) - High value - - # Apple Silicon - Modern Premium - "apple_silicon": 1.2, # M1/M2/M3 (2020+) - Premium but modern - "m1": 1.2, - "m2": 1.2, - "m3": 1.2, - - # Placeholders - calculated dynamically - "intel_mac": None, # Calculated based on model year - "server_x86": None, # Calculated based on age - "modern_x86": 0.1, # Base rate, can earn loyalty bonus -} - -# Intel Mac model years (for sliding scale) -INTEL_MAC_MODELS = { - "MacPro1,1": 2006, - "MacPro2,1": 2007, - "MacPro3,1": 2008, - "MacPro4,1": 2009, - "MacPro5,1": 2010, - "MacPro6,1": 2013, # Trash can Mac Pro - "MacPro7,1": 2019, # Cheese grater Mac Pro - "iMacPro1,1": 2017, - "Macmini6,1": 2012, - "Macmini6,2": 2012, - "Macmini7,1": 2014, - "MacBookPro11,1": 2013, - "MacBookPro11,2": 2013, - "MacBookPro11,3": 2013, - "MacBookPro12,1": 2015, - "MacBookPro13,1": 2016, - "MacBookPro14,1": 2017, - "MacBookPro15,1": 2018, - "MacBookPro16,1": 2019, -} - -# Time decay parameters -DECAY_RATE_PER_YEAR = 0.15 # 15% decay per year for vintage bonus -LOYALTY_RATE_PER_YEAR = 0.15 # 15% bonus per year for modern x86 uptime - - 
-def get_intel_mac_multiplier(model_identifier: str, manufacture_year: int = None) -> float: - """ - Calculate multiplier for Intel Macs based on age - - Sliding scale: - - 15+ years old: 1.5x (2006-2010 Mac Pros) - - 12-14 years old: 1.3x (2011-2013 Mac Pros) - - 8-11 years old: 1.1x (2014-2017) - - 5-7 years old: 1.0x (2018-2020) - - <5 years old: 0.8x (2021+, unlikely for Intel) - """ - # Try to get year from model identifier - if manufacture_year is None: - manufacture_year = INTEL_MAC_MODELS.get(model_identifier, CURRENT_YEAR - 5) - - age = CURRENT_YEAR - manufacture_year - - if age >= 15: - return 1.5 # True vintage Intel (2006-2010) - elif age >= 12: - return 1.3 # Classic Intel (2011-2013) - elif age >= 8: - return 1.1 # Aging Intel (2014-2017) - elif age >= 5: - return 1.0 # Recent Intel (2018-2020) - else: - return 0.8 # Very recent Intel - - -def get_server_x86_multiplier(manufacture_year: int) -> float: - """ - Calculate multiplier for server/workstation x86 based on age - - Sliding scale: - - 10+ years old: 1.0x (pre-2015) - - 8-9 years old: 0.7x (2016-2017) - - 6-7 years old: 0.5x (2018-2019) - - 5 years old: 0.3x (2020) - - <5 years old: 0.1x (2021+) - modern baseline - """ - age = CURRENT_YEAR - manufacture_year - - if age >= 10: - return 1.0 # Vintage server - elif age >= 8: - return 0.7 # Aging server (like 2017 PowerEdge) - elif age >= 6: - return 0.5 # Middle-aged server - elif age >= 5: - return 0.3 # Recent server - else: - return 0.1 # Modern server - - -def get_loyalty_bonus(miner_id: str, db_path: str, base_multiplier: float) -> float: - """ - Calculate loyalty bonus for modern x86 miners - - Modern x86 (<5 years) starts at 0.1x but earns 15% per year - for consistent uptime (measured by attestation history) - - Max bonus caps at 1.0x total (10 years of perfect uptime) - """ - if base_multiplier > 0.1: - return 0.0 # Only modern x86 gets loyalty bonus - - try: - with sqlite3.connect(db_path) as conn: - cursor = conn.cursor() - - # Get first 
attestation timestamp for this miner - cursor.execute(""" - SELECT MIN(ts_ok) FROM miner_attest_history - WHERE miner = ? - """, (miner_id,)) - - result = cursor.fetchone() - if not result or not result[0]: - return 0.0 - - first_attest = result[0] - - # Calculate years of uptime - now = int(time.time()) - years_online = (now - first_attest) / (365.25 * 24 * 3600) - - # 15% bonus per year, capped at 0.9 additional (total max 1.0) - loyalty_bonus = min(years_online * LOYALTY_RATE_PER_YEAR, 0.9) - - return loyalty_bonus - - except Exception: - return 0.0 - - -def get_device_multiplier(device_info: Dict, db_path: str = None, miner_id: str = None) -> float: - """ - Master function to calculate multiplier for any device - - device_info should contain: - - arch: Architecture key (g4, g5, apple_silicon, intel_mac, server_x86, modern_x86) - - model: Model identifier (optional, for Intel Macs) - - year: Manufacture year (optional) - - family: Family name (optional, for display) - """ - arch = device_info.get("arch", "modern_x86").lower() - model = device_info.get("model", "") - year = device_info.get("year", CURRENT_YEAR) - - # PowerPC - Fixed high multipliers - if arch in ["g4", "ppc_g4", "powerpc_g4"]: - return 2.5 - elif arch in ["g5", "ppc_g5", "powerpc_g5"]: - return 2.0 - - # Apple Silicon - Fixed premium multiplier - elif arch in ["apple_silicon", "m1", "m2", "m3", "arm64_apple"]: - return 1.2 - - # Intel Mac - Sliding scale based on age - elif arch in ["intel_mac", "x86_64_mac", "mac_intel"]: - return get_intel_mac_multiplier(model, year) - - # Server/Workstation x86 - Sliding scale based on age - elif arch in ["server_x86", "workstation_x86", "xeon", "epyc"]: - return get_server_x86_multiplier(year) - - # Modern x86 - Base 0.1x + loyalty bonus - else: - base = 0.1 - loyalty = 0.0 - if db_path and miner_id: - loyalty = get_loyalty_bonus(miner_id, db_path, base) - return base + loyalty - - -def get_time_aged_multiplier(device_arch: str, chain_age_years: float, 
device_info: Dict = None) -> float: - """ - Calculate time-aged antiquity multiplier with decay - - Vintage hardware bonus decays linearly over blockchain lifetime: - - Year 0: Full multiplier - - Year 10: Significantly reduced - - Year 16.67: Vintage bonus fully decayed to modern baseline - - Modern x86 with loyalty bonus does NOT decay (reward for commitment) - """ - if device_info: - base_multiplier = get_device_multiplier(device_info) - else: - # Fallback to simple lookup - base_multiplier = BASE_MULTIPLIERS.get(device_arch.lower(), 0.1) - - # Modern x86 doesn't decay (loyalty bonus is earned, not given) - if base_multiplier <= 0.1: - return base_multiplier - - # Apple Silicon gets slight decay (it's modern hardware) - if device_arch.lower() in ["apple_silicon", "m1", "m2", "m3", "arm64_apple"]: - decay_rate = 0.05 # 5% per year (slower decay for premium) - else: - decay_rate = DECAY_RATE_PER_YEAR - - # Calculate decayed bonus - if base_multiplier <= 1.0: - return base_multiplier # No bonus to decay - - vintage_bonus = base_multiplier - 1.0 - aged_bonus = max(0, vintage_bonus * (1 - decay_rate * chain_age_years)) - - return 1.0 + aged_bonus - - -# ============================================================================= -# ROUND-ROBIN CONSENSUS FUNCTIONS -# ============================================================================= - -def get_chain_age_years(current_slot: int) -> float: - """Calculate blockchain age in years from slot number""" - chain_age_seconds = current_slot * BLOCK_TIME - return chain_age_seconds / (365.25 * 24 * 3600) - - -def get_attested_miners(db_path: str, current_ts: int) -> List[Tuple[str, str, Dict]]: - """ - Get all currently attested miners (within TTL window) - - Returns: List of (miner_id, device_arch, device_info) tuples, sorted alphabetically - """ - with sqlite3.connect(db_path) as conn: - cursor = conn.cursor() - - cursor.execute(""" - SELECT miner, device_arch, device_family, device_model, device_year - FROM 
miner_attest_recent - WHERE ts_ok >= ? - ORDER BY miner ASC - """, (current_ts - ATTESTATION_TTL,)) - - results = [] - for row in cursor.fetchall(): - miner_id, arch, family, model, year = row - device_info = { - "arch": arch or "modern_x86", - "family": family or "", - "model": model or "", - "year": year or CURRENT_YEAR - } - results.append((miner_id, arch, device_info)) - - return results - - -def get_round_robin_producer(slot: int, attested_miners: List) -> str: - """Deterministic round-robin block producer selection""" - if not attested_miners: - return None - producer_index = slot % len(attested_miners) - return attested_miners[producer_index][0] - - -def calculate_epoch_rewards_v2( - db_path: str, - epoch: int, - total_reward_urtc: int, - current_slot: int -) -> Dict[str, int]: - """ - Calculate reward distribution with v2 multiplier system - """ - chain_age_years = get_chain_age_years(current_slot) - - epoch_start_slot = epoch * 144 - epoch_end_slot = epoch_start_slot + 143 - epoch_start_ts = GENESIS_TIMESTAMP + (epoch_start_slot * BLOCK_TIME) - epoch_end_ts = GENESIS_TIMESTAMP + (epoch_end_slot * BLOCK_TIME) - - with sqlite3.connect(db_path) as conn: - cursor = conn.cursor() - - cursor.execute(""" - SELECT DISTINCT miner, device_arch, device_family, device_model, device_year - FROM miner_attest_recent - WHERE ts_ok >= ? AND ts_ok <= ? 
- """, (epoch_start_ts - ATTESTATION_TTL, epoch_end_ts)) - - epoch_miners = cursor.fetchall() - - if not epoch_miners: - return {} - - # Calculate weights with v2 system - weighted_miners = [] - total_weight = 0.0 - - for row in epoch_miners: - miner_id, arch, family, model, year = row - device_info = { - "arch": arch or "modern_x86", - "family": family or "", - "model": model or "", - "year": year or CURRENT_YEAR - } - - base_mult = get_device_multiplier(device_info, db_path, miner_id) - weight = get_time_aged_multiplier(arch, chain_age_years, device_info) - - weighted_miners.append((miner_id, weight, device_info)) - total_weight += weight - - # Distribute rewards - rewards = {} - remaining = total_reward_urtc - - for i, (miner_id, weight, device_info) in enumerate(weighted_miners): - if i == len(weighted_miners) - 1: - share = remaining - else: - share = int((weight / total_weight) * total_reward_urtc) - remaining -= share - - rewards[miner_id] = share - - return rewards - - -# ============================================================================= -# EXAMPLE / TEST -# ============================================================================= - -if __name__ == "__main__": - print("=" * 70) - print("RustChain Antiquity Multiplier System v2") - print("=" * 70) - - # Test devices - test_devices = [ - {"arch": "g4", "family": "PowerPC G4", "year": 2003}, - {"arch": "g5", "family": "PowerPC G5", "year": 2005}, - {"arch": "intel_mac", "model": "MacPro6,1", "year": 2013}, # 12 years old - {"arch": "server_x86", "family": "Dell PowerEdge", "year": 2017}, # 8 years old - {"arch": "apple_silicon", "family": "Apple M2", "year": 2022}, - {"arch": "modern_x86", "family": "Modern Desktop", "year": 2023}, - ] - - print("\n=== Base Multipliers (Year 0) ===") - print(f"{'Device':<30} {'Age':>8} {'Multiplier':>12}") - print("-" * 52) - - for device in test_devices: - mult = get_device_multiplier(device) - age = CURRENT_YEAR - device.get("year", CURRENT_YEAR) - name = 
device.get("family", device.get("arch")) - print(f"{name:<30} {age:>5} yr {mult:>10.2f}x") - - print("\n=== Multiplier Decay Over Blockchain Lifetime ===") - for years in [0, 2, 5, 10, 15]: - print(f"\n--- Chain Age: {years} years ---") - for device in test_devices: - arch = device.get("arch") - mult = get_time_aged_multiplier(arch, years, device) - name = device.get("family", device.get("arch"))[:25] - print(f" {name:<25}: {mult:.3f}x") - - print("\n=== Reward Distribution Example (1.5 RTC) ===") - total_reward = 150_000_000 # 1.5 RTC in uRTC - - weights = [] - for device in test_devices: - mult = get_device_multiplier(device) - weights.append((device.get("family", device.get("arch")), mult)) - - total_weight = sum(w[1] for w in weights) - - print(f"{'Device':<30} {'Multiplier':>10} {'Share (RTC)':>12} {'Percent':>8}") - print("-" * 62) - - for name, mult in weights: - share_urtc = int((mult / total_weight) * total_reward) - share_rtc = share_urtc / 100_000_000 - pct = (mult / total_weight) * 100 - print(f"{name:<30} {mult:>8.2f}x {share_rtc:>10.6f} {pct:>7.1f}%") - - print("\n" + "=" * 70) - print("Key Points:") - print("- PowerPC G4/G5: Highest multipliers (true vintage)") - print("- Intel Mac: Sliding scale 0.8-1.5x based on age") - print("- Server x86: Sliding scale 0.1-1.0x based on age") - print("- Modern x86: 0.1x base + 15%/year loyalty bonus") - print("- Vintage bonuses decay 15%/year over chain lifetime") - print("- Loyalty bonuses do NOT decay (reward for commitment)") - print("=" * 70) +#!/usr/bin/env python3 +""" +RIP-200 v2: Round-Robin Consensus (1 CPU = 1 Vote) +================================================== + +Updated Antiquity Multiplier System: +- PowerPC: High multipliers (2.0-2.5x) - true vintage +- Intel Mac (2006-2019): Sliding scale based on age (1.0-1.5x) +- Server x86 (5+ years): Medium multiplier (0.5-1.0x) +- Modern x86 (<5 years): Starts at 0.1x, earns 15%/year loyalty bonus +- Apple Silicon: 1.2x (modern but premium hardware) +""" 
+ +import sqlite3 +import time +from typing import List, Tuple, Dict +from datetime import datetime + +# Genesis timestamp +GENESIS_TIMESTAMP = 1728000000 # Oct 4, 2024 00:00:00 UTC +BLOCK_TIME = 600 # 10 minutes +ATTESTATION_TTL = 600 # 10 minutes +CURRENT_YEAR = 2025 + +# ============================================================================= +# ANTIQUITY MULTIPLIER SYSTEM v2 +# ============================================================================= + +# Base multipliers by architecture class +BASE_MULTIPLIERS = { + # PowerPC - True Vintage (pre-2006) + "g4": 2.5, # PowerPC G4 (2001-2005) - Most valuable + "g5": 2.0, # PowerPC G5 (2003-2006) - High value + + # Apple Silicon - Modern Premium + "apple_silicon": 1.2, # M1/M2/M3 (2020+) - Premium but modern + "m1": 1.2, + "m2": 1.2, + "m3": 1.2, + + # Placeholders - calculated dynamically + "intel_mac": None, # Calculated based on model year + "server_x86": None, # Calculated based on age + "modern_x86": 0.1, # Base rate, can earn loyalty bonus +} + +# Intel Mac model years (for sliding scale) +INTEL_MAC_MODELS = { + "MacPro1,1": 2006, + "MacPro2,1": 2007, + "MacPro3,1": 2008, + "MacPro4,1": 2009, + "MacPro5,1": 2010, + "MacPro6,1": 2013, # Trash can Mac Pro + "MacPro7,1": 2019, # Cheese grater Mac Pro + "iMacPro1,1": 2017, + "Macmini6,1": 2012, + "Macmini6,2": 2012, + "Macmini7,1": 2014, + "MacBookPro11,1": 2013, + "MacBookPro11,2": 2013, + "MacBookPro11,3": 2013, + "MacBookPro12,1": 2015, + "MacBookPro13,1": 2016, + "MacBookPro14,1": 2017, + "MacBookPro15,1": 2018, + "MacBookPro16,1": 2019, +} + +# Time decay parameters +DECAY_RATE_PER_YEAR = 0.15 # 15% decay per year for vintage bonus +LOYALTY_RATE_PER_YEAR = 0.15 # 15% bonus per year for modern x86 uptime + + +def get_intel_mac_multiplier(model_identifier: str, manufacture_year: int = None) -> float: + """ + Calculate multiplier for Intel Macs based on age + + Sliding scale: + - 15+ years old: 1.5x (2006-2010 Mac Pros) + - 12-14 years old: 1.3x 
(2011-2013 Mac Pros) + - 8-11 years old: 1.1x (2014-2017) + - 5-7 years old: 1.0x (2018-2020) + - <5 years old: 0.8x (2021+, unlikely for Intel) + """ + # Try to get year from model identifier + if manufacture_year is None: + manufacture_year = INTEL_MAC_MODELS.get(model_identifier, CURRENT_YEAR - 5) + + age = CURRENT_YEAR - manufacture_year + + if age >= 15: + return 1.5 # True vintage Intel (2006-2010) + elif age >= 12: + return 1.3 # Classic Intel (2011-2013) + elif age >= 8: + return 1.1 # Aging Intel (2014-2017) + elif age >= 5: + return 1.0 # Recent Intel (2018-2020) + else: + return 0.8 # Very recent Intel + + +def get_server_x86_multiplier(manufacture_year: int) -> float: + """ + Calculate multiplier for server/workstation x86 based on age + + Sliding scale: + - 10+ years old: 1.0x (pre-2015) + - 8-9 years old: 0.7x (2016-2017) + - 6-7 years old: 0.5x (2018-2019) + - 5 years old: 0.3x (2020) + - <5 years old: 0.1x (2021+) - modern baseline + """ + age = CURRENT_YEAR - manufacture_year + + if age >= 10: + return 1.0 # Vintage server + elif age >= 8: + return 0.7 # Aging server (like 2017 PowerEdge) + elif age >= 6: + return 0.5 # Middle-aged server + elif age >= 5: + return 0.3 # Recent server + else: + return 0.1 # Modern server + + +def get_loyalty_bonus(miner_id: str, db_path: str, base_multiplier: float) -> float: + """ + Calculate loyalty bonus for modern x86 miners + + Modern x86 (<5 years) starts at 0.1x but earns 15% per year + for consistent uptime (measured by attestation history) + + Max bonus caps at 1.0x total (10 years of perfect uptime) + """ + if base_multiplier > 0.1: + return 0.0 # Only modern x86 gets loyalty bonus + + try: + with sqlite3.connect(db_path) as conn: + cursor = conn.cursor() + + # Get first attestation timestamp for this miner + cursor.execute(""" + SELECT MIN(ts_ok) FROM miner_attest_history + WHERE miner = ? 
+ """, (miner_id,)) + + result = cursor.fetchone() + if not result or not result[0]: + return 0.0 + + first_attest = result[0] + + # Calculate years of uptime + now = int(time.time()) + years_online = (now - first_attest) / (365.25 * 24 * 3600) + + # 15% bonus per year, capped at 0.9 additional (total max 1.0) + loyalty_bonus = min(years_online * LOYALTY_RATE_PER_YEAR, 0.9) + + return loyalty_bonus + + except Exception: + return 0.0 + + +def get_device_multiplier(device_info: Dict, db_path: str = None, miner_id: str = None) -> float: + """ + Master function to calculate multiplier for any device + + device_info should contain: + - arch: Architecture key (g4, g5, apple_silicon, intel_mac, server_x86, modern_x86) + - model: Model identifier (optional, for Intel Macs) + - year: Manufacture year (optional) + - family: Family name (optional, for display) + """ + arch = device_info.get("arch", "modern_x86").lower() + model = device_info.get("model", "") + year = device_info.get("year", CURRENT_YEAR) + + # PowerPC - Fixed high multipliers + if arch in ["g4", "ppc_g4", "powerpc_g4"]: + return 2.5 + elif arch in ["g5", "ppc_g5", "powerpc_g5"]: + return 2.0 + + # Apple Silicon - Fixed premium multiplier + elif arch in ["apple_silicon", "m1", "m2", "m3", "arm64_apple"]: + return 1.2 + + # Intel Mac - Sliding scale based on age + elif arch in ["intel_mac", "x86_64_mac", "mac_intel"]: + return get_intel_mac_multiplier(model, year) + + # Server/Workstation x86 - Sliding scale based on age + elif arch in ["server_x86", "workstation_x86", "xeon", "epyc"]: + return get_server_x86_multiplier(year) + + # Modern x86 - Base 0.1x + loyalty bonus + else: + base = 0.1 + loyalty = 0.0 + if db_path and miner_id: + loyalty = get_loyalty_bonus(miner_id, db_path, base) + return base + loyalty + + +def get_time_aged_multiplier(device_arch: str, chain_age_years: float, device_info: Dict = None) -> float: + """ + Calculate time-aged antiquity multiplier with decay + + Vintage hardware bonus 
decays linearly over blockchain lifetime: + - Year 0: Full multiplier + - Year 10: Significantly reduced + - Year 16.67: Vintage bonus fully decayed to modern baseline + + Modern x86 with loyalty bonus does NOT decay (reward for commitment) + """ + if device_info: + base_multiplier = get_device_multiplier(device_info) + else: + # Fallback to simple lookup + base_multiplier = BASE_MULTIPLIERS.get(device_arch.lower(), 0.1) + + # Modern x86 doesn't decay (loyalty bonus is earned, not given) + if base_multiplier <= 0.1: + return base_multiplier + + # Apple Silicon gets slight decay (it's modern hardware) + if device_arch.lower() in ["apple_silicon", "m1", "m2", "m3", "arm64_apple"]: + decay_rate = 0.05 # 5% per year (slower decay for premium) + else: + decay_rate = DECAY_RATE_PER_YEAR + + # Calculate decayed bonus + if base_multiplier <= 1.0: + return base_multiplier # No bonus to decay + + vintage_bonus = base_multiplier - 1.0 + aged_bonus = max(0, vintage_bonus * (1 - decay_rate * chain_age_years)) + + return 1.0 + aged_bonus + + +# ============================================================================= +# ROUND-ROBIN CONSENSUS FUNCTIONS +# ============================================================================= + +def get_chain_age_years(current_slot: int) -> float: + """Calculate blockchain age in years from slot number""" + chain_age_seconds = current_slot * BLOCK_TIME + return chain_age_seconds / (365.25 * 24 * 3600) + + +def get_attested_miners(db_path: str, current_ts: int) -> List[Tuple[str, str, Dict]]: + """ + Get all currently attested miners (within TTL window) + + Returns: List of (miner_id, device_arch, device_info) tuples, sorted alphabetically + """ + with sqlite3.connect(db_path) as conn: + cursor = conn.cursor() + + cursor.execute(""" + SELECT miner, device_arch, device_family, device_model, device_year + FROM miner_attest_recent + WHERE ts_ok >= ? 
+ ORDER BY miner ASC + """, (current_ts - ATTESTATION_TTL,)) + + results = [] + for row in cursor.fetchall(): + miner_id, arch, family, model, year = row + device_info = { + "arch": arch or "modern_x86", + "family": family or "", + "model": model or "", + "year": year or CURRENT_YEAR + } + results.append((miner_id, arch, device_info)) + + return results + + +def get_round_robin_producer(slot: int, attested_miners: List) -> str: + """Deterministic round-robin block producer selection""" + if not attested_miners: + return None + producer_index = slot % len(attested_miners) + return attested_miners[producer_index][0] + + +def calculate_epoch_rewards_v2( + db_path: str, + epoch: int, + total_reward_urtc: int, + current_slot: int +) -> Dict[str, int]: + """ + Calculate reward distribution with v2 multiplier system + """ + chain_age_years = get_chain_age_years(current_slot) + + epoch_start_slot = epoch * 144 + epoch_end_slot = epoch_start_slot + 143 + epoch_start_ts = GENESIS_TIMESTAMP + (epoch_start_slot * BLOCK_TIME) + epoch_end_ts = GENESIS_TIMESTAMP + (epoch_end_slot * BLOCK_TIME) + + with sqlite3.connect(db_path) as conn: + cursor = conn.cursor() + + cursor.execute(""" + SELECT DISTINCT miner, device_arch, device_family, device_model, device_year + FROM miner_attest_recent + WHERE ts_ok >= ? AND ts_ok <= ? 
+ """, (epoch_start_ts - ATTESTATION_TTL, epoch_end_ts)) + + epoch_miners = cursor.fetchall() + + if not epoch_miners: + return {} + + # Calculate weights with v2 system + weighted_miners = [] + total_weight = 0.0 + + for row in epoch_miners: + miner_id, arch, family, model, year = row + device_info = { + "arch": arch or "modern_x86", + "family": family or "", + "model": model or "", + "year": year or CURRENT_YEAR + } + + base_mult = get_device_multiplier(device_info, db_path, miner_id) + weight = get_time_aged_multiplier(arch, chain_age_years, device_info) + + weighted_miners.append((miner_id, weight, device_info)) + total_weight += weight + + # Distribute rewards + rewards = {} + remaining = total_reward_urtc + + for i, (miner_id, weight, device_info) in enumerate(weighted_miners): + if i == len(weighted_miners) - 1: + share = remaining + else: + share = int((weight / total_weight) * total_reward_urtc) + remaining -= share + + rewards[miner_id] = share + + return rewards + + +# ============================================================================= +# EXAMPLE / TEST +# ============================================================================= + +if __name__ == "__main__": + print("=" * 70) + print("RustChain Antiquity Multiplier System v2") + print("=" * 70) + + # Test devices + test_devices = [ + {"arch": "g4", "family": "PowerPC G4", "year": 2003}, + {"arch": "g5", "family": "PowerPC G5", "year": 2005}, + {"arch": "intel_mac", "model": "MacPro6,1", "year": 2013}, # 12 years old + {"arch": "server_x86", "family": "Dell PowerEdge", "year": 2017}, # 8 years old + {"arch": "apple_silicon", "family": "Apple M2", "year": 2022}, + {"arch": "modern_x86", "family": "Modern Desktop", "year": 2023}, + ] + + print("\n=== Base Multipliers (Year 0) ===") + print(f"{'Device':<30} {'Age':>8} {'Multiplier':>12}") + print("-" * 52) + + for device in test_devices: + mult = get_device_multiplier(device) + age = CURRENT_YEAR - device.get("year", CURRENT_YEAR) + name = 
device.get("family", device.get("arch")) + print(f"{name:<30} {age:>5} yr {mult:>10.2f}x") + + print("\n=== Multiplier Decay Over Blockchain Lifetime ===") + for years in [0, 2, 5, 10, 15]: + print(f"\n--- Chain Age: {years} years ---") + for device in test_devices: + arch = device.get("arch") + mult = get_time_aged_multiplier(arch, years, device) + name = device.get("family", device.get("arch"))[:25] + print(f" {name:<25}: {mult:.3f}x") + + print("\n=== Reward Distribution Example (1.5 RTC) ===") + total_reward = 150_000_000 # 1.5 RTC in uRTC + + weights = [] + for device in test_devices: + mult = get_device_multiplier(device) + weights.append((device.get("family", device.get("arch")), mult)) + + total_weight = sum(w[1] for w in weights) + + print(f"{'Device':<30} {'Multiplier':>10} {'Share (RTC)':>12} {'Percent':>8}") + print("-" * 62) + + for name, mult in weights: + share_urtc = int((mult / total_weight) * total_reward) + share_rtc = share_urtc / 100_000_000 + pct = (mult / total_weight) * 100 + print(f"{name:<30} {mult:>8.2f}x {share_rtc:>10.6f} {pct:>7.1f}%") + + print("\n" + "=" * 70) + print("Key Points:") + print("- PowerPC G4/G5: Highest multipliers (true vintage)") + print("- Intel Mac: Sliding scale 0.8-1.5x based on age") + print("- Server x86: Sliding scale 0.1-1.0x based on age") + print("- Modern x86: 0.1x base + 15%/year loyalty bonus") + print("- Vintage bonuses decay 15%/year over chain lifetime") + print("- Loyalty bonuses do NOT decay (reward for commitment)") + print("=" * 70) diff --git a/node/rom_clustering_server.py b/node/rom_clustering_server.py index aab07b29..d4c1dc58 100644 --- a/node/rom_clustering_server.py +++ b/node/rom_clustering_server.py @@ -1,408 +1,408 @@ -#!/usr/bin/env python3 -""" -ROM Clustering Detection - Server Side -======================================= -Integrates with RustChain server to detect emulated miners. 
- -When multiple "different" miners report identical ROM hashes, -they're likely VMs using the same ROM pack - flag them. -""" - -import sqlite3 -import time -from typing import Dict, List, Optional, Tuple -from rom_fingerprint_db import ( - identify_rom, - is_known_emulator_rom, - AMIGA_KICKSTART_SHA1, - MAC_68K_CHECKSUMS, - MAC_PPC_MD5, -) - -# ============================================================================= -# DATABASE SCHEMA ADDITIONS -# ============================================================================= -ROM_CLUSTERING_SCHEMA = """ --- ROM hash reports from miners -CREATE TABLE IF NOT EXISTS miner_rom_reports ( - miner_id TEXT NOT NULL, - rom_hash TEXT NOT NULL, - hash_type TEXT NOT NULL, -- sha1, md5, apple - platform TEXT, -- amiga, mac_68k, mac_ppc - first_seen INTEGER NOT NULL, - last_seen INTEGER NOT NULL, - report_count INTEGER DEFAULT 1, - PRIMARY KEY (miner_id, rom_hash) -); - --- Index for clustering queries -CREATE INDEX IF NOT EXISTS idx_rom_hash ON miner_rom_reports(rom_hash); - --- Flagged clusters -CREATE TABLE IF NOT EXISTS rom_clusters ( - cluster_id INTEGER PRIMARY KEY AUTOINCREMENT, - rom_hash TEXT NOT NULL, - hash_type TEXT NOT NULL, - miners TEXT NOT NULL, -- JSON array of miner_ids - cluster_size INTEGER NOT NULL, - is_known_emulator_rom INTEGER DEFAULT 0, - known_rom_info TEXT, -- JSON if known - first_detected INTEGER NOT NULL, - last_updated INTEGER NOT NULL -); - --- Miner flags for ROM violations -CREATE TABLE IF NOT EXISTS miner_rom_flags ( - miner_id TEXT PRIMARY KEY, - flag_reason TEXT NOT NULL, - cluster_id INTEGER, - flagged_at INTEGER NOT NULL, - resolved INTEGER DEFAULT 0, - resolved_at INTEGER -); -""" - - -def init_rom_tables(db_path: str): - """Initialize ROM clustering tables in the database.""" - conn = sqlite3.connect(db_path) - conn.executescript(ROM_CLUSTERING_SCHEMA) - conn.commit() - conn.close() - - -class ROMClusteringServer: - """ - Server-side ROM clustering detection. 
- - Tracks ROM hashes reported by miners and flags: - 1. Known emulator ROM hashes (from our database) - 2. Clustered ROMs (multiple miners with identical hash) - """ - - def __init__(self, db_path: str, cluster_threshold: int = 2): - """ - Args: - db_path: Path to SQLite database - cluster_threshold: Number of miners sharing ROM before flagging - """ - self.db_path = db_path - self.cluster_threshold = cluster_threshold - init_rom_tables(db_path) - - def _get_conn(self): - return sqlite3.connect(self.db_path) - - def process_rom_report( - self, - miner_id: str, - rom_hash: str, - hash_type: str = "sha1", - platform: str = None - ) -> Tuple[bool, str, Optional[Dict]]: - """ - Process a ROM hash report from a miner. - - Returns: - (is_valid, reason, details) - """ - now = int(time.time()) - rom_hash_lower = rom_hash.lower() - - conn = self._get_conn() - cur = conn.cursor() - - # Check 1: Is this a known emulator ROM? - if is_known_emulator_rom(rom_hash, hash_type): - rom_info = identify_rom(rom_hash, hash_type) - - # Flag the miner - cur.execute(""" - INSERT OR REPLACE INTO miner_rom_flags - (miner_id, flag_reason, flagged_at) - VALUES (?, ?, ?) - """, (miner_id, f"known_emulator_rom:{rom_info}", now)) - - conn.commit() - conn.close() - - return False, "known_emulator_rom", rom_info - - # Check 2: Record the report - cur.execute(""" - INSERT INTO miner_rom_reports - (miner_id, rom_hash, hash_type, platform, first_seen, last_seen, report_count) - VALUES (?, ?, ?, ?, ?, ?, 1) - ON CONFLICT(miner_id, rom_hash) DO UPDATE SET - last_seen = excluded.last_seen, - report_count = report_count + 1 - """, (miner_id, rom_hash_lower, hash_type, platform, now, now)) - - # Check 3: Look for clustering - cur.execute(""" - SELECT miner_id FROM miner_rom_reports - WHERE rom_hash = ? AND miner_id != ? - """, (rom_hash_lower, miner_id)) - - other_miners = [row[0] for row in cur.fetchall()] - - if len(other_miners) >= self.cluster_threshold: - # Clustering detected! 
- all_miners = [miner_id] + other_miners - - # Record the cluster - import json - cur.execute(""" - INSERT INTO rom_clusters - (rom_hash, hash_type, miners, cluster_size, first_detected, last_updated) - VALUES (?, ?, ?, ?, ?, ?) - ON CONFLICT DO UPDATE SET - miners = excluded.miners, - cluster_size = excluded.cluster_size, - last_updated = excluded.last_updated - """, ( - rom_hash_lower, hash_type, - json.dumps(all_miners), len(all_miners), - now, now - )) - - cluster_id = cur.lastrowid - - # Flag all miners in the cluster - for m in all_miners: - cur.execute(""" - INSERT OR REPLACE INTO miner_rom_flags - (miner_id, flag_reason, cluster_id, flagged_at) - VALUES (?, ?, ?, ?) - """, (m, f"rom_cluster:{len(all_miners)}_miners", cluster_id, now)) - - conn.commit() - conn.close() - - return False, "rom_clustering", { - "cluster_size": len(all_miners), - "other_miners": other_miners, - "rom_hash": rom_hash_lower, - } - - conn.commit() - conn.close() - - return True, "unique_rom", None - - def is_miner_flagged(self, miner_id: str) -> Tuple[bool, Optional[str]]: - """Check if a miner is flagged for ROM violations.""" - conn = self._get_conn() - cur = conn.cursor() - - cur.execute(""" - SELECT flag_reason FROM miner_rom_flags - WHERE miner_id = ? 
AND resolved = 0 - """, (miner_id,)) - - row = cur.fetchone() - conn.close() - - if row: - return True, row[0] - return False, None - - def get_clusters(self) -> List[Dict]: - """Get all detected ROM clusters.""" - conn = self._get_conn() - cur = conn.cursor() - - cur.execute(""" - SELECT rom_hash, hash_type, miners, cluster_size, - is_known_emulator_rom, known_rom_info, - datetime(first_detected, 'unixepoch'), - datetime(last_updated, 'unixepoch') - FROM rom_clusters - ORDER BY cluster_size DESC - """) - - import json - clusters = [] - for row in cur.fetchall(): - clusters.append({ - "rom_hash": row[0], - "hash_type": row[1], - "miners": json.loads(row[2]), - "cluster_size": row[3], - "is_known_emulator": bool(row[4]), - "known_rom_info": json.loads(row[5]) if row[5] else None, - "first_detected": row[6], - "last_updated": row[7], - }) - - conn.close() - return clusters - - def get_flagged_miners(self) -> List[Dict]: - """Get all flagged miners.""" - conn = self._get_conn() - cur = conn.cursor() - - cur.execute(""" - SELECT miner_id, flag_reason, cluster_id, - datetime(flagged_at, 'unixepoch') - FROM miner_rom_flags - WHERE resolved = 0 - """) - - flagged = [] - for row in cur.fetchall(): - flagged.append({ - "miner_id": row[0], - "reason": row[1], - "cluster_id": row[2], - "flagged_at": row[3], - }) - - conn.close() - return flagged - - def get_stats(self) -> Dict: - """Get ROM clustering statistics.""" - conn = self._get_conn() - cur = conn.cursor() - - cur.execute("SELECT COUNT(*) FROM miner_rom_reports") - total_reports = cur.fetchone()[0] - - cur.execute("SELECT COUNT(DISTINCT miner_id) FROM miner_rom_reports") - unique_miners = cur.fetchone()[0] - - cur.execute("SELECT COUNT(DISTINCT rom_hash) FROM miner_rom_reports") - unique_roms = cur.fetchone()[0] - - cur.execute("SELECT COUNT(*) FROM rom_clusters") - clusters = cur.fetchone()[0] - - cur.execute("SELECT COUNT(*) FROM miner_rom_flags WHERE resolved = 0") - flagged = cur.fetchone()[0] - - conn.close() - - 
return { - "total_rom_reports": total_reports, - "unique_miners_reporting": unique_miners, - "unique_rom_hashes": unique_roms, - "clusters_detected": clusters, - "miners_flagged": flagged, - } - - -def integrate_with_attestation( - attestation_data: Dict, - rom_server: ROMClusteringServer -) -> Tuple[bool, str]: - """ - Integrate ROM checking with miner attestation. - - Call this from the /attest/submit endpoint handler. - - Args: - attestation_data: The attestation payload from miner - rom_server: ROMClusteringServer instance - - Returns: - (is_valid, reason) - """ - miner_id = attestation_data.get("miner_id") or attestation_data.get("miner") - fingerprint = attestation_data.get("fingerprint", {}) - - # Check if fingerprint includes ROM data - rom_check = fingerprint.get("checks", {}).get("rom_fingerprint", {}) - - if not rom_check or rom_check.get("skipped"): - # No ROM data reported - OK for modern hardware - return True, "no_rom_data" - - rom_data = rom_check.get("data", {}) - rom_hashes = rom_data.get("rom_hashes", {}) - - # Process each reported ROM hash - for platform, rom_hash in rom_hashes.items(): - if isinstance(rom_hash, dict): - # Complex format with hash_type - hash_val = rom_hash.get("hash") or rom_hash.get("header_md5") - hash_type = rom_hash.get("hash_type", "md5") - else: - hash_val = rom_hash - hash_type = "sha1" # Default - - if hash_val: - is_valid, reason, details = rom_server.process_rom_report( - miner_id, hash_val, hash_type, platform - ) - - if not is_valid: - return False, f"{reason}:{details}" - - return True, "rom_check_passed" - - -if __name__ == "__main__": - import tempfile - import os - - print("ROM Clustering Server - Test") - print("=" * 50) - - # Create temp database - db_path = "/tmp/test_rom_clustering.db" - if os.path.exists(db_path): - os.remove(db_path) - - server = ROMClusteringServer(db_path, cluster_threshold=2) - - # Test 1: Known emulator ROM - print("\n[Test 1] Known emulator ROM:") - result = 
server.process_rom_report( - "fake-amiga-miner", - "891e9a547772fe0c6c19b610baf8bc4ea7fcb785", # Kickstart 1.3 - "sha1", - "amiga" - ) - print(f" Result: {result}") - - # Test 2: Unique ROM - print("\n[Test 2] Unique ROM:") - result = server.process_rom_report( - "real-vintage-mac", - "abcd1234unique5678hash", - "apple", - "mac_68k" - ) - print(f" Result: {result}") - - # Test 3: Clustering detection - print("\n[Test 3] ROM Clustering:") - for i in range(3): - result = server.process_rom_report( - f"suspicious-miner-{i}", - "deadbeef1234same5678hash", - "md5", - "mac_ppc" - ) - print(f" Miner {i}: {result}") - - # Stats - print("\n[Stats]") - stats = server.get_stats() - for k, v in stats.items(): - print(f" {k}: {v}") - - # Clusters - print("\n[Clusters]") - for cluster in server.get_clusters(): - print(f" {cluster}") - - # Flagged miners - print("\n[Flagged Miners]") - for miner in server.get_flagged_miners(): - print(f" {miner}") - - # Cleanup - os.remove(db_path) +#!/usr/bin/env python3 +""" +ROM Clustering Detection - Server Side +======================================= +Integrates with RustChain server to detect emulated miners. + +When multiple "different" miners report identical ROM hashes, +they're likely VMs using the same ROM pack - flag them. 
+""" + +import sqlite3 +import time +from typing import Dict, List, Optional, Tuple +from rom_fingerprint_db import ( + identify_rom, + is_known_emulator_rom, + AMIGA_KICKSTART_SHA1, + MAC_68K_CHECKSUMS, + MAC_PPC_MD5, +) + +# ============================================================================= +# DATABASE SCHEMA ADDITIONS +# ============================================================================= +ROM_CLUSTERING_SCHEMA = """ +-- ROM hash reports from miners +CREATE TABLE IF NOT EXISTS miner_rom_reports ( + miner_id TEXT NOT NULL, + rom_hash TEXT NOT NULL, + hash_type TEXT NOT NULL, -- sha1, md5, apple + platform TEXT, -- amiga, mac_68k, mac_ppc + first_seen INTEGER NOT NULL, + last_seen INTEGER NOT NULL, + report_count INTEGER DEFAULT 1, + PRIMARY KEY (miner_id, rom_hash) +); + +-- Index for clustering queries +CREATE INDEX IF NOT EXISTS idx_rom_hash ON miner_rom_reports(rom_hash); + +-- Flagged clusters +CREATE TABLE IF NOT EXISTS rom_clusters ( + cluster_id INTEGER PRIMARY KEY AUTOINCREMENT, + rom_hash TEXT NOT NULL, + hash_type TEXT NOT NULL, + miners TEXT NOT NULL, -- JSON array of miner_ids + cluster_size INTEGER NOT NULL, + is_known_emulator_rom INTEGER DEFAULT 0, + known_rom_info TEXT, -- JSON if known + first_detected INTEGER NOT NULL, + last_updated INTEGER NOT NULL +); + +-- Miner flags for ROM violations +CREATE TABLE IF NOT EXISTS miner_rom_flags ( + miner_id TEXT PRIMARY KEY, + flag_reason TEXT NOT NULL, + cluster_id INTEGER, + flagged_at INTEGER NOT NULL, + resolved INTEGER DEFAULT 0, + resolved_at INTEGER +); +""" + + +def init_rom_tables(db_path: str): + """Initialize ROM clustering tables in the database.""" + conn = sqlite3.connect(db_path) + conn.executescript(ROM_CLUSTERING_SCHEMA) + conn.commit() + conn.close() + + +class ROMClusteringServer: + """ + Server-side ROM clustering detection. + + Tracks ROM hashes reported by miners and flags: + 1. Known emulator ROM hashes (from our database) + 2. 
Clustered ROMs (multiple miners with identical hash) + """ + + def __init__(self, db_path: str, cluster_threshold: int = 2): + """ + Args: + db_path: Path to SQLite database + cluster_threshold: Number of miners sharing ROM before flagging + """ + self.db_path = db_path + self.cluster_threshold = cluster_threshold + init_rom_tables(db_path) + + def _get_conn(self): + return sqlite3.connect(self.db_path) + + def process_rom_report( + self, + miner_id: str, + rom_hash: str, + hash_type: str = "sha1", + platform: str = None + ) -> Tuple[bool, str, Optional[Dict]]: + """ + Process a ROM hash report from a miner. + + Returns: + (is_valid, reason, details) + """ + now = int(time.time()) + rom_hash_lower = rom_hash.lower() + + conn = self._get_conn() + cur = conn.cursor() + + # Check 1: Is this a known emulator ROM? + if is_known_emulator_rom(rom_hash, hash_type): + rom_info = identify_rom(rom_hash, hash_type) + + # Flag the miner + cur.execute(""" + INSERT OR REPLACE INTO miner_rom_flags + (miner_id, flag_reason, flagged_at) + VALUES (?, ?, ?) + """, (miner_id, f"known_emulator_rom:{rom_info}", now)) + + conn.commit() + conn.close() + + return False, "known_emulator_rom", rom_info + + # Check 2: Record the report + cur.execute(""" + INSERT INTO miner_rom_reports + (miner_id, rom_hash, hash_type, platform, first_seen, last_seen, report_count) + VALUES (?, ?, ?, ?, ?, ?, 1) + ON CONFLICT(miner_id, rom_hash) DO UPDATE SET + last_seen = excluded.last_seen, + report_count = report_count + 1 + """, (miner_id, rom_hash_lower, hash_type, platform, now, now)) + + # Check 3: Look for clustering + cur.execute(""" + SELECT miner_id FROM miner_rom_reports + WHERE rom_hash = ? AND miner_id != ? + """, (rom_hash_lower, miner_id)) + + other_miners = [row[0] for row in cur.fetchall()] + + if len(other_miners) >= self.cluster_threshold: + # Clustering detected! 
+ all_miners = [miner_id] + other_miners + + # Record the cluster + import json + cur.execute(""" + INSERT INTO rom_clusters + (rom_hash, hash_type, miners, cluster_size, first_detected, last_updated) + VALUES (?, ?, ?, ?, ?, ?) + ON CONFLICT DO UPDATE SET + miners = excluded.miners, + cluster_size = excluded.cluster_size, + last_updated = excluded.last_updated + """, ( + rom_hash_lower, hash_type, + json.dumps(all_miners), len(all_miners), + now, now + )) + + cluster_id = cur.lastrowid + + # Flag all miners in the cluster + for m in all_miners: + cur.execute(""" + INSERT OR REPLACE INTO miner_rom_flags + (miner_id, flag_reason, cluster_id, flagged_at) + VALUES (?, ?, ?, ?) + """, (m, f"rom_cluster:{len(all_miners)}_miners", cluster_id, now)) + + conn.commit() + conn.close() + + return False, "rom_clustering", { + "cluster_size": len(all_miners), + "other_miners": other_miners, + "rom_hash": rom_hash_lower, + } + + conn.commit() + conn.close() + + return True, "unique_rom", None + + def is_miner_flagged(self, miner_id: str) -> Tuple[bool, Optional[str]]: + """Check if a miner is flagged for ROM violations.""" + conn = self._get_conn() + cur = conn.cursor() + + cur.execute(""" + SELECT flag_reason FROM miner_rom_flags + WHERE miner_id = ? 
AND resolved = 0 + """, (miner_id,)) + + row = cur.fetchone() + conn.close() + + if row: + return True, row[0] + return False, None + + def get_clusters(self) -> List[Dict]: + """Get all detected ROM clusters.""" + conn = self._get_conn() + cur = conn.cursor() + + cur.execute(""" + SELECT rom_hash, hash_type, miners, cluster_size, + is_known_emulator_rom, known_rom_info, + datetime(first_detected, 'unixepoch'), + datetime(last_updated, 'unixepoch') + FROM rom_clusters + ORDER BY cluster_size DESC + """) + + import json + clusters = [] + for row in cur.fetchall(): + clusters.append({ + "rom_hash": row[0], + "hash_type": row[1], + "miners": json.loads(row[2]), + "cluster_size": row[3], + "is_known_emulator": bool(row[4]), + "known_rom_info": json.loads(row[5]) if row[5] else None, + "first_detected": row[6], + "last_updated": row[7], + }) + + conn.close() + return clusters + + def get_flagged_miners(self) -> List[Dict]: + """Get all flagged miners.""" + conn = self._get_conn() + cur = conn.cursor() + + cur.execute(""" + SELECT miner_id, flag_reason, cluster_id, + datetime(flagged_at, 'unixepoch') + FROM miner_rom_flags + WHERE resolved = 0 + """) + + flagged = [] + for row in cur.fetchall(): + flagged.append({ + "miner_id": row[0], + "reason": row[1], + "cluster_id": row[2], + "flagged_at": row[3], + }) + + conn.close() + return flagged + + def get_stats(self) -> Dict: + """Get ROM clustering statistics.""" + conn = self._get_conn() + cur = conn.cursor() + + cur.execute("SELECT COUNT(*) FROM miner_rom_reports") + total_reports = cur.fetchone()[0] + + cur.execute("SELECT COUNT(DISTINCT miner_id) FROM miner_rom_reports") + unique_miners = cur.fetchone()[0] + + cur.execute("SELECT COUNT(DISTINCT rom_hash) FROM miner_rom_reports") + unique_roms = cur.fetchone()[0] + + cur.execute("SELECT COUNT(*) FROM rom_clusters") + clusters = cur.fetchone()[0] + + cur.execute("SELECT COUNT(*) FROM miner_rom_flags WHERE resolved = 0") + flagged = cur.fetchone()[0] + + conn.close() + + 
return { + "total_rom_reports": total_reports, + "unique_miners_reporting": unique_miners, + "unique_rom_hashes": unique_roms, + "clusters_detected": clusters, + "miners_flagged": flagged, + } + + +def integrate_with_attestation( + attestation_data: Dict, + rom_server: ROMClusteringServer +) -> Tuple[bool, str]: + """ + Integrate ROM checking with miner attestation. + + Call this from the /attest/submit endpoint handler. + + Args: + attestation_data: The attestation payload from miner + rom_server: ROMClusteringServer instance + + Returns: + (is_valid, reason) + """ + miner_id = attestation_data.get("miner_id") or attestation_data.get("miner") + fingerprint = attestation_data.get("fingerprint", {}) + + # Check if fingerprint includes ROM data + rom_check = fingerprint.get("checks", {}).get("rom_fingerprint", {}) + + if not rom_check or rom_check.get("skipped"): + # No ROM data reported - OK for modern hardware + return True, "no_rom_data" + + rom_data = rom_check.get("data", {}) + rom_hashes = rom_data.get("rom_hashes", {}) + + # Process each reported ROM hash + for platform, rom_hash in rom_hashes.items(): + if isinstance(rom_hash, dict): + # Complex format with hash_type + hash_val = rom_hash.get("hash") or rom_hash.get("header_md5") + hash_type = rom_hash.get("hash_type", "md5") + else: + hash_val = rom_hash + hash_type = "sha1" # Default + + if hash_val: + is_valid, reason, details = rom_server.process_rom_report( + miner_id, hash_val, hash_type, platform + ) + + if not is_valid: + return False, f"{reason}:{details}" + + return True, "rom_check_passed" + + +if __name__ == "__main__": + import tempfile + import os + + print("ROM Clustering Server - Test") + print("=" * 50) + + # Create temp database + db_path = "/tmp/test_rom_clustering.db" + if os.path.exists(db_path): + os.remove(db_path) + + server = ROMClusteringServer(db_path, cluster_threshold=2) + + # Test 1: Known emulator ROM + print("\n[Test 1] Known emulator ROM:") + result = 
server.process_rom_report( + "fake-amiga-miner", + "891e9a547772fe0c6c19b610baf8bc4ea7fcb785", # Kickstart 1.3 + "sha1", + "amiga" + ) + print(f" Result: {result}") + + # Test 2: Unique ROM + print("\n[Test 2] Unique ROM:") + result = server.process_rom_report( + "real-vintage-mac", + "abcd1234unique5678hash", + "apple", + "mac_68k" + ) + print(f" Result: {result}") + + # Test 3: Clustering detection + print("\n[Test 3] ROM Clustering:") + for i in range(3): + result = server.process_rom_report( + f"suspicious-miner-{i}", + "deadbeef1234same5678hash", + "md5", + "mac_ppc" + ) + print(f" Miner {i}: {result}") + + # Stats + print("\n[Stats]") + stats = server.get_stats() + for k, v in stats.items(): + print(f" {k}: {v}") + + # Clusters + print("\n[Clusters]") + for cluster in server.get_clusters(): + print(f" {cluster}") + + # Flagged miners + print("\n[Flagged Miners]") + for miner in server.get_flagged_miners(): + print(f" {miner}") + + # Cleanup + os.remove(db_path) diff --git a/node/rom_fingerprint_db.py b/node/rom_fingerprint_db.py index 226f4845..bdb473c0 100644 --- a/node/rom_fingerprint_db.py +++ b/node/rom_fingerprint_db.py @@ -1,440 +1,440 @@ -#!/usr/bin/env python3 -""" -ROM Fingerprint Database for RIP-PoA Anti-Emulation -==================================================== -Catalogs known emulator ROM dumps - these hashes indicate emulated hardware. -If multiple "different" machines report the same ROM hash, they're likely VMs/emulators -using the same pirated ROM pack. 
- -Sources: -- FS-UAE: https://fs-uae.net/docs/kickstart-roms/ -- MAMEDEV: https://wiki.mamedev.org/index.php/Driver:Mac_68K:Tech_Info:ROMs -- Cloanto: https://cloanto.com/amiga/roms/ -- E-Maculation: https://www.emaculation.com/ -""" - -from typing import Dict, List, Optional, Tuple -import hashlib -import os - -# ============================================================================= -# AMIGA KICKSTART ROMS - Known emulator ROM hashes (SHA-1) -# Everyone using UAE/WinUAE/FS-UAE uses these same dumps -# ============================================================================= -AMIGA_KICKSTART_SHA1 = { - # Kickstart 1.2 (A500/A1000/A2000) - "11f9e62cf299f72184835b7b2a70a16333fc0d88": { - "version": "1.2 r33.180", - "year": 1986, - "models": ["A500", "A1000", "A2000"], - "common_in_emulators": True, - }, - # Kickstart 1.3 (A500) - MOST COMMON in emulators - "891e9a547772fe0c6c19b610baf8bc4ea7fcb785": { - "version": "1.3 r34.5", - "year": 1987, - "models": ["A500"], - "common_in_emulators": True, - }, - "90933936cce43ca9bc6bf375662c076b27e3c458": { - "version": "1.3 r34.5 (overdump)", - "year": 1987, - "models": ["A500"], - "common_in_emulators": True, - }, - # Kickstart 2.04 (A500+) - "c5839f5cb98a7a8947065c3ed2f14f5f42e334a1": { - "version": "2.04 r37.175", - "year": 1991, - "models": ["A500+"], - "common_in_emulators": True, - }, - # Kickstart 2.05 (A600) - "02843c4253bbd29aba535b0aa3bd9a85034ecde4": { - "version": "2.05 r37.350", - "year": 1992, - "models": ["A600"], - "common_in_emulators": True, - }, - # Kickstart 3.1 - MOST COMMON for "serious" Amiga emulation - "e21545723fe8374e91342617604f1b3d703094f1": { - "version": "3.1 r40.68", - "year": 1993, - "models": ["A1200"], - "common_in_emulators": True, - }, - "f8e210d72b4c4853e0c9b85d223ba20e3d1b36ee": { - "version": "3.1 r40.68", - "year": 1993, - "models": ["A3000"], - "common_in_emulators": True, - }, - "5fe04842d04a489720f0f4bb0e46948199406f49": { - "version": "3.1 r40.68", - "year": 1993, - 
"models": ["A4000"], - "common_in_emulators": True, - }, - # Cloanto Amiga Forever (modified) - still counts as emulator - "c3c481160866e60d085e436a24db3617ff60b5f9": { - "version": "3.1 r40.68 (Cloanto)", - "year": 1993, - "models": ["A4000"], - "common_in_emulators": True, - }, - # CD32 - "3525be8887f79b5929e017b42380a79edfee542d": { - "version": "3.1 r40.60", - "year": 1993, - "models": ["CD32"], - "common_in_emulators": True, - }, - "5bef3d628ce59cc02a66e6e4ae0da48f60e78f7f": { - "version": "r40.60 Extended", - "year": 1993, - "models": ["CD32"], - "common_in_emulators": True, - }, - # CDTV - "7ba40ffa17e500ed9fed041f3424bd81d9c907be": { - "version": "1.0 Extended", - "year": 1991, - "models": ["CDTV"], - "common_in_emulators": True, - }, -} - -# ============================================================================= -# MACINTOSH 68K ROMS - Known emulator ROM hashes -# Used by Basilisk II, Mini vMac, MAME -# ============================================================================= -MAC_68K_CHECKSUMS = { - # Apple internal checksum format (first 4 bytes of ROM) - # Classic Macs - "28BA61CE": {"models": ["Mac 128K"], "size": "64K", "year": 1984}, - "4D1EEEE1": {"models": ["Mac 512K"], "size": "64K", "year": 1984}, - "4D1EEAE1": {"models": ["Mac 512Ke"], "size": "128K", "year": 1986}, - "B2E362A8": {"models": ["Mac Plus v1"], "size": "128K", "year": 1986}, - "4D1F8172": {"models": ["Mac Plus v2"], "size": "128K", "year": 1986}, - "4D1F8132": {"models": ["Mac Plus v3"], "size": "128K", "year": 1986}, - # Mac II family - "97851DB6": {"models": ["Mac II FDHD"], "size": "256K", "year": 1987}, - "9779D2C4": {"models": ["Mac II"], "size": "256K", "year": 1987}, - "97221136": {"models": ["Mac IIx", "Mac IIcx", "Mac SE/30"], "size": "256K", "year": 1988}, - "368CADFE": {"models": ["Mac IIci"], "size": "512K", "year": 1989}, - "36B7FB6C": {"models": ["Mac IIsi"], "size": "512K", "year": 1990}, - "35C28F5F": {"models": ["Mac IIfx"], "size": "512K", "year": 1990}, 
- # LC family - "350EACF0": {"models": ["Mac LC"], "size": "512K", "year": 1990}, - "35C28C8F": {"models": ["Mac LC II"], "size": "512K", "year": 1992}, - "3193670E": {"models": ["Mac Classic II", "Performa 200"], "size": "512K", "year": 1991}, - # Quadra family - commonly used in Basilisk II - "420DBFF3": {"models": ["Quadra 700", "Quadra 900"], "size": "1M", "year": 1991}, - "3DC27823": {"models": ["Quadra 950"], "size": "1M", "year": 1992}, - "F1A6F343": {"models": ["Centris 610", "Centris 650"], "size": "1M", "year": 1993}, - "F1ACAD13": {"models": ["Quadra 610", "Quadra 650", "Quadra 800"], "size": "1M", "year": 1993}, - "FF7439EE": {"models": ["Quadra 605", "LC 475", "Performa 475/476"], "size": "1M", "year": 1993}, - "5BF10FD1": {"models": ["Quadra 660AV", "Quadra 840AV"], "size": "2M", "year": 1993}, - "EDE66CBD": {"models": ["Color Classic II", "LC 550", "Performa 275/550/560", "Mac TV"], "size": "1M", "year": 1993}, - "064DC91D": {"models": ["Performa 580", "Performa 588"], "size": "1M", "year": 1994}, - # PowerBooks - "63ABFD3F": {"models": ["PowerBook 5300", "PowerBook Duo 2300"], "size": "1M", "year": 1995}, -} - -# MD5 hashes for specific ROM files (from MAMEDEV) -MAC_68K_MD5 = { - "db7e6d3205a2b48023fba5aa867ac6d6": {"models": ["Mac 128/512"], "size": "64K"}, - "4d8d1e81fa606f57c7ed7188b8b5f410": {"models": ["Mac Plus/512Ke v1"], "size": "128K"}, - "74f4095f7d245a9fb099a6f4a9943572": {"models": ["Mac II"], "size": "256K"}, - "5d8662dfab70ac34663d6d54393f5018": {"models": ["Mac LC"], "size": "512K"}, - "af343f3f1362bf29cefd630687efaa25": {"models": ["Quadra 630"], "size": "1M"}, - "b029184cea925759bc81ecdfe1ccdabd": {"models": ["Quadra 660AV/840AV"], "size": "2M"}, -} - -# ============================================================================= -# MACINTOSH PPC ROMS - SheepShaver / PearPC -# ============================================================================= -MAC_PPC_MD5 = { - # Old World ROMs (4MB) - used by SheepShaver - 
"01a80c4452c8cdf385e11bd973b44f58": {"models": ["PowerBook G3 WallStreet PDQ"], "size": "4M"}, - "b8612cc39a56d141feade9dc6361ba20": {"models": ["Power Mac G3 Gossamer"], "size": "4M"}, - "bddae47c3475a9d29865612584e18df0": {"models": ["PowerBook G3 Kanga"], "size": "4M"}, - # New World ROMs (1MB) - also used by SheepShaver - "48f635ea8246e42d8edf6a82196d5f72": {"models": ["PowerBook G4"], "size": "1M"}, - "08a9111d0a63d9cbcc37b44a431539cf": {"models": ["Mac mini G4 (Mar 2005)"], "size": "1M"}, - "7bcb22816292a3ac46267b5f16e09806": {"models": ["Mac mini G4 (Dec 2004)"], "size": "1M"}, - "1a405eaa19c4474eb7c5e26eb8a7df80": {"models": ["iBook G4"], "size": "1M"}, - "548bc9cff3da74e9e4dee81ab9b241ce": {"models": ["Power Mac G5 1.6GHz"], "size": "1M"}, - "9f512f3d4ea399fecee413bba0b11bf9": {"models": ["Power Mac G4 FW800"], "size": "1M"}, - "b7af30d6ae7408f108f0484fea886aa7": {"models": ["Power Mac G4 MDD"], "size": "1M"}, - "68cb26e83bb1e80c6a4e899ddf609463": {"models": ["iMac G4 15in"], "size": "1M"}, - "7c35693d4a91b1ccf3d730b71013e285": {"models": ["Power Mac G4 Sawtooth"], "size": "1M"}, - "44cc08c8f14958371cd868770560dac4": {"models": ["Power Mac G4 Cube"], "size": "1M"}, - "af2d2a5a003776291edb533dd75bc2d0": {"models": ["iMac G3 Slot post-May2000"], "size": "1M"}, - "d3f0ded97a7e029e627ab38235ceb742": {"models": ["PowerBook G3 Pismo"], "size": "1M"}, - "c17552881a3999e4441847c8a286a318": {"models": ["iMac G3/PowerBook G3"], "size": "1M"}, - "2c4154c2399613b15d8786460972440e": {"models": ["iMac G3 tray-loading"], "size": "1M"}, - "cc184737ab210fe360a7be42df91be2c": {"models": ["Blue White G3"], "size": "1M"}, - "55dc974738657aebbb05fcccca51bbcc": {"models": ["PowerBook G3 Lombard"], "size": "1M"}, -} - -# ============================================================================= -# OTHER RETRO PLATFORMS -# ============================================================================= -ATARI_ST_ROMS = { - # TOS ROMs commonly used in Hatari, Steem - # SHA-1 
hashes from Hatari documentation -} - -C64_ROMS = { - # Kernal/Basic ROMs used in VICE - # Everyone uses the same dumps -} - - -def compute_file_hash(filepath: str, algorithm: str = "sha1") -> Optional[str]: - """Compute hash of a file.""" - if not os.path.exists(filepath): - return None - - hasher = hashlib.new(algorithm) - with open(filepath, "rb") as f: - while chunk := f.read(8192): - hasher.update(chunk) - return hasher.hexdigest() - - -def compute_rom_checksum_apple(filepath: str) -> Optional[str]: - """Extract Apple ROM checksum (first 4 bytes, big-endian hex).""" - if not os.path.exists(filepath): - return None - - with open(filepath, "rb") as f: - first_four = f.read(4) - - if len(first_four) < 4: - return None - - return first_four.hex().upper() - - -def identify_rom(hash_value: str, hash_type: str = "sha1") -> Optional[Dict]: - """ - Identify a ROM by its hash. - Returns ROM info if known, None if unique/unknown. - """ - hash_lower = hash_value.lower() - hash_upper = hash_value.upper() - - # Check Amiga Kickstart (SHA-1) - if hash_type == "sha1" and hash_lower in AMIGA_KICKSTART_SHA1: - info = AMIGA_KICKSTART_SHA1[hash_lower].copy() - info["platform"] = "amiga" - info["hash_type"] = "sha1" - return info - - # Check Mac 68K (Apple checksum) - if hash_type == "apple" and hash_upper in MAC_68K_CHECKSUMS: - info = MAC_68K_CHECKSUMS[hash_upper].copy() - info["platform"] = "mac_68k" - info["hash_type"] = "apple_checksum" - return info - - # Check Mac 68K (MD5) - if hash_type == "md5" and hash_lower in MAC_68K_MD5: - info = MAC_68K_MD5[hash_lower].copy() - info["platform"] = "mac_68k" - info["hash_type"] = "md5" - return info - - # Check Mac PPC (MD5) - if hash_type == "md5" and hash_lower in MAC_PPC_MD5: - info = MAC_PPC_MD5[hash_lower].copy() - info["platform"] = "mac_ppc" - info["hash_type"] = "md5" - return info - - return None - - -def is_known_emulator_rom(hash_value: str, hash_type: str = "sha1") -> bool: - """Check if a ROM hash matches a known emulator 
ROM dump.""" - return identify_rom(hash_value, hash_type) is not None - - -def get_all_known_hashes() -> Dict[str, List[str]]: - """Get all known ROM hashes organized by platform.""" - return { - "amiga_sha1": list(AMIGA_KICKSTART_SHA1.keys()), - "mac_68k_apple": list(MAC_68K_CHECKSUMS.keys()), - "mac_68k_md5": list(MAC_68K_MD5.keys()), - "mac_ppc_md5": list(MAC_PPC_MD5.keys()), - } - - -# ============================================================================= -# ROM CLUSTERING DETECTION -# ============================================================================= -class ROMClusterDetector: - """ - Detects when multiple "different" miners report identical ROM hashes. - This indicates emulation - real machines have manufacturing variance. - """ - - def __init__(self, cluster_threshold: int = 2): - """ - Args: - cluster_threshold: Number of identical ROMs before flagging. - Default 2 = any duplicate is suspicious. - """ - self.cluster_threshold = cluster_threshold - self.rom_reports: Dict[str, List[str]] = {} # hash -> list of miner_ids - - def report_rom(self, miner_id: str, rom_hash: str, hash_type: str = "sha1") -> Tuple[bool, str]: - """ - Record a ROM hash report from a miner. 
- - Returns: - (is_valid, reason) - False if clustering detected - """ - key = f"{hash_type}:{rom_hash.lower()}" - - if key not in self.rom_reports: - self.rom_reports[key] = [] - - # Check for duplicate from same miner (OK) - if miner_id in self.rom_reports[key]: - return True, "same_miner_update" - - self.rom_reports[key].append(miner_id) - - # Check for known emulator ROM - if is_known_emulator_rom(rom_hash, hash_type): - rom_info = identify_rom(rom_hash, hash_type) - return False, f"known_emulator_rom:{rom_info.get('platform')}:{rom_info.get('models', [])}" - - # Check for clustering (multiple miners with same ROM) - if len(self.rom_reports[key]) > self.cluster_threshold: - other_miners = [m for m in self.rom_reports[key] if m != miner_id] - return False, f"rom_clustering_detected:shared_with:{other_miners}" - - return True, "unique_rom" - - def get_clusters(self) -> Dict[str, List[str]]: - """Get all ROM hashes that have multiple miners.""" - return {k: v for k, v in self.rom_reports.items() if len(v) > 1} - - def get_suspicious_miners(self) -> List[str]: - """Get list of miners involved in clustering.""" - suspicious = set() - for miners in self.rom_reports.values(): - if len(miners) > self.cluster_threshold: - suspicious.update(miners) - return list(suspicious) - - -# ============================================================================= -# PLATFORM-SPECIFIC ROM DETECTION -# ============================================================================= -def detect_platform_roms() -> Dict[str, Optional[str]]: - """ - Detect ROM files on the current system. - Returns dict of platform -> rom_hash. 
- """ - results = {} - - # Check for Amiga ROMs in common locations - amiga_paths = [ - "/usr/share/fs-uae/kickstarts/", - "/usr/share/uae/", - os.path.expanduser("~/.config/fs-uae/Kickstarts/"), - os.path.expanduser("~/Amiga/Kickstarts/"), - "/opt/amiga/rom/", - ] - - for base in amiga_paths: - if os.path.isdir(base): - for f in os.listdir(base): - if f.lower().endswith(".rom"): - path = os.path.join(base, f) - sha1 = compute_file_hash(path, "sha1") - if sha1: - results["amiga_kickstart"] = sha1 - break - - # Check for Mac ROMs in common locations - mac_paths = [ - os.path.expanduser("~/.basilisk_ii_prefs"), - os.path.expanduser("~/.sheepshaver_prefs"), - "/usr/share/basilisk2/", - os.path.expanduser("~/Library/Preferences/BasiliskII/"), - ] - - # For real hardware, try to read ROM from device - # (This would need platform-specific code) - - return results - - -def get_real_hardware_rom_signature() -> Optional[Dict]: - """ - Attempt to get ROM signature from real hardware. - - On real Macs: Read from /dev/rom or memory-mapped ROM area - On real Amigas: Read from $F80000-$FFFFFF - - Returns None if not running on real retro hardware. 
- """ - import platform - - arch = platform.machine().lower() - system = platform.system().lower() - - # PowerPC Mac - try to read ROM - if "ppc" in arch or "powerpc" in arch: - rom_paths = ["/dev/rom", "/dev/nvram"] - for path in rom_paths: - if os.path.exists(path): - try: - # Read first 4 bytes for Apple checksum - with open(path, "rb") as f: - header = f.read(256) - - # Compute signature - return { - "platform": "mac_ppc_real", - "header_md5": hashlib.md5(header).hexdigest(), - "source": path, - } - except: - pass - - # 68K would need different detection - # Amiga would read from chip memory - - return None - - -if __name__ == "__main__": - print("ROM Fingerprint Database") - print("=" * 50) - - stats = get_all_known_hashes() - print(f"Amiga Kickstart ROMs: {len(stats['amiga_sha1'])}") - print(f"Mac 68K ROMs (Apple checksum): {len(stats['mac_68k_apple'])}") - print(f"Mac 68K ROMs (MD5): {len(stats['mac_68k_md5'])}") - print(f"Mac PPC ROMs (MD5): {len(stats['mac_ppc_md5'])}") - - total = sum(len(v) for v in stats.values()) - print(f"\nTotal known emulator ROMs: {total}") - - print("\n--- Testing Cluster Detection ---") - detector = ROMClusterDetector(cluster_threshold=2) - - # Simulate reports - print(detector.report_rom("miner1", "891e9a547772fe0c6c19b610baf8bc4ea7fcb785")) - print(detector.report_rom("miner2", "891e9a547772fe0c6c19b610baf8bc4ea7fcb785")) - print(detector.report_rom("miner3", "unique_hash_abc123")) - - print(f"\nClusters: {detector.get_clusters()}") - print(f"Suspicious miners: {detector.get_suspicious_miners()}") +#!/usr/bin/env python3 +""" +ROM Fingerprint Database for RIP-PoA Anti-Emulation +==================================================== +Catalogs known emulator ROM dumps - these hashes indicate emulated hardware. +If multiple "different" machines report the same ROM hash, they're likely VMs/emulators +using the same pirated ROM pack. 
+ +Sources: +- FS-UAE: https://fs-uae.net/docs/kickstart-roms/ +- MAMEDEV: https://wiki.mamedev.org/index.php/Driver:Mac_68K:Tech_Info:ROMs +- Cloanto: https://cloanto.com/amiga/roms/ +- E-Maculation: https://www.emaculation.com/ +""" + +from typing import Dict, List, Optional, Tuple +import hashlib +import os + +# ============================================================================= +# AMIGA KICKSTART ROMS - Known emulator ROM hashes (SHA-1) +# Everyone using UAE/WinUAE/FS-UAE uses these same dumps +# ============================================================================= +AMIGA_KICKSTART_SHA1 = { + # Kickstart 1.2 (A500/A1000/A2000) + "11f9e62cf299f72184835b7b2a70a16333fc0d88": { + "version": "1.2 r33.180", + "year": 1986, + "models": ["A500", "A1000", "A2000"], + "common_in_emulators": True, + }, + # Kickstart 1.3 (A500) - MOST COMMON in emulators + "891e9a547772fe0c6c19b610baf8bc4ea7fcb785": { + "version": "1.3 r34.5", + "year": 1987, + "models": ["A500"], + "common_in_emulators": True, + }, + "90933936cce43ca9bc6bf375662c076b27e3c458": { + "version": "1.3 r34.5 (overdump)", + "year": 1987, + "models": ["A500"], + "common_in_emulators": True, + }, + # Kickstart 2.04 (A500+) + "c5839f5cb98a7a8947065c3ed2f14f5f42e334a1": { + "version": "2.04 r37.175", + "year": 1991, + "models": ["A500+"], + "common_in_emulators": True, + }, + # Kickstart 2.05 (A600) + "02843c4253bbd29aba535b0aa3bd9a85034ecde4": { + "version": "2.05 r37.350", + "year": 1992, + "models": ["A600"], + "common_in_emulators": True, + }, + # Kickstart 3.1 - MOST COMMON for "serious" Amiga emulation + "e21545723fe8374e91342617604f1b3d703094f1": { + "version": "3.1 r40.68", + "year": 1993, + "models": ["A1200"], + "common_in_emulators": True, + }, + "f8e210d72b4c4853e0c9b85d223ba20e3d1b36ee": { + "version": "3.1 r40.68", + "year": 1993, + "models": ["A3000"], + "common_in_emulators": True, + }, + "5fe04842d04a489720f0f4bb0e46948199406f49": { + "version": "3.1 r40.68", + "year": 1993, + 
"models": ["A4000"], + "common_in_emulators": True, + }, + # Cloanto Amiga Forever (modified) - still counts as emulator + "c3c481160866e60d085e436a24db3617ff60b5f9": { + "version": "3.1 r40.68 (Cloanto)", + "year": 1993, + "models": ["A4000"], + "common_in_emulators": True, + }, + # CD32 + "3525be8887f79b5929e017b42380a79edfee542d": { + "version": "3.1 r40.60", + "year": 1993, + "models": ["CD32"], + "common_in_emulators": True, + }, + "5bef3d628ce59cc02a66e6e4ae0da48f60e78f7f": { + "version": "r40.60 Extended", + "year": 1993, + "models": ["CD32"], + "common_in_emulators": True, + }, + # CDTV + "7ba40ffa17e500ed9fed041f3424bd81d9c907be": { + "version": "1.0 Extended", + "year": 1991, + "models": ["CDTV"], + "common_in_emulators": True, + }, +} + +# ============================================================================= +# MACINTOSH 68K ROMS - Known emulator ROM hashes +# Used by Basilisk II, Mini vMac, MAME +# ============================================================================= +MAC_68K_CHECKSUMS = { + # Apple internal checksum format (first 4 bytes of ROM) + # Classic Macs + "28BA61CE": {"models": ["Mac 128K"], "size": "64K", "year": 1984}, + "4D1EEEE1": {"models": ["Mac 512K"], "size": "64K", "year": 1984}, + "4D1EEAE1": {"models": ["Mac 512Ke"], "size": "128K", "year": 1986}, + "B2E362A8": {"models": ["Mac Plus v1"], "size": "128K", "year": 1986}, + "4D1F8172": {"models": ["Mac Plus v2"], "size": "128K", "year": 1986}, + "4D1F8132": {"models": ["Mac Plus v3"], "size": "128K", "year": 1986}, + # Mac II family + "97851DB6": {"models": ["Mac II FDHD"], "size": "256K", "year": 1987}, + "9779D2C4": {"models": ["Mac II"], "size": "256K", "year": 1987}, + "97221136": {"models": ["Mac IIx", "Mac IIcx", "Mac SE/30"], "size": "256K", "year": 1988}, + "368CADFE": {"models": ["Mac IIci"], "size": "512K", "year": 1989}, + "36B7FB6C": {"models": ["Mac IIsi"], "size": "512K", "year": 1990}, + "35C28F5F": {"models": ["Mac IIfx"], "size": "512K", "year": 1990}, 
+ # LC family + "350EACF0": {"models": ["Mac LC"], "size": "512K", "year": 1990}, + "35C28C8F": {"models": ["Mac LC II"], "size": "512K", "year": 1992}, + "3193670E": {"models": ["Mac Classic II", "Performa 200"], "size": "512K", "year": 1991}, + # Quadra family - commonly used in Basilisk II + "420DBFF3": {"models": ["Quadra 700", "Quadra 900"], "size": "1M", "year": 1991}, + "3DC27823": {"models": ["Quadra 950"], "size": "1M", "year": 1992}, + "F1A6F343": {"models": ["Centris 610", "Centris 650"], "size": "1M", "year": 1993}, + "F1ACAD13": {"models": ["Quadra 610", "Quadra 650", "Quadra 800"], "size": "1M", "year": 1993}, + "FF7439EE": {"models": ["Quadra 605", "LC 475", "Performa 475/476"], "size": "1M", "year": 1993}, + "5BF10FD1": {"models": ["Quadra 660AV", "Quadra 840AV"], "size": "2M", "year": 1993}, + "EDE66CBD": {"models": ["Color Classic II", "LC 550", "Performa 275/550/560", "Mac TV"], "size": "1M", "year": 1993}, + "064DC91D": {"models": ["Performa 580", "Performa 588"], "size": "1M", "year": 1994}, + # PowerBooks + "63ABFD3F": {"models": ["PowerBook 5300", "PowerBook Duo 2300"], "size": "1M", "year": 1995}, +} + +# MD5 hashes for specific ROM files (from MAMEDEV) +MAC_68K_MD5 = { + "db7e6d3205a2b48023fba5aa867ac6d6": {"models": ["Mac 128/512"], "size": "64K"}, + "4d8d1e81fa606f57c7ed7188b8b5f410": {"models": ["Mac Plus/512Ke v1"], "size": "128K"}, + "74f4095f7d245a9fb099a6f4a9943572": {"models": ["Mac II"], "size": "256K"}, + "5d8662dfab70ac34663d6d54393f5018": {"models": ["Mac LC"], "size": "512K"}, + "af343f3f1362bf29cefd630687efaa25": {"models": ["Quadra 630"], "size": "1M"}, + "b029184cea925759bc81ecdfe1ccdabd": {"models": ["Quadra 660AV/840AV"], "size": "2M"}, +} + +# ============================================================================= +# MACINTOSH PPC ROMS - SheepShaver / PearPC +# ============================================================================= +MAC_PPC_MD5 = { + # Old World ROMs (4MB) - used by SheepShaver + 
"01a80c4452c8cdf385e11bd973b44f58": {"models": ["PowerBook G3 WallStreet PDQ"], "size": "4M"}, + "b8612cc39a56d141feade9dc6361ba20": {"models": ["Power Mac G3 Gossamer"], "size": "4M"}, + "bddae47c3475a9d29865612584e18df0": {"models": ["PowerBook G3 Kanga"], "size": "4M"}, + # New World ROMs (1MB) - also used by SheepShaver + "48f635ea8246e42d8edf6a82196d5f72": {"models": ["PowerBook G4"], "size": "1M"}, + "08a9111d0a63d9cbcc37b44a431539cf": {"models": ["Mac mini G4 (Mar 2005)"], "size": "1M"}, + "7bcb22816292a3ac46267b5f16e09806": {"models": ["Mac mini G4 (Dec 2004)"], "size": "1M"}, + "1a405eaa19c4474eb7c5e26eb8a7df80": {"models": ["iBook G4"], "size": "1M"}, + "548bc9cff3da74e9e4dee81ab9b241ce": {"models": ["Power Mac G5 1.6GHz"], "size": "1M"}, + "9f512f3d4ea399fecee413bba0b11bf9": {"models": ["Power Mac G4 FW800"], "size": "1M"}, + "b7af30d6ae7408f108f0484fea886aa7": {"models": ["Power Mac G4 MDD"], "size": "1M"}, + "68cb26e83bb1e80c6a4e899ddf609463": {"models": ["iMac G4 15in"], "size": "1M"}, + "7c35693d4a91b1ccf3d730b71013e285": {"models": ["Power Mac G4 Sawtooth"], "size": "1M"}, + "44cc08c8f14958371cd868770560dac4": {"models": ["Power Mac G4 Cube"], "size": "1M"}, + "af2d2a5a003776291edb533dd75bc2d0": {"models": ["iMac G3 Slot post-May2000"], "size": "1M"}, + "d3f0ded97a7e029e627ab38235ceb742": {"models": ["PowerBook G3 Pismo"], "size": "1M"}, + "c17552881a3999e4441847c8a286a318": {"models": ["iMac G3/PowerBook G3"], "size": "1M"}, + "2c4154c2399613b15d8786460972440e": {"models": ["iMac G3 tray-loading"], "size": "1M"}, + "cc184737ab210fe360a7be42df91be2c": {"models": ["Blue White G3"], "size": "1M"}, + "55dc974738657aebbb05fcccca51bbcc": {"models": ["PowerBook G3 Lombard"], "size": "1M"}, +} + +# ============================================================================= +# OTHER RETRO PLATFORMS +# ============================================================================= +ATARI_ST_ROMS = { + # TOS ROMs commonly used in Hatari, Steem + # SHA-1 
hashes from Hatari documentation +} + +C64_ROMS = { + # Kernal/Basic ROMs used in VICE + # Everyone uses the same dumps +} + + +def compute_file_hash(filepath: str, algorithm: str = "sha1") -> Optional[str]: + """Compute hash of a file.""" + if not os.path.exists(filepath): + return None + + hasher = hashlib.new(algorithm) + with open(filepath, "rb") as f: + while chunk := f.read(8192): + hasher.update(chunk) + return hasher.hexdigest() + + +def compute_rom_checksum_apple(filepath: str) -> Optional[str]: + """Extract Apple ROM checksum (first 4 bytes, big-endian hex).""" + if not os.path.exists(filepath): + return None + + with open(filepath, "rb") as f: + first_four = f.read(4) + + if len(first_four) < 4: + return None + + return first_four.hex().upper() + + +def identify_rom(hash_value: str, hash_type: str = "sha1") -> Optional[Dict]: + """ + Identify a ROM by its hash. + Returns ROM info if known, None if unique/unknown. + """ + hash_lower = hash_value.lower() + hash_upper = hash_value.upper() + + # Check Amiga Kickstart (SHA-1) + if hash_type == "sha1" and hash_lower in AMIGA_KICKSTART_SHA1: + info = AMIGA_KICKSTART_SHA1[hash_lower].copy() + info["platform"] = "amiga" + info["hash_type"] = "sha1" + return info + + # Check Mac 68K (Apple checksum) + if hash_type == "apple" and hash_upper in MAC_68K_CHECKSUMS: + info = MAC_68K_CHECKSUMS[hash_upper].copy() + info["platform"] = "mac_68k" + info["hash_type"] = "apple_checksum" + return info + + # Check Mac 68K (MD5) + if hash_type == "md5" and hash_lower in MAC_68K_MD5: + info = MAC_68K_MD5[hash_lower].copy() + info["platform"] = "mac_68k" + info["hash_type"] = "md5" + return info + + # Check Mac PPC (MD5) + if hash_type == "md5" and hash_lower in MAC_PPC_MD5: + info = MAC_PPC_MD5[hash_lower].copy() + info["platform"] = "mac_ppc" + info["hash_type"] = "md5" + return info + + return None + + +def is_known_emulator_rom(hash_value: str, hash_type: str = "sha1") -> bool: + """Check if a ROM hash matches a known emulator 
ROM dump.""" + return identify_rom(hash_value, hash_type) is not None + + +def get_all_known_hashes() -> Dict[str, List[str]]: + """Get all known ROM hashes organized by platform.""" + return { + "amiga_sha1": list(AMIGA_KICKSTART_SHA1.keys()), + "mac_68k_apple": list(MAC_68K_CHECKSUMS.keys()), + "mac_68k_md5": list(MAC_68K_MD5.keys()), + "mac_ppc_md5": list(MAC_PPC_MD5.keys()), + } + + +# ============================================================================= +# ROM CLUSTERING DETECTION +# ============================================================================= +class ROMClusterDetector: + """ + Detects when multiple "different" miners report identical ROM hashes. + This indicates emulation - real machines have manufacturing variance. + """ + + def __init__(self, cluster_threshold: int = 2): + """ + Args: + cluster_threshold: Number of identical ROMs before flagging. + Default 2 = any duplicate is suspicious. + """ + self.cluster_threshold = cluster_threshold + self.rom_reports: Dict[str, List[str]] = {} # hash -> list of miner_ids + + def report_rom(self, miner_id: str, rom_hash: str, hash_type: str = "sha1") -> Tuple[bool, str]: + """ + Record a ROM hash report from a miner. 
+ + Returns: + (is_valid, reason) - False if clustering detected + """ + key = f"{hash_type}:{rom_hash.lower()}" + + if key not in self.rom_reports: + self.rom_reports[key] = [] + + # Check for duplicate from same miner (OK) + if miner_id in self.rom_reports[key]: + return True, "same_miner_update" + + self.rom_reports[key].append(miner_id) + + # Check for known emulator ROM + if is_known_emulator_rom(rom_hash, hash_type): + rom_info = identify_rom(rom_hash, hash_type) + return False, f"known_emulator_rom:{rom_info.get('platform')}:{rom_info.get('models', [])}" + + # Check for clustering (multiple miners with same ROM) + if len(self.rom_reports[key]) > self.cluster_threshold: + other_miners = [m for m in self.rom_reports[key] if m != miner_id] + return False, f"rom_clustering_detected:shared_with:{other_miners}" + + return True, "unique_rom" + + def get_clusters(self) -> Dict[str, List[str]]: + """Get all ROM hashes that have multiple miners.""" + return {k: v for k, v in self.rom_reports.items() if len(v) > 1} + + def get_suspicious_miners(self) -> List[str]: + """Get list of miners involved in clustering.""" + suspicious = set() + for miners in self.rom_reports.values(): + if len(miners) > self.cluster_threshold: + suspicious.update(miners) + return list(suspicious) + + +# ============================================================================= +# PLATFORM-SPECIFIC ROM DETECTION +# ============================================================================= +def detect_platform_roms() -> Dict[str, Optional[str]]: + """ + Detect ROM files on the current system. + Returns dict of platform -> rom_hash. 
+ """ + results = {} + + # Check for Amiga ROMs in common locations + amiga_paths = [ + "/usr/share/fs-uae/kickstarts/", + "/usr/share/uae/", + os.path.expanduser("~/.config/fs-uae/Kickstarts/"), + os.path.expanduser("~/Amiga/Kickstarts/"), + "/opt/amiga/rom/", + ] + + for base in amiga_paths: + if os.path.isdir(base): + for f in os.listdir(base): + if f.lower().endswith(".rom"): + path = os.path.join(base, f) + sha1 = compute_file_hash(path, "sha1") + if sha1: + results["amiga_kickstart"] = sha1 + break + + # Check for Mac ROMs in common locations + mac_paths = [ + os.path.expanduser("~/.basilisk_ii_prefs"), + os.path.expanduser("~/.sheepshaver_prefs"), + "/usr/share/basilisk2/", + os.path.expanduser("~/Library/Preferences/BasiliskII/"), + ] + + # For real hardware, try to read ROM from device + # (This would need platform-specific code) + + return results + + +def get_real_hardware_rom_signature() -> Optional[Dict]: + """ + Attempt to get ROM signature from real hardware. + + On real Macs: Read from /dev/rom or memory-mapped ROM area + On real Amigas: Read from $F80000-$FFFFFF + + Returns None if not running on real retro hardware. 
+ """ + import platform + + arch = platform.machine().lower() + system = platform.system().lower() + + # PowerPC Mac - try to read ROM + if "ppc" in arch or "powerpc" in arch: + rom_paths = ["/dev/rom", "/dev/nvram"] + for path in rom_paths: + if os.path.exists(path): + try: + # Read first 4 bytes for Apple checksum + with open(path, "rb") as f: + header = f.read(256) + + # Compute signature + return { + "platform": "mac_ppc_real", + "header_md5": hashlib.md5(header).hexdigest(), + "source": path, + } + except: + pass + + # 68K would need different detection + # Amiga would read from chip memory + + return None + + +if __name__ == "__main__": + print("ROM Fingerprint Database") + print("=" * 50) + + stats = get_all_known_hashes() + print(f"Amiga Kickstart ROMs: {len(stats['amiga_sha1'])}") + print(f"Mac 68K ROMs (Apple checksum): {len(stats['mac_68k_apple'])}") + print(f"Mac 68K ROMs (MD5): {len(stats['mac_68k_md5'])}") + print(f"Mac PPC ROMs (MD5): {len(stats['mac_ppc_md5'])}") + + total = sum(len(v) for v in stats.values()) + print(f"\nTotal known emulator ROMs: {total}") + + print("\n--- Testing Cluster Detection ---") + detector = ROMClusterDetector(cluster_threshold=2) + + # Simulate reports + print(detector.report_rom("miner1", "891e9a547772fe0c6c19b610baf8bc4ea7fcb785")) + print(detector.report_rom("miner2", "891e9a547772fe0c6c19b610baf8bc4ea7fcb785")) + print(detector.report_rom("miner3", "unique_hash_abc123")) + + print(f"\nClusters: {detector.get_clusters()}") + print(f"Suspicious miners: {detector.get_suspicious_miners()}") diff --git a/node/rustchain_bft_consensus.py b/node/rustchain_bft_consensus.py index 25f422d8..6027d69d 100644 --- a/node/rustchain_bft_consensus.py +++ b/node/rustchain_bft_consensus.py @@ -1,942 +1,942 @@ -#!/usr/bin/env python3 -""" -RustChain BFT Consensus Module - RIP-0202 -Byzantine Fault Tolerant Consensus for Multi-Node Operation - -This module implements a simplified PBFT (Practical Byzantine Fault Tolerance) -consensus 
mechanism adapted for RustChain's Proof of Antiquity (PoA) model. - -Key Features: -- 3-phase consensus: PRE-PREPARE, PREPARE, COMMIT -- Tolerates f byzantine nodes where total = 3f + 1 -- Epoch-based consensus (one decision per epoch) -- View change for leader failure -- Integrated with PoA hardware attestation - -Author: RustChain Team -RIP: 0202 -Version: 1.0.0 -""" - -import hashlib -import hmac -import json -import logging -import sqlite3 -import threading -import time -from dataclasses import dataclass, asdict -from enum import Enum -from typing import Dict, List, Optional, Set, Tuple -import requests - -# Configure logging -logging.basicConfig(level=logging.INFO, format='%(asctime)s [BFT] %(message)s') - -# ============================================================================ -# CONSTANTS -# ============================================================================ - -BLOCK_TIME = 600 # 10 minutes per epoch -PREPARE_THRESHOLD = 2/3 # Need 2/3 of nodes to prepare -COMMIT_THRESHOLD = 2/3 # Need 2/3 of nodes to commit -VIEW_CHANGE_TIMEOUT = 90 # Seconds before triggering view change -CONSENSUS_MESSAGE_TTL = 300 # 5 minutes message validity - - -class ConsensusPhase(Enum): - IDLE = "idle" - PRE_PREPARE = "pre_prepare" - PREPARE = "prepare" - COMMIT = "commit" - COMMITTED = "committed" - VIEW_CHANGE = "view_change" - - -class MessageType(Enum): - PRE_PREPARE = "pre_prepare" - PREPARE = "prepare" - COMMIT = "commit" - VIEW_CHANGE = "view_change" - NEW_VIEW = "new_view" - CHECKPOINT = "checkpoint" - - -# ============================================================================ -# DATA STRUCTURES -# ============================================================================ - -@dataclass -class ConsensusMessage: - """Message structure for BFT consensus""" - msg_type: str - view: int # Current view number - epoch: int # RustChain epoch - digest: str # Hash of proposal - node_id: str # Sender node ID - signature: str # HMAC signature - timestamp: int # 
Unix timestamp - proposal: Optional[Dict] = None # Actual data (only in PRE-PREPARE) - - def to_dict(self) -> Dict: - return asdict(self) - - @staticmethod - def from_dict(data: Dict) -> 'ConsensusMessage': - return ConsensusMessage(**data) - - def compute_digest(self) -> str: - """Compute digest of the proposal""" - if self.proposal: - return hashlib.sha256(json.dumps(self.proposal, sort_keys=True).encode()).hexdigest() - return self.digest - - -@dataclass -class EpochProposal: - """Proposal for epoch settlement""" - epoch: int - miners: List[Dict] # Miner attestations - total_reward: float # 1.5 RTC per epoch - distribution: Dict[str, float] # miner_id -> reward - proposer: str # Node that created proposal - merkle_root: str # Merkle root of miner data - - def compute_digest(self) -> str: - data = { - 'epoch': self.epoch, - 'miners': self.miners, - 'total_reward': self.total_reward, - 'distribution': self.distribution, - 'proposer': self.proposer, - 'merkle_root': self.merkle_root - } - return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest() - - -@dataclass -class ViewChangeMessage: - """View change request""" - view: int - epoch: int - node_id: str - prepared_cert: Optional[Dict] # Proof of prepared state - signature: str - - -# ============================================================================ -# BFT CONSENSUS ENGINE -# ============================================================================ - -class BFTConsensus: - """ - Practical Byzantine Fault Tolerance (PBFT) consensus engine for RustChain. 
- - Adapted for Proof of Antiquity: - - No block proposer election (round-robin based on view) - - Consensus on epoch settlements (miner rewards) - - Hardware attestation validation before accepting proposals - """ - - def __init__(self, node_id: str, db_path: str, secret_key: str): - self.node_id = node_id - self.db_path = db_path - self.secret_key = secret_key - - # State - self.current_view = 0 - self.current_epoch = 0 - self.phase = ConsensusPhase.IDLE - - # Message logs - self.pre_prepare_log: Dict[int, ConsensusMessage] = {} # epoch -> message - self.prepare_log: Dict[int, Dict[str, ConsensusMessage]] = {} # epoch -> {node_id: msg} - self.commit_log: Dict[int, Dict[str, ConsensusMessage]] = {} # epoch -> {node_id: msg} - self.view_change_log: Dict[int, Dict[str, ViewChangeMessage]] = {} # view -> {node_id: msg} - - # Committed epochs - self.committed_epochs: Set[int] = set() - - # Peer nodes - self.peers: Dict[str, str] = {} # node_id -> url - - # Thread synchronization - self.lock = threading.RLock() - - # Timer for view change - self.view_change_timer: Optional[threading.Timer] = None - - # Initialize database - self._init_db() - - def _init_db(self): - """Initialize BFT consensus tables""" - with sqlite3.connect(self.db_path) as conn: - # Consensus log table - conn.execute(""" - CREATE TABLE IF NOT EXISTS bft_consensus_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - epoch INTEGER NOT NULL, - view INTEGER NOT NULL, - msg_type TEXT NOT NULL, - node_id TEXT NOT NULL, - digest TEXT NOT NULL, - proposal_json TEXT, - signature TEXT NOT NULL, - timestamp INTEGER NOT NULL, - UNIQUE(epoch, msg_type, node_id) - ) - """) - - # Committed epochs table - conn.execute(""" - CREATE TABLE IF NOT EXISTS bft_committed_epochs ( - epoch INTEGER PRIMARY KEY, - view INTEGER NOT NULL, - digest TEXT NOT NULL, - committed_at INTEGER NOT NULL, - proposal_json TEXT NOT NULL - ) - """) - - # View change log - conn.execute(""" - CREATE TABLE IF NOT EXISTS bft_view_changes ( - id 
INTEGER PRIMARY KEY AUTOINCREMENT, - view INTEGER NOT NULL, - node_id TEXT NOT NULL, - timestamp INTEGER NOT NULL, - UNIQUE(view, node_id) - ) - """) - - conn.commit() - - logging.info(f"BFT consensus initialized for node {self.node_id}") - - def register_peer(self, node_id: str, url: str): - """Register a peer node""" - with self.lock: - self.peers[node_id] = url - logging.info(f"Registered peer: {node_id} at {url}") - - def get_total_nodes(self) -> int: - """Get total number of nodes including self""" - return len(self.peers) + 1 - - def get_fault_tolerance(self) -> int: - """Calculate f (max faulty nodes we can tolerate)""" - # BFT requires n >= 3f + 1 - # So f = (n - 1) / 3 - n = self.get_total_nodes() - return (n - 1) // 3 - - def get_quorum_size(self) -> int: - """Get quorum size for consensus""" - # Quorum = 2f + 1 = ceil(2n/3) - n = self.get_total_nodes() - return (2 * n + 2) // 3 - - def is_leader(self, view: int = None) -> bool: - """Check if this node is the leader for current view""" - if view is None: - view = self.current_view - - # Round-robin leader election - nodes = sorted([self.node_id] + list(self.peers.keys())) - leader_idx = view % len(nodes) - return nodes[leader_idx] == self.node_id - - def get_leader(self, view: int = None) -> str: - """Get the leader node ID for a view""" - if view is None: - view = self.current_view - - nodes = sorted([self.node_id] + list(self.peers.keys())) - leader_idx = view % len(nodes) - return nodes[leader_idx] - - def _sign_message(self, data: str) -> str: - """Sign a message with HMAC""" - return hmac.new( - self.secret_key.encode(), - data.encode(), - hashlib.sha256 - ).hexdigest() - - def _verify_signature(self, node_id: str, data: str, signature: str) -> bool: - """Verify message signature (simplified - all nodes share key in testnet)""" - # In production, each node would have its own keypair - expected = hmac.new( - self.secret_key.encode(), - data.encode(), - hashlib.sha256 - ).hexdigest() - return 
hmac.compare_digest(signature, expected) - - # ======================================================================== - # PHASE 1: PRE-PREPARE (Leader proposes) - # ======================================================================== - - def propose_epoch_settlement(self, epoch: int, miners: List[Dict], - distribution: Dict[str, float]) -> Optional[ConsensusMessage]: - """ - Leader proposes epoch settlement (PRE-PREPARE phase). - Only the leader for current view can call this. - """ - with self.lock: - if not self.is_leader(): - logging.warning(f"Node {self.node_id} is not leader for view {self.current_view}") - return None - - if epoch in self.committed_epochs: - logging.info(f"Epoch {epoch} already committed") - return None - - # Create proposal - proposal = EpochProposal( - epoch=epoch, - miners=miners, - total_reward=1.5, # RTC per epoch - distribution=distribution, - proposer=self.node_id, - merkle_root=self._compute_merkle_root(miners) - ) - - digest = proposal.compute_digest() - timestamp = int(time.time()) - - # Sign the message - sign_data = f"{MessageType.PRE_PREPARE.value}:{self.current_view}:{epoch}:{digest}:{timestamp}" - signature = self._sign_message(sign_data) - - # Create PRE-PREPARE message - msg = ConsensusMessage( - msg_type=MessageType.PRE_PREPARE.value, - view=self.current_view, - epoch=epoch, - digest=digest, - node_id=self.node_id, - signature=signature, - timestamp=timestamp, - proposal=asdict(proposal) - ) - - # Log locally - self.pre_prepare_log[epoch] = msg - self.phase = ConsensusPhase.PRE_PREPARE - self._save_message_to_db(msg) - - # Start view change timer - self._start_view_change_timer() - - # Broadcast to peers - self._broadcast_message(msg) - - logging.info(f"[PRE-PREPARE] Leader proposed epoch {epoch} settlement") - - # Leader also prepares - self._handle_pre_prepare(msg) - - return msg - - def _compute_merkle_root(self, miners: List[Dict]) -> str: - """Compute merkle root of miner attestations""" - if not miners: - return 
hashlib.sha256(b"empty").hexdigest() - - # Simple merkle: hash all miner data - hashes = [ - hashlib.sha256(json.dumps(m, sort_keys=True).encode()).hexdigest() - for m in miners - ] - - while len(hashes) > 1: - if len(hashes) % 2 == 1: - hashes.append(hashes[-1]) - new_hashes = [] - for i in range(0, len(hashes), 2): - combined = hashes[i] + hashes[i + 1] - new_hashes.append(hashlib.sha256(combined.encode()).hexdigest()) - hashes = new_hashes - - return hashes[0] - - # ======================================================================== - # PHASE 2: PREPARE (Nodes validate and prepare) - # ======================================================================== - - def _handle_pre_prepare(self, msg: ConsensusMessage) -> Optional[ConsensusMessage]: - """Handle received PRE-PREPARE message""" - with self.lock: - epoch = msg.epoch - - # Validate message - if msg.view != self.current_view: - logging.warning(f"PRE-PREPARE for wrong view: {msg.view} != {self.current_view}") - return None - - if epoch in self.committed_epochs: - logging.info(f"Epoch {epoch} already committed") - return None - - # Verify it's from the leader - if msg.node_id != self.get_leader(msg.view): - logging.warning(f"PRE-PREPARE not from leader: {msg.node_id}") - return None - - # Validate proposal (hardware attestation checks) - if not self._validate_proposal(msg.proposal): - logging.warning(f"Invalid proposal for epoch {epoch}") - return None - - # Store PRE-PREPARE - if epoch not in self.pre_prepare_log: - self.pre_prepare_log[epoch] = msg - self._save_message_to_db(msg) - - # Send PREPARE message - timestamp = int(time.time()) - sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{timestamp}" - signature = self._sign_message(sign_data) - - prepare_msg = ConsensusMessage( - msg_type=MessageType.PREPARE.value, - view=msg.view, - epoch=epoch, - digest=msg.digest, - node_id=self.node_id, - signature=signature, - timestamp=timestamp - ) - - # Log prepare - if epoch not in 
self.prepare_log: - self.prepare_log[epoch] = {} - self.prepare_log[epoch][self.node_id] = prepare_msg - self._save_message_to_db(prepare_msg) - - self.phase = ConsensusPhase.PREPARE - - # Broadcast PREPARE - self._broadcast_message(prepare_msg) - - logging.info(f"[PREPARE] Node {self.node_id} prepared epoch {epoch}") - - # Check if we have quorum to commit - self._check_prepare_quorum(epoch) - - return prepare_msg - - def handle_prepare(self, msg: ConsensusMessage): - """Handle received PREPARE message from peer""" - with self.lock: - epoch = msg.epoch - - # Validate - if msg.view != self.current_view: - return - - if epoch in self.committed_epochs: - return - - # Verify signature - sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}" - if not self._verify_signature(msg.node_id, sign_data, msg.signature): - logging.warning(f"Invalid PREPARE signature from {msg.node_id}") - return - - # Store prepare - if epoch not in self.prepare_log: - self.prepare_log[epoch] = {} - - if msg.node_id not in self.prepare_log[epoch]: - self.prepare_log[epoch][msg.node_id] = msg - self._save_message_to_db(msg) - logging.info(f"[PREPARE] Received from {msg.node_id} for epoch {epoch}") - - # Check quorum - self._check_prepare_quorum(epoch) - - def _check_prepare_quorum(self, epoch: int): - """Check if we have quorum of PREPARE messages""" - if epoch not in self.prepare_log: - return - - prepare_count = len(self.prepare_log[epoch]) - quorum = self.get_quorum_size() - - logging.info(f"[PREPARE] Epoch {epoch}: {prepare_count}/{quorum} prepares") - - if prepare_count >= quorum and self.phase == ConsensusPhase.PREPARE: - # Transition to COMMIT phase - self._send_commit(epoch) - - # ======================================================================== - # PHASE 3: COMMIT (Finalize consensus) - # ======================================================================== - - def _send_commit(self, epoch: int): - """Send COMMIT message after receiving 
quorum of PREPAREs""" - with self.lock: - if epoch not in self.pre_prepare_log: - return - - pre_prepare = self.pre_prepare_log[epoch] - timestamp = int(time.time()) - - sign_data = f"{MessageType.COMMIT.value}:{pre_prepare.view}:{epoch}:{pre_prepare.digest}:{timestamp}" - signature = self._sign_message(sign_data) - - commit_msg = ConsensusMessage( - msg_type=MessageType.COMMIT.value, - view=pre_prepare.view, - epoch=epoch, - digest=pre_prepare.digest, - node_id=self.node_id, - signature=signature, - timestamp=timestamp - ) - - # Log commit - if epoch not in self.commit_log: - self.commit_log[epoch] = {} - self.commit_log[epoch][self.node_id] = commit_msg - self._save_message_to_db(commit_msg) - - self.phase = ConsensusPhase.COMMIT - - # Broadcast COMMIT - self._broadcast_message(commit_msg) - - logging.info(f"[COMMIT] Node {self.node_id} committed epoch {epoch}") - - # Check commit quorum - self._check_commit_quorum(epoch) - - def handle_commit(self, msg: ConsensusMessage): - """Handle received COMMIT message""" - with self.lock: - epoch = msg.epoch - - if epoch in self.committed_epochs: - return - - # Verify signature - sign_data = f"{MessageType.COMMIT.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}" - if not self._verify_signature(msg.node_id, sign_data, msg.signature): - logging.warning(f"Invalid COMMIT signature from {msg.node_id}") - return - - # Store commit - if epoch not in self.commit_log: - self.commit_log[epoch] = {} - - if msg.node_id not in self.commit_log[epoch]: - self.commit_log[epoch][msg.node_id] = msg - self._save_message_to_db(msg) - logging.info(f"[COMMIT] Received from {msg.node_id} for epoch {epoch}") - - # Check quorum - self._check_commit_quorum(epoch) - - def _check_commit_quorum(self, epoch: int): - """Check if we have quorum of COMMIT messages""" - if epoch not in self.commit_log: - return - - commit_count = len(self.commit_log[epoch]) - quorum = self.get_quorum_size() - - logging.info(f"[COMMIT] Epoch {epoch}: 
{commit_count}/{quorum} commits") - - if commit_count >= quorum and epoch not in self.committed_epochs: - # CONSENSUS REACHED! - self._finalize_epoch(epoch) - - def _finalize_epoch(self, epoch: int): - """Finalize epoch after consensus reached""" - with self.lock: - if epoch in self.committed_epochs: - return - - self.committed_epochs.add(epoch) - self.phase = ConsensusPhase.COMMITTED - - # Cancel view change timer - self._cancel_view_change_timer() - - # Get the proposal - pre_prepare = self.pre_prepare_log.get(epoch) - if not pre_prepare or not pre_prepare.proposal: - logging.error(f"No proposal found for committed epoch {epoch}") - return - - # Save to committed epochs table - with sqlite3.connect(self.db_path) as conn: - conn.execute(""" - INSERT OR REPLACE INTO bft_committed_epochs - (epoch, view, digest, committed_at, proposal_json) - VALUES (?, ?, ?, ?, ?) - """, (epoch, self.current_view, pre_prepare.digest, - int(time.time()), json.dumps(pre_prepare.proposal))) - conn.commit() - - logging.info(f"CONSENSUS REACHED for epoch {epoch}") - logging.info(f" Digest: {pre_prepare.digest[:16]}...") - logging.info(f" Proposer: {pre_prepare.proposal.get('proposer')}") - - # Apply the settlement (distribute rewards) - self._apply_settlement(pre_prepare.proposal) - - def _apply_settlement(self, proposal: Dict): - """Apply the consensus settlement to database""" - epoch = proposal.get('epoch') - distribution = proposal.get('distribution', {}) - - with sqlite3.connect(self.db_path) as conn: - for miner_id, reward in distribution.items(): - # Update balance - conn.execute(""" - INSERT INTO balances (miner_id, amount_i64) - VALUES (?, ?) - ON CONFLICT(miner_id) DO UPDATE SET - amount_i64 = amount_i64 + excluded.amount_i64 - """, (miner_id, int(reward * 1_000_000))) # Convert to micro-RTC - - # Log in ledger - conn.execute(""" - INSERT INTO ledger (miner_id, delta_i64, tx_type, memo, ts) - VALUES (?, ?, 'reward', ?, ?) 
- """, (miner_id, int(reward * 1_000_000), f"epoch_{epoch}_bft", int(time.time()))) - - conn.commit() - - logging.info(f"Applied settlement for epoch {epoch}: {len(distribution)} miners rewarded") - - # ======================================================================== - # VIEW CHANGE (Leader failure handling) - # ======================================================================== - - def _start_view_change_timer(self): - """Start timer for view change if consensus not reached""" - self._cancel_view_change_timer() - - self.view_change_timer = threading.Timer(VIEW_CHANGE_TIMEOUT, self._trigger_view_change) - self.view_change_timer.daemon = True - self.view_change_timer.start() - - def _cancel_view_change_timer(self): - """Cancel view change timer""" - if self.view_change_timer: - self.view_change_timer.cancel() - self.view_change_timer = None - - def _trigger_view_change(self): - """Trigger view change due to timeout""" - with self.lock: - logging.warning(f"[VIEW-CHANGE] Timeout! 
Requesting view {self.current_view + 1}") - self.phase = ConsensusPhase.VIEW_CHANGE - - new_view = self.current_view + 1 - timestamp = int(time.time()) - - sign_data = f"{MessageType.VIEW_CHANGE.value}:{new_view}:{self.current_epoch}:{timestamp}" - signature = self._sign_message(sign_data) - - vc_msg = ViewChangeMessage( - view=new_view, - epoch=self.current_epoch, - node_id=self.node_id, - prepared_cert=None, # Could include prepared certificate - signature=signature - ) - - # Log view change - if new_view not in self.view_change_log: - self.view_change_log[new_view] = {} - self.view_change_log[new_view][self.node_id] = vc_msg - - # Broadcast view change - self._broadcast_view_change(vc_msg) - - # Check if we have quorum for view change - self._check_view_change_quorum(new_view) - - def handle_view_change(self, msg_data: Dict): - """Handle received VIEW-CHANGE message""" - with self.lock: - new_view = msg_data.get('view') - node_id = msg_data.get('node_id') - - if new_view not in self.view_change_log: - self.view_change_log[new_view] = {} - - if node_id not in self.view_change_log[new_view]: - self.view_change_log[new_view][node_id] = ViewChangeMessage(**msg_data) - logging.info(f"[VIEW-CHANGE] Received from {node_id} for view {new_view}") - - self._check_view_change_quorum(new_view) - - def _check_view_change_quorum(self, new_view: int): - """Check if we have quorum for view change""" - if new_view not in self.view_change_log: - return - - vc_count = len(self.view_change_log[new_view]) - quorum = self.get_quorum_size() - - logging.info(f"[VIEW-CHANGE] View {new_view}: {vc_count}/{quorum} votes") - - if vc_count >= quorum: - self._perform_view_change(new_view) - - def _perform_view_change(self, new_view: int): - """Perform view change""" - with self.lock: - if new_view <= self.current_view: - return - - self.current_view = new_view - self.phase = ConsensusPhase.IDLE - - logging.info(f"[NEW-VIEW] Changed to view {new_view}, leader: {self.get_leader()}") - - # If 
we're the new leader, propose - if self.is_leader(): - logging.info(f"[NEW-VIEW] We are the new leader!") - # New leader should re-propose pending epochs - - # ======================================================================== - # VALIDATION - # ======================================================================== - - def _validate_proposal(self, proposal: Dict) -> bool: - """Validate an epoch settlement proposal""" - if not proposal: - return False - - epoch = proposal.get('epoch') - miners = proposal.get('miners', []) - distribution = proposal.get('distribution', {}) - - # Check epoch is valid - if epoch is None or epoch < 0: - return False - - # Check total reward matches - total = sum(distribution.values()) - if abs(total - 1.5) > 0.001: # Allow small float error - logging.warning(f"Invalid total reward: {total} != 1.5") - return False - - # Check all miners in distribution are in miner list - miner_ids = {m.get('miner_id') for m in miners} - for miner_id in distribution: - if miner_id not in miner_ids: - logging.warning(f"Miner {miner_id} in distribution but not in miners list") - return False - - # Verify merkle_root matches the submitted miners list. - # Without this check a Byzantine leader can recycle a valid merkle_root - # from a previous epoch while submitting a different (falsified) miners - # list, and honest nodes would still send PREPARE for the forged proposal. - expected_merkle = self._compute_merkle_root(miners) - if proposal.get('merkle_root') != expected_merkle: - logging.warning( - f"Proposal merkle_root mismatch for epoch {epoch}: " - f"got {proposal.get('merkle_root', '')[:16]}... " - f"expected {expected_merkle[:16]}..." 
- ) - return False - - return True - - # ======================================================================== - # NETWORK - # ======================================================================== - - def _broadcast_message(self, msg: ConsensusMessage): - """Broadcast message to all peers""" - for node_id, url in self.peers.items(): - try: - endpoint = f"{url}/bft/message" - response = requests.post( - endpoint, - json=msg.to_dict(), - timeout=5, - headers={'X-Node-ID': self.node_id} - ) - if response.ok: - logging.debug(f"Broadcast {msg.msg_type} to {node_id}") - except Exception as e: - logging.error(f"Failed to broadcast to {node_id}: {e}") - - def _broadcast_view_change(self, msg: ViewChangeMessage): - """Broadcast view change message""" - msg_data = asdict(msg) - for node_id, url in self.peers.items(): - try: - endpoint = f"{url}/bft/view_change" - response = requests.post(endpoint, json=msg_data, timeout=5) - if response.ok: - logging.debug(f"Broadcast VIEW-CHANGE to {node_id}") - except Exception as e: - logging.error(f"Failed to broadcast VIEW-CHANGE to {node_id}: {e}") - - def _save_message_to_db(self, msg: ConsensusMessage): - """Save consensus message to database""" - try: - with sqlite3.connect(self.db_path) as conn: - conn.execute(""" - INSERT OR REPLACE INTO bft_consensus_log - (epoch, view, msg_type, node_id, digest, proposal_json, signature, timestamp) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
- """, ( - msg.epoch, msg.view, msg.msg_type, msg.node_id, - msg.digest, json.dumps(msg.proposal) if msg.proposal else None, - msg.signature, msg.timestamp - )) - conn.commit() - except Exception as e: - logging.error(f"Failed to save message: {e}") - - def receive_message(self, msg_data: Dict): - """Handle incoming consensus message""" - msg_type = msg_data.get('msg_type') - - if msg_type == MessageType.PRE_PREPARE.value: - msg = ConsensusMessage.from_dict(msg_data) - self._handle_pre_prepare(msg) - elif msg_type == MessageType.PREPARE.value: - msg = ConsensusMessage.from_dict(msg_data) - self.handle_prepare(msg) - elif msg_type == MessageType.COMMIT.value: - msg = ConsensusMessage.from_dict(msg_data) - self.handle_commit(msg) - - # ======================================================================== - # STATUS - # ======================================================================== - - def get_status(self) -> Dict: - """Get consensus status""" - with self.lock: - return { - 'node_id': self.node_id, - 'current_view': self.current_view, - 'current_epoch': self.current_epoch, - 'phase': self.phase.value, - 'leader': self.get_leader(), - 'is_leader': self.is_leader(), - 'total_nodes': self.get_total_nodes(), - 'fault_tolerance': self.get_fault_tolerance(), - 'quorum_size': self.get_quorum_size(), - 'committed_epochs': len(self.committed_epochs), - 'peers': list(self.peers.keys()) - } - - -# ============================================================================ -# FLASK ROUTES FOR BFT -# ============================================================================ - -def create_bft_routes(app, bft: BFTConsensus): - """Add BFT consensus routes to Flask app""" - from flask import request, jsonify - - @app.route('/bft/status', methods=['GET']) - def bft_status(): - """Get BFT consensus status""" - return jsonify(bft.get_status()) - - @app.route('/bft/message', methods=['POST']) - def bft_receive_message(): - """Receive consensus message from peer""" - try: - 
msg_data = request.get_json() - bft.receive_message(msg_data) - return jsonify({'status': 'ok'}) - except Exception as e: - logging.error(f"BFT message error: {e}") - return jsonify({'error': str(e)}), 400 - - @app.route('/bft/view_change', methods=['POST']) - def bft_view_change(): - """Receive view change message""" - try: - msg_data = request.get_json() - bft.handle_view_change(msg_data) - return jsonify({'status': 'ok'}) - except Exception as e: - logging.error(f"BFT view change error: {e}") - return jsonify({'error': str(e)}), 400 - - @app.route('/bft/propose', methods=['POST']) - def bft_propose(): - """Manually trigger epoch proposal (admin)""" - try: - data = request.get_json() - epoch = data.get('epoch') - miners = data.get('miners', []) - distribution = data.get('distribution', {}) - - msg = bft.propose_epoch_settlement(epoch, miners, distribution) - if msg: - return jsonify({'status': 'proposed', 'digest': msg.digest}) - else: - return jsonify({'error': 'not_leader_or_already_committed'}), 400 - except Exception as e: - logging.error(f"BFT propose error: {e}") - return jsonify({'error': str(e)}), 500 - - -# ============================================================================ -# MAIN (Testing) -# ============================================================================ - -if __name__ == "__main__": - import sys - - print("=" * 60) - print("RustChain BFT Consensus Module - RIP-0202") - print("=" * 60) - - # Test with mock data - node_id = sys.argv[1] if len(sys.argv) > 1 else "node-131" - db_path = "/tmp/bft_test.db" - secret_key = "rustchain_bft_testnet_key_2025" - - bft = BFTConsensus(node_id, db_path, secret_key) - - # Register peer - bft.register_peer("node-153", "http://50.28.86.153:8099") - - print(f"\nNode: {node_id}") - print(f"Is Leader: {bft.is_leader()}") - print(f"Current View: {bft.current_view}") - print(f"Total Nodes: {bft.get_total_nodes()}") - print(f"Quorum Size: {bft.get_quorum_size()}") - print(f"Fault Tolerance: 
{bft.get_fault_tolerance()}") - - if bft.is_leader(): - print("\nProposing epoch settlement...") - - # Mock miner data - miners = [ - {'miner_id': 'g4-powerbook-115', 'device_arch': 'G4', 'weight': 2.5}, - {'miner_id': 'sophia-nas-c4130', 'device_arch': 'modern', 'weight': 1.0}, - ] - - total_weight = sum(m['weight'] for m in miners) - distribution = { - m['miner_id']: 1.5 * (m['weight'] / total_weight) - for m in miners - } - - msg = bft.propose_epoch_settlement(epoch=425, miners=miners, distribution=distribution) - if msg: - print(f"Proposed! Digest: {msg.digest[:32]}...") - - print("\n" + "=" * 60) - print("Status:", json.dumps(bft.get_status(), indent=2)) +#!/usr/bin/env python3 +""" +RustChain BFT Consensus Module - RIP-0202 +Byzantine Fault Tolerant Consensus for Multi-Node Operation + +This module implements a simplified PBFT (Practical Byzantine Fault Tolerance) +consensus mechanism adapted for RustChain's Proof of Antiquity (PoA) model. + +Key Features: +- 3-phase consensus: PRE-PREPARE, PREPARE, COMMIT +- Tolerates f byzantine nodes where total = 3f + 1 +- Epoch-based consensus (one decision per epoch) +- View change for leader failure +- Integrated with PoA hardware attestation + +Author: RustChain Team +RIP: 0202 +Version: 1.0.0 +""" + +import hashlib +import hmac +import json +import logging +import sqlite3 +import threading +import time +from dataclasses import dataclass, asdict +from enum import Enum +from typing import Dict, List, Optional, Set, Tuple +import requests + +# Configure logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s [BFT] %(message)s') + +# ============================================================================ +# CONSTANTS +# ============================================================================ + +BLOCK_TIME = 600 # 10 minutes per epoch +PREPARE_THRESHOLD = 2/3 # Need 2/3 of nodes to prepare +COMMIT_THRESHOLD = 2/3 # Need 2/3 of nodes to commit +VIEW_CHANGE_TIMEOUT = 90 # Seconds before triggering view 
change +CONSENSUS_MESSAGE_TTL = 300 # 5 minutes message validity + + +class ConsensusPhase(Enum): + IDLE = "idle" + PRE_PREPARE = "pre_prepare" + PREPARE = "prepare" + COMMIT = "commit" + COMMITTED = "committed" + VIEW_CHANGE = "view_change" + + +class MessageType(Enum): + PRE_PREPARE = "pre_prepare" + PREPARE = "prepare" + COMMIT = "commit" + VIEW_CHANGE = "view_change" + NEW_VIEW = "new_view" + CHECKPOINT = "checkpoint" + + +# ============================================================================ +# DATA STRUCTURES +# ============================================================================ + +@dataclass +class ConsensusMessage: + """Message structure for BFT consensus""" + msg_type: str + view: int # Current view number + epoch: int # RustChain epoch + digest: str # Hash of proposal + node_id: str # Sender node ID + signature: str # HMAC signature + timestamp: int # Unix timestamp + proposal: Optional[Dict] = None # Actual data (only in PRE-PREPARE) + + def to_dict(self) -> Dict: + return asdict(self) + + @staticmethod + def from_dict(data: Dict) -> 'ConsensusMessage': + return ConsensusMessage(**data) + + def compute_digest(self) -> str: + """Compute digest of the proposal""" + if self.proposal: + return hashlib.sha256(json.dumps(self.proposal, sort_keys=True).encode()).hexdigest() + return self.digest + + +@dataclass +class EpochProposal: + """Proposal for epoch settlement""" + epoch: int + miners: List[Dict] # Miner attestations + total_reward: float # 1.5 RTC per epoch + distribution: Dict[str, float] # miner_id -> reward + proposer: str # Node that created proposal + merkle_root: str # Merkle root of miner data + + def compute_digest(self) -> str: + data = { + 'epoch': self.epoch, + 'miners': self.miners, + 'total_reward': self.total_reward, + 'distribution': self.distribution, + 'proposer': self.proposer, + 'merkle_root': self.merkle_root + } + return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest() + + +@dataclass +class 
ViewChangeMessage: + """View change request""" + view: int + epoch: int + node_id: str + prepared_cert: Optional[Dict] # Proof of prepared state + signature: str + + +# ============================================================================ +# BFT CONSENSUS ENGINE +# ============================================================================ + +class BFTConsensus: + """ + Practical Byzantine Fault Tolerance (PBFT) consensus engine for RustChain. + + Adapted for Proof of Antiquity: + - No block proposer election (round-robin based on view) + - Consensus on epoch settlements (miner rewards) + - Hardware attestation validation before accepting proposals + """ + + def __init__(self, node_id: str, db_path: str, secret_key: str): + self.node_id = node_id + self.db_path = db_path + self.secret_key = secret_key + + # State + self.current_view = 0 + self.current_epoch = 0 + self.phase = ConsensusPhase.IDLE + + # Message logs + self.pre_prepare_log: Dict[int, ConsensusMessage] = {} # epoch -> message + self.prepare_log: Dict[int, Dict[str, ConsensusMessage]] = {} # epoch -> {node_id: msg} + self.commit_log: Dict[int, Dict[str, ConsensusMessage]] = {} # epoch -> {node_id: msg} + self.view_change_log: Dict[int, Dict[str, ViewChangeMessage]] = {} # view -> {node_id: msg} + + # Committed epochs + self.committed_epochs: Set[int] = set() + + # Peer nodes + self.peers: Dict[str, str] = {} # node_id -> url + + # Thread synchronization + self.lock = threading.RLock() + + # Timer for view change + self.view_change_timer: Optional[threading.Timer] = None + + # Initialize database + self._init_db() + + def _init_db(self): + """Initialize BFT consensus tables""" + with sqlite3.connect(self.db_path) as conn: + # Consensus log table + conn.execute(""" + CREATE TABLE IF NOT EXISTS bft_consensus_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + epoch INTEGER NOT NULL, + view INTEGER NOT NULL, + msg_type TEXT NOT NULL, + node_id TEXT NOT NULL, + digest TEXT NOT NULL, + proposal_json TEXT, 
+ signature TEXT NOT NULL, + timestamp INTEGER NOT NULL, + UNIQUE(epoch, msg_type, node_id) + ) + """) + + # Committed epochs table + conn.execute(""" + CREATE TABLE IF NOT EXISTS bft_committed_epochs ( + epoch INTEGER PRIMARY KEY, + view INTEGER NOT NULL, + digest TEXT NOT NULL, + committed_at INTEGER NOT NULL, + proposal_json TEXT NOT NULL + ) + """) + + # View change log + conn.execute(""" + CREATE TABLE IF NOT EXISTS bft_view_changes ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + view INTEGER NOT NULL, + node_id TEXT NOT NULL, + timestamp INTEGER NOT NULL, + UNIQUE(view, node_id) + ) + """) + + conn.commit() + + logging.info(f"BFT consensus initialized for node {self.node_id}") + + def register_peer(self, node_id: str, url: str): + """Register a peer node""" + with self.lock: + self.peers[node_id] = url + logging.info(f"Registered peer: {node_id} at {url}") + + def get_total_nodes(self) -> int: + """Get total number of nodes including self""" + return len(self.peers) + 1 + + def get_fault_tolerance(self) -> int: + """Calculate f (max faulty nodes we can tolerate)""" + # BFT requires n >= 3f + 1 + # So f = (n - 1) / 3 + n = self.get_total_nodes() + return (n - 1) // 3 + + def get_quorum_size(self) -> int: + """Get quorum size for consensus""" + # Quorum = 2f + 1 = ceil(2n/3) + n = self.get_total_nodes() + return (2 * n + 2) // 3 + + def is_leader(self, view: int = None) -> bool: + """Check if this node is the leader for current view""" + if view is None: + view = self.current_view + + # Round-robin leader election + nodes = sorted([self.node_id] + list(self.peers.keys())) + leader_idx = view % len(nodes) + return nodes[leader_idx] == self.node_id + + def get_leader(self, view: int = None) -> str: + """Get the leader node ID for a view""" + if view is None: + view = self.current_view + + nodes = sorted([self.node_id] + list(self.peers.keys())) + leader_idx = view % len(nodes) + return nodes[leader_idx] + + def _sign_message(self, data: str) -> str: + """Sign a 
message with HMAC""" + return hmac.new( + self.secret_key.encode(), + data.encode(), + hashlib.sha256 + ).hexdigest() + + def _verify_signature(self, node_id: str, data: str, signature: str) -> bool: + """Verify message signature (simplified - all nodes share key in testnet)""" + # In production, each node would have its own keypair + expected = hmac.new( + self.secret_key.encode(), + data.encode(), + hashlib.sha256 + ).hexdigest() + return hmac.compare_digest(signature, expected) + + # ======================================================================== + # PHASE 1: PRE-PREPARE (Leader proposes) + # ======================================================================== + + def propose_epoch_settlement(self, epoch: int, miners: List[Dict], + distribution: Dict[str, float]) -> Optional[ConsensusMessage]: + """ + Leader proposes epoch settlement (PRE-PREPARE phase). + Only the leader for current view can call this. + """ + with self.lock: + if not self.is_leader(): + logging.warning(f"Node {self.node_id} is not leader for view {self.current_view}") + return None + + if epoch in self.committed_epochs: + logging.info(f"Epoch {epoch} already committed") + return None + + # Create proposal + proposal = EpochProposal( + epoch=epoch, + miners=miners, + total_reward=1.5, # RTC per epoch + distribution=distribution, + proposer=self.node_id, + merkle_root=self._compute_merkle_root(miners) + ) + + digest = proposal.compute_digest() + timestamp = int(time.time()) + + # Sign the message + sign_data = f"{MessageType.PRE_PREPARE.value}:{self.current_view}:{epoch}:{digest}:{timestamp}" + signature = self._sign_message(sign_data) + + # Create PRE-PREPARE message + msg = ConsensusMessage( + msg_type=MessageType.PRE_PREPARE.value, + view=self.current_view, + epoch=epoch, + digest=digest, + node_id=self.node_id, + signature=signature, + timestamp=timestamp, + proposal=asdict(proposal) + ) + + # Log locally + self.pre_prepare_log[epoch] = msg + self.phase = 
ConsensusPhase.PRE_PREPARE + self._save_message_to_db(msg) + + # Start view change timer + self._start_view_change_timer() + + # Broadcast to peers + self._broadcast_message(msg) + + logging.info(f"[PRE-PREPARE] Leader proposed epoch {epoch} settlement") + + # Leader also prepares + self._handle_pre_prepare(msg) + + return msg + + def _compute_merkle_root(self, miners: List[Dict]) -> str: + """Compute merkle root of miner attestations""" + if not miners: + return hashlib.sha256(b"empty").hexdigest() + + # Simple merkle: hash all miner data + hashes = [ + hashlib.sha256(json.dumps(m, sort_keys=True).encode()).hexdigest() + for m in miners + ] + + while len(hashes) > 1: + if len(hashes) % 2 == 1: + hashes.append(hashes[-1]) + new_hashes = [] + for i in range(0, len(hashes), 2): + combined = hashes[i] + hashes[i + 1] + new_hashes.append(hashlib.sha256(combined.encode()).hexdigest()) + hashes = new_hashes + + return hashes[0] + + # ======================================================================== + # PHASE 2: PREPARE (Nodes validate and prepare) + # ======================================================================== + + def _handle_pre_prepare(self, msg: ConsensusMessage) -> Optional[ConsensusMessage]: + """Handle received PRE-PREPARE message""" + with self.lock: + epoch = msg.epoch + + # Validate message + if msg.view != self.current_view: + logging.warning(f"PRE-PREPARE for wrong view: {msg.view} != {self.current_view}") + return None + + if epoch in self.committed_epochs: + logging.info(f"Epoch {epoch} already committed") + return None + + # Verify it's from the leader + if msg.node_id != self.get_leader(msg.view): + logging.warning(f"PRE-PREPARE not from leader: {msg.node_id}") + return None + + # Validate proposal (hardware attestation checks) + if not self._validate_proposal(msg.proposal): + logging.warning(f"Invalid proposal for epoch {epoch}") + return None + + # Store PRE-PREPARE + if epoch not in self.pre_prepare_log: + self.pre_prepare_log[epoch] 
= msg + self._save_message_to_db(msg) + + # Send PREPARE message + timestamp = int(time.time()) + sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{timestamp}" + signature = self._sign_message(sign_data) + + prepare_msg = ConsensusMessage( + msg_type=MessageType.PREPARE.value, + view=msg.view, + epoch=epoch, + digest=msg.digest, + node_id=self.node_id, + signature=signature, + timestamp=timestamp + ) + + # Log prepare + if epoch not in self.prepare_log: + self.prepare_log[epoch] = {} + self.prepare_log[epoch][self.node_id] = prepare_msg + self._save_message_to_db(prepare_msg) + + self.phase = ConsensusPhase.PREPARE + + # Broadcast PREPARE + self._broadcast_message(prepare_msg) + + logging.info(f"[PREPARE] Node {self.node_id} prepared epoch {epoch}") + + # Check if we have quorum to commit + self._check_prepare_quorum(epoch) + + return prepare_msg + + def handle_prepare(self, msg: ConsensusMessage): + """Handle received PREPARE message from peer""" + with self.lock: + epoch = msg.epoch + + # Validate + if msg.view != self.current_view: + return + + if epoch in self.committed_epochs: + return + + # Verify signature + sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}" + if not self._verify_signature(msg.node_id, sign_data, msg.signature): + logging.warning(f"Invalid PREPARE signature from {msg.node_id}") + return + + # Store prepare + if epoch not in self.prepare_log: + self.prepare_log[epoch] = {} + + if msg.node_id not in self.prepare_log[epoch]: + self.prepare_log[epoch][msg.node_id] = msg + self._save_message_to_db(msg) + logging.info(f"[PREPARE] Received from {msg.node_id} for epoch {epoch}") + + # Check quorum + self._check_prepare_quorum(epoch) + + def _check_prepare_quorum(self, epoch: int): + """Check if we have quorum of PREPARE messages""" + if epoch not in self.prepare_log: + return + + prepare_count = len(self.prepare_log[epoch]) + quorum = self.get_quorum_size() + + logging.info(f"[PREPARE] 
Epoch {epoch}: {prepare_count}/{quorum} prepares") + + if prepare_count >= quorum and self.phase == ConsensusPhase.PREPARE: + # Transition to COMMIT phase + self._send_commit(epoch) + + # ======================================================================== + # PHASE 3: COMMIT (Finalize consensus) + # ======================================================================== + + def _send_commit(self, epoch: int): + """Send COMMIT message after receiving quorum of PREPAREs""" + with self.lock: + if epoch not in self.pre_prepare_log: + return + + pre_prepare = self.pre_prepare_log[epoch] + timestamp = int(time.time()) + + sign_data = f"{MessageType.COMMIT.value}:{pre_prepare.view}:{epoch}:{pre_prepare.digest}:{timestamp}" + signature = self._sign_message(sign_data) + + commit_msg = ConsensusMessage( + msg_type=MessageType.COMMIT.value, + view=pre_prepare.view, + epoch=epoch, + digest=pre_prepare.digest, + node_id=self.node_id, + signature=signature, + timestamp=timestamp + ) + + # Log commit + if epoch not in self.commit_log: + self.commit_log[epoch] = {} + self.commit_log[epoch][self.node_id] = commit_msg + self._save_message_to_db(commit_msg) + + self.phase = ConsensusPhase.COMMIT + + # Broadcast COMMIT + self._broadcast_message(commit_msg) + + logging.info(f"[COMMIT] Node {self.node_id} committed epoch {epoch}") + + # Check commit quorum + self._check_commit_quorum(epoch) + + def handle_commit(self, msg: ConsensusMessage): + """Handle received COMMIT message""" + with self.lock: + epoch = msg.epoch + + if epoch in self.committed_epochs: + return + + # Verify signature + sign_data = f"{MessageType.COMMIT.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}" + if not self._verify_signature(msg.node_id, sign_data, msg.signature): + logging.warning(f"Invalid COMMIT signature from {msg.node_id}") + return + + # Store commit + if epoch not in self.commit_log: + self.commit_log[epoch] = {} + + if msg.node_id not in self.commit_log[epoch]: + 
self.commit_log[epoch][msg.node_id] = msg + self._save_message_to_db(msg) + logging.info(f"[COMMIT] Received from {msg.node_id} for epoch {epoch}") + + # Check quorum + self._check_commit_quorum(epoch) + + def _check_commit_quorum(self, epoch: int): + """Check if we have quorum of COMMIT messages""" + if epoch not in self.commit_log: + return + + commit_count = len(self.commit_log[epoch]) + quorum = self.get_quorum_size() + + logging.info(f"[COMMIT] Epoch {epoch}: {commit_count}/{quorum} commits") + + if commit_count >= quorum and epoch not in self.committed_epochs: + # CONSENSUS REACHED! + self._finalize_epoch(epoch) + + def _finalize_epoch(self, epoch: int): + """Finalize epoch after consensus reached""" + with self.lock: + if epoch in self.committed_epochs: + return + + self.committed_epochs.add(epoch) + self.phase = ConsensusPhase.COMMITTED + + # Cancel view change timer + self._cancel_view_change_timer() + + # Get the proposal + pre_prepare = self.pre_prepare_log.get(epoch) + if not pre_prepare or not pre_prepare.proposal: + logging.error(f"No proposal found for committed epoch {epoch}") + return + + # Save to committed epochs table + with sqlite3.connect(self.db_path) as conn: + conn.execute(""" + INSERT OR REPLACE INTO bft_committed_epochs + (epoch, view, digest, committed_at, proposal_json) + VALUES (?, ?, ?, ?, ?) 
+ """, (epoch, self.current_view, pre_prepare.digest, + int(time.time()), json.dumps(pre_prepare.proposal))) + conn.commit() + + logging.info(f"CONSENSUS REACHED for epoch {epoch}") + logging.info(f" Digest: {pre_prepare.digest[:16]}...") + logging.info(f" Proposer: {pre_prepare.proposal.get('proposer')}") + + # Apply the settlement (distribute rewards) + self._apply_settlement(pre_prepare.proposal) + + def _apply_settlement(self, proposal: Dict): + """Apply the consensus settlement to database""" + epoch = proposal.get('epoch') + distribution = proposal.get('distribution', {}) + + with sqlite3.connect(self.db_path) as conn: + for miner_id, reward in distribution.items(): + # Update balance + conn.execute(""" + INSERT INTO balances (miner_id, amount_i64) + VALUES (?, ?) + ON CONFLICT(miner_id) DO UPDATE SET + amount_i64 = amount_i64 + excluded.amount_i64 + """, (miner_id, int(reward * 1_000_000))) # Convert to micro-RTC + + # Log in ledger + conn.execute(""" + INSERT INTO ledger (miner_id, delta_i64, tx_type, memo, ts) + VALUES (?, ?, 'reward', ?, ?) 
+ """, (miner_id, int(reward * 1_000_000), f"epoch_{epoch}_bft", int(time.time()))) + + conn.commit() + + logging.info(f"Applied settlement for epoch {epoch}: {len(distribution)} miners rewarded") + + # ======================================================================== + # VIEW CHANGE (Leader failure handling) + # ======================================================================== + + def _start_view_change_timer(self): + """Start timer for view change if consensus not reached""" + self._cancel_view_change_timer() + + self.view_change_timer = threading.Timer(VIEW_CHANGE_TIMEOUT, self._trigger_view_change) + self.view_change_timer.daemon = True + self.view_change_timer.start() + + def _cancel_view_change_timer(self): + """Cancel view change timer""" + if self.view_change_timer: + self.view_change_timer.cancel() + self.view_change_timer = None + + def _trigger_view_change(self): + """Trigger view change due to timeout""" + with self.lock: + logging.warning(f"[VIEW-CHANGE] Timeout! 
Requesting view {self.current_view + 1}") + self.phase = ConsensusPhase.VIEW_CHANGE + + new_view = self.current_view + 1 + timestamp = int(time.time()) + + sign_data = f"{MessageType.VIEW_CHANGE.value}:{new_view}:{self.current_epoch}:{timestamp}" + signature = self._sign_message(sign_data) + + vc_msg = ViewChangeMessage( + view=new_view, + epoch=self.current_epoch, + node_id=self.node_id, + prepared_cert=None, # Could include prepared certificate + signature=signature + ) + + # Log view change + if new_view not in self.view_change_log: + self.view_change_log[new_view] = {} + self.view_change_log[new_view][self.node_id] = vc_msg + + # Broadcast view change + self._broadcast_view_change(vc_msg) + + # Check if we have quorum for view change + self._check_view_change_quorum(new_view) + + def handle_view_change(self, msg_data: Dict): + """Handle received VIEW-CHANGE message""" + with self.lock: + new_view = msg_data.get('view') + node_id = msg_data.get('node_id') + + if new_view not in self.view_change_log: + self.view_change_log[new_view] = {} + + if node_id not in self.view_change_log[new_view]: + self.view_change_log[new_view][node_id] = ViewChangeMessage(**msg_data) + logging.info(f"[VIEW-CHANGE] Received from {node_id} for view {new_view}") + + self._check_view_change_quorum(new_view) + + def _check_view_change_quorum(self, new_view: int): + """Check if we have quorum for view change""" + if new_view not in self.view_change_log: + return + + vc_count = len(self.view_change_log[new_view]) + quorum = self.get_quorum_size() + + logging.info(f"[VIEW-CHANGE] View {new_view}: {vc_count}/{quorum} votes") + + if vc_count >= quorum: + self._perform_view_change(new_view) + + def _perform_view_change(self, new_view: int): + """Perform view change""" + with self.lock: + if new_view <= self.current_view: + return + + self.current_view = new_view + self.phase = ConsensusPhase.IDLE + + logging.info(f"[NEW-VIEW] Changed to view {new_view}, leader: {self.get_leader()}") + + # If 
we're the new leader, propose + if self.is_leader(): + logging.info(f"[NEW-VIEW] We are the new leader!") + # New leader should re-propose pending epochs + + # ======================================================================== + # VALIDATION + # ======================================================================== + + def _validate_proposal(self, proposal: Dict) -> bool: + """Validate an epoch settlement proposal""" + if not proposal: + return False + + epoch = proposal.get('epoch') + miners = proposal.get('miners', []) + distribution = proposal.get('distribution', {}) + + # Check epoch is valid + if epoch is None or epoch < 0: + return False + + # Check total reward matches + total = sum(distribution.values()) + if abs(total - 1.5) > 0.001: # Allow small float error + logging.warning(f"Invalid total reward: {total} != 1.5") + return False + + # Check all miners in distribution are in miner list + miner_ids = {m.get('miner_id') for m in miners} + for miner_id in distribution: + if miner_id not in miner_ids: + logging.warning(f"Miner {miner_id} in distribution but not in miners list") + return False + + # Verify merkle_root matches the submitted miners list. + # Without this check a Byzantine leader can recycle a valid merkle_root + # from a previous epoch while submitting a different (falsified) miners + # list, and honest nodes would still send PREPARE for the forged proposal. + expected_merkle = self._compute_merkle_root(miners) + if proposal.get('merkle_root') != expected_merkle: + logging.warning( + f"Proposal merkle_root mismatch for epoch {epoch}: " + f"got {proposal.get('merkle_root', '')[:16]}... " + f"expected {expected_merkle[:16]}..." 
+ ) + return False + + return True + + # ======================================================================== + # NETWORK + # ======================================================================== + + def _broadcast_message(self, msg: ConsensusMessage): + """Broadcast message to all peers""" + for node_id, url in self.peers.items(): + try: + endpoint = f"{url}/bft/message" + response = requests.post( + endpoint, + json=msg.to_dict(), + timeout=5, + headers={'X-Node-ID': self.node_id} + ) + if response.ok: + logging.debug(f"Broadcast {msg.msg_type} to {node_id}") + except Exception as e: + logging.error(f"Failed to broadcast to {node_id}: {e}") + + def _broadcast_view_change(self, msg: ViewChangeMessage): + """Broadcast view change message""" + msg_data = asdict(msg) + for node_id, url in self.peers.items(): + try: + endpoint = f"{url}/bft/view_change" + response = requests.post(endpoint, json=msg_data, timeout=5) + if response.ok: + logging.debug(f"Broadcast VIEW-CHANGE to {node_id}") + except Exception as e: + logging.error(f"Failed to broadcast VIEW-CHANGE to {node_id}: {e}") + + def _save_message_to_db(self, msg: ConsensusMessage): + """Save consensus message to database""" + try: + with sqlite3.connect(self.db_path) as conn: + conn.execute(""" + INSERT OR REPLACE INTO bft_consensus_log + (epoch, view, msg_type, node_id, digest, proposal_json, signature, timestamp) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
+ """, ( + msg.epoch, msg.view, msg.msg_type, msg.node_id, + msg.digest, json.dumps(msg.proposal) if msg.proposal else None, + msg.signature, msg.timestamp + )) + conn.commit() + except Exception as e: + logging.error(f"Failed to save message: {e}") + + def receive_message(self, msg_data: Dict): + """Handle incoming consensus message""" + msg_type = msg_data.get('msg_type') + + if msg_type == MessageType.PRE_PREPARE.value: + msg = ConsensusMessage.from_dict(msg_data) + self._handle_pre_prepare(msg) + elif msg_type == MessageType.PREPARE.value: + msg = ConsensusMessage.from_dict(msg_data) + self.handle_prepare(msg) + elif msg_type == MessageType.COMMIT.value: + msg = ConsensusMessage.from_dict(msg_data) + self.handle_commit(msg) + + # ======================================================================== + # STATUS + # ======================================================================== + + def get_status(self) -> Dict: + """Get consensus status""" + with self.lock: + return { + 'node_id': self.node_id, + 'current_view': self.current_view, + 'current_epoch': self.current_epoch, + 'phase': self.phase.value, + 'leader': self.get_leader(), + 'is_leader': self.is_leader(), + 'total_nodes': self.get_total_nodes(), + 'fault_tolerance': self.get_fault_tolerance(), + 'quorum_size': self.get_quorum_size(), + 'committed_epochs': len(self.committed_epochs), + 'peers': list(self.peers.keys()) + } + + +# ============================================================================ +# FLASK ROUTES FOR BFT +# ============================================================================ + +def create_bft_routes(app, bft: BFTConsensus): + """Add BFT consensus routes to Flask app""" + from flask import request, jsonify + + @app.route('/bft/status', methods=['GET']) + def bft_status(): + """Get BFT consensus status""" + return jsonify(bft.get_status()) + + @app.route('/bft/message', methods=['POST']) + def bft_receive_message(): + """Receive consensus message from peer""" + try: + 
msg_data = request.get_json() + bft.receive_message(msg_data) + return jsonify({'status': 'ok'}) + except Exception as e: + logging.error(f"BFT message error: {e}") + return jsonify({'error': str(e)}), 400 + + @app.route('/bft/view_change', methods=['POST']) + def bft_view_change(): + """Receive view change message""" + try: + msg_data = request.get_json() + bft.handle_view_change(msg_data) + return jsonify({'status': 'ok'}) + except Exception as e: + logging.error(f"BFT view change error: {e}") + return jsonify({'error': str(e)}), 400 + + @app.route('/bft/propose', methods=['POST']) + def bft_propose(): + """Manually trigger epoch proposal (admin)""" + try: + data = request.get_json() + epoch = data.get('epoch') + miners = data.get('miners', []) + distribution = data.get('distribution', {}) + + msg = bft.propose_epoch_settlement(epoch, miners, distribution) + if msg: + return jsonify({'status': 'proposed', 'digest': msg.digest}) + else: + return jsonify({'error': 'not_leader_or_already_committed'}), 400 + except Exception as e: + logging.error(f"BFT propose error: {e}") + return jsonify({'error': str(e)}), 500 + + +# ============================================================================ +# MAIN (Testing) +# ============================================================================ + +if __name__ == "__main__": + import sys + + print("=" * 60) + print("RustChain BFT Consensus Module - RIP-0202") + print("=" * 60) + + # Test with mock data + node_id = sys.argv[1] if len(sys.argv) > 1 else "node-131" + db_path = "/tmp/bft_test.db" + secret_key = "rustchain_bft_testnet_key_2025" + + bft = BFTConsensus(node_id, db_path, secret_key) + + # Register peer + bft.register_peer("node-153", "http://50.28.86.153:8099") + + print(f"\nNode: {node_id}") + print(f"Is Leader: {bft.is_leader()}") + print(f"Current View: {bft.current_view}") + print(f"Total Nodes: {bft.get_total_nodes()}") + print(f"Quorum Size: {bft.get_quorum_size()}") + print(f"Fault Tolerance: 
{bft.get_fault_tolerance()}") + + if bft.is_leader(): + print("\nProposing epoch settlement...") + + # Mock miner data + miners = [ + {'miner_id': 'g4-powerbook-115', 'device_arch': 'G4', 'weight': 2.5}, + {'miner_id': 'sophia-nas-c4130', 'device_arch': 'modern', 'weight': 1.0}, + ] + + total_weight = sum(m['weight'] for m in miners) + distribution = { + m['miner_id']: 1.5 * (m['weight'] / total_weight) + for m in miners + } + + msg = bft.propose_epoch_settlement(epoch=425, miners=miners, distribution=distribution) + if msg: + print(f"Proposed! Digest: {msg.digest[:32]}...") + + print("\n" + "=" * 60) + print("Status:", json.dumps(bft.get_status(), indent=2)) diff --git a/node/rustchain_block_producer.py b/node/rustchain_block_producer.py index 720a01ae..c812997a 100644 --- a/node/rustchain_block_producer.py +++ b/node/rustchain_block_producer.py @@ -1,747 +1,747 @@ -#!/usr/bin/env python3 -""" -RustChain Block Producer - Mainnet Security -============================================ - -Phase 1 & 2 Implementation: -- Canonical block header construction -- Merkle tree for transaction body -- PoA round-robin block producer selection -- Block signing with Ed25519 - -Implements secure block production for Proof of Antiquity consensus. 
-""" - -import sqlite3 -import time -import threading -import logging -import json -from typing import Dict, List, Optional, Tuple -from dataclasses import dataclass, field - -from rustchain_crypto import ( - CanonicalBlockHeader, - MerkleTree, - SignedTransaction, - Ed25519Signer, - blake2b256_hex, - canonical_json -) -from rustchain_tx_handler import TransactionPool - -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s [BLOCK] %(levelname)s: %(message)s' -) -logger = logging.getLogger(__name__) - - -# ============================================================================= -# CONSTANTS -# ============================================================================= - -GENESIS_TIMESTAMP = 1728000000 # Oct 4, 2024 00:00:00 UTC -BLOCK_TIME = 600 # 10 minutes (600 seconds) -MAX_TXS_PER_BLOCK = 1000 -ATTESTATION_TTL = 600 # 10 minutes - - -# ============================================================================= -# BLOCK BODY -# ============================================================================= - -@dataclass -class BlockBody: - """ - Block body containing transactions and attestations. 
- """ - transactions: List[SignedTransaction] = field(default_factory=list) - attestations: List[Dict] = field(default_factory=list) - _merkle_tree: Optional[MerkleTree] = None - - def add_transaction(self, tx: SignedTransaction): - """Add a transaction to the block""" - self.transactions.append(tx) - self._merkle_tree = None # Invalidate cache - - def add_attestation(self, attestation: Dict): - """Add an attestation to the block""" - self.attestations.append(attestation) - - @property - def merkle_root(self) -> str: - """Compute merkle root of transactions""" - if self._merkle_tree is None: - self._merkle_tree = MerkleTree() - for tx in self.transactions: - tx_hash = bytes.fromhex(tx.tx_hash) - self._merkle_tree.add_leaf_hash(tx_hash) - - return self._merkle_tree.root_hex - - def compute_attestations_hash(self) -> str: - """Compute hash of attestations""" - if not self.attestations: - return "0" * 64 - - # Canonical JSON of attestations - attestations_bytes = canonical_json(sorted( - self.attestations, - key=lambda a: a.get("miner", "") - )) - return blake2b256_hex(attestations_bytes) - - def to_dict(self) -> Dict: - """Convert to dictionary""" - return { - "transactions": [tx.to_dict() for tx in self.transactions], - "attestations": self.attestations, - "merkle_root": self.merkle_root, - "attestations_hash": self.compute_attestations_hash(), - "tx_count": len(self.transactions), - "attestation_count": len(self.attestations) - } - - @classmethod - def from_dict(cls, d: Dict) -> "BlockBody": - """Create from dictionary""" - body = cls() - for tx_dict in d.get("transactions", []): - body.transactions.append(SignedTransaction.from_dict(tx_dict)) - body.attestations = d.get("attestations", []) - return body - - -# ============================================================================= -# FULL BLOCK -# ============================================================================= - -@dataclass -class Block: - """ - Complete block with header and body. 
- """ - header: CanonicalBlockHeader - body: BlockBody - - @property - def hash(self) -> str: - """Get block hash""" - return self.header.compute_hash() - - @property - def height(self) -> int: - """Get block height""" - return self.header.height - - def to_dict(self) -> Dict: - """Convert to dictionary""" - return { - "header": self.header.to_dict(), - "body": self.body.to_dict(), - "hash": self.hash - } - - @classmethod - def from_dict(cls, d: Dict) -> "Block": - """Create from dictionary""" - return cls( - header=CanonicalBlockHeader.from_dict(d["header"]), - body=BlockBody.from_dict(d["body"]) - ) - - def validate_structure(self) -> Tuple[bool, str]: - """ - Validate block structure (not consensus rules). - - Checks: - - Merkle root matches transactions - - Attestations hash matches - - All transactions have valid signatures - """ - # Check merkle root - if self.header.merkle_root != self.body.merkle_root: - return False, "Merkle root mismatch" - - # Check attestations hash - if self.header.attestations_hash != self.body.compute_attestations_hash(): - return False, "Attestations hash mismatch" - - # Check all transaction signatures - for tx in self.body.transactions: - if not tx.verify(): - return False, f"Invalid transaction signature: {tx.tx_hash}" - - return True, "" - - -# ============================================================================= -# BLOCK PRODUCER -# ============================================================================= - -class BlockProducer: - """ - Produces blocks in the PoA round-robin consensus. 
- """ - - def __init__( - self, - db_path: str, - tx_pool: TransactionPool, - signer: Optional[Ed25519Signer] = None, - wallet_address: Optional[str] = None - ): - self.db_path = db_path - self.tx_pool = tx_pool - self.signer = signer - self.wallet_address = wallet_address - self._lock = threading.Lock() - - def get_current_slot(self) -> int: - """Get current slot number""" - now = int(time.time()) - return (now - GENESIS_TIMESTAMP) // BLOCK_TIME - - def get_slot_start_time(self, slot: int) -> int: - """Get start timestamp for a slot""" - return GENESIS_TIMESTAMP + (slot * BLOCK_TIME) - - def get_attested_miners(self, current_ts: int) -> List[Tuple[str, str, Dict]]: - """ - Get all currently attested miners (within TTL window). - - Returns: List of (miner_id, device_arch, device_info) tuples, sorted alphabetically - """ - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT miner, device_arch, device_family, device_model, device_year, ts_ok - FROM miner_attest_recent - WHERE ts_ok >= ? - ORDER BY miner ASC - """, (current_ts - ATTESTATION_TTL,)) - - results = [] - for row in cursor.fetchall(): - device_info = { - "arch": row["device_arch"] or "modern_x86", - "family": row["device_family"] or "", - "model": row["device_model"] if "device_model" in row.keys() else "", - "year": row["device_year"] if "device_year" in row.keys() else 2025 - } - results.append((row["miner"], row["device_arch"], device_info)) - - return results - - def get_round_robin_producer(self, slot: int) -> Optional[str]: - """ - Deterministic round-robin block producer selection. - - Returns wallet address of the selected producer for this slot. 
- """ - current_ts = self.get_slot_start_time(slot) - attested_miners = self.get_attested_miners(current_ts) - - if not attested_miners: - return None - - producer_index = slot % len(attested_miners) - return attested_miners[producer_index][0] - - def is_my_turn(self, slot: int = None) -> bool: - """Check if it's this node's turn to produce a block""" - if not self.wallet_address: - return False - - if slot is None: - slot = self.get_current_slot() - - producer = self.get_round_robin_producer(slot) - return producer == self.wallet_address - - def get_latest_block(self) -> Optional[Dict]: - """Get the latest block from database""" - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT * FROM blocks - ORDER BY height DESC - LIMIT 1 - """) - - row = cursor.fetchone() - if row: - return dict(row) - return None - - def get_state_root(self) -> str: - """ - Compute current state root. - - State root is hash of all balances sorted by address. - """ - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT wallet, balance_urtc, wallet_nonce - FROM balances - ORDER BY wallet ASC - """) - - state = [] - for row in cursor.fetchall(): - state.append({ - "wallet": row["wallet"], - "balance": row["balance_urtc"], - "nonce": row["wallet_nonce"] if "wallet_nonce" in row.keys() else 0 - }) - - return blake2b256_hex(canonical_json(state)) - - def get_attestations_for_block(self) -> List[Dict]: - """Get attestations to include in block""" - current_ts = int(time.time()) - - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT miner, device_arch, device_family, ts_ok - FROM miner_attest_recent - WHERE ts_ok >= ? 
- ORDER BY ts_ok DESC - """, (current_ts - ATTESTATION_TTL,)) - - return [ - { - "miner": row["miner"], - "arch": row["device_arch"], - "family": row["device_family"], - "timestamp": row["ts_ok"] - } - for row in cursor.fetchall() - ] - - def produce_block(self, slot: int = None) -> Optional[Block]: - """ - Produce a new block. - - Returns None if: - - Not this node's turn - - No signer configured - - Block production fails - """ - if slot is None: - slot = self.get_current_slot() - - # Check if it's our turn - expected_producer = self.get_round_robin_producer(slot) - if expected_producer != self.wallet_address: - logger.debug(f"Not our turn: slot {slot} belongs to {expected_producer}") - return None - - if not self.signer: - logger.error("No signer configured, cannot produce block") - return None - - with self._lock: - try: - # Get previous block - latest = self.get_latest_block() - prev_hash = latest["block_hash"] if latest else "0" * 64 - prev_height = latest["height"] if latest else -1 - - new_height = prev_height + 1 - - # Collect transactions - pending_txs = self.tx_pool.get_pending_transactions(MAX_TXS_PER_BLOCK) - - # Create block body - body = BlockBody() - for tx in pending_txs: - body.add_transaction(tx) - - # Add attestations - attestations = self.get_attestations_for_block() - for att in attestations: - body.add_attestation(att) - - # Compute state root - state_root = self.get_state_root() - - # Create header - header = CanonicalBlockHeader( - version=1, - height=new_height, - timestamp=int(time.time() * 1000), - prev_hash=prev_hash, - merkle_root=body.merkle_root, - state_root=state_root, - attestations_hash=body.compute_attestations_hash(), - producer=self.wallet_address - ) - - # Sign header - header.sign(self.signer) - - # Create block - block = Block(header=header, body=body) - - # Validate structure - is_valid, error = block.validate_structure() - if not is_valid: - logger.error(f"Block validation failed: {error}") - return None - - 
logger.info(f"Produced block {new_height}: {block.hash[:16]}... " - f"txs={len(body.transactions)} attestations={len(body.attestations)}") - - return block - - except Exception as e: - logger.error(f"Block production failed: {e}") - return None - - def save_block(self, block: Block) -> bool: - """Save a block to database""" - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - - try: - # Ensure blocks table exists - cursor.execute(""" - CREATE TABLE IF NOT EXISTS blocks ( - height INTEGER PRIMARY KEY, - block_hash TEXT UNIQUE NOT NULL, - prev_hash TEXT NOT NULL, - timestamp INTEGER NOT NULL, - merkle_root TEXT NOT NULL, - state_root TEXT NOT NULL, - attestations_hash TEXT NOT NULL, - producer TEXT NOT NULL, - producer_sig TEXT NOT NULL, - tx_count INTEGER NOT NULL, - attestation_count INTEGER NOT NULL, - body_json TEXT NOT NULL, - created_at INTEGER NOT NULL - ) - """) - - # Insert block - cursor.execute(""" - INSERT INTO blocks ( - height, block_hash, prev_hash, timestamp, - merkle_root, state_root, attestations_hash, - producer, producer_sig, tx_count, attestation_count, - body_json, created_at - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
- """, ( - block.height, - block.hash, - block.header.prev_hash, - block.header.timestamp, - block.header.merkle_root, - block.header.state_root, - block.header.attestations_hash, - block.header.producer, - block.header.producer_sig, - len(block.body.transactions), - len(block.body.attestations), - json.dumps(block.body.to_dict()), - int(time.time()) - )) - - # Confirm transactions - for tx in block.body.transactions: - self.tx_pool.confirm_transaction( - tx.tx_hash, - block.height, - block.hash - ) - - conn.commit() - - logger.info(f"Saved block {block.height}: {block.hash[:16]}...") - return True - - except sqlite3.IntegrityError as e: - logger.warning(f"Block already exists: {e}") - return False - except Exception as e: - logger.error(f"Failed to save block: {e}") - return False - - -# ============================================================================= -# BLOCK VALIDATOR -# ============================================================================= - -class BlockValidator: - """ - Validates blocks according to consensus rules. - """ - - def __init__(self, db_path: str): - self.db_path = db_path - - def validate_block( - self, - block: Block, - expected_producer: str = None, - producer_pubkey: bytes = None - ) -> Tuple[bool, str]: - """ - Validate a block. - - Checks: - 1. Block structure (merkle root, signatures) - 2. Producer is correct for this slot - 3. Block height is sequential - 4. Prev hash is correct - 5. Producer signature is valid - """ - # 1. Validate structure - is_valid, error = block.validate_structure() - if not is_valid: - return False, f"Structure invalid: {error}" - - # 2. Check producer (if we know expected) - if expected_producer and block.header.producer != expected_producer: - return False, f"Wrong producer: expected {expected_producer}, got {block.header.producer}" - - # 3. 
Check height is sequential - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - cursor.execute("SELECT MAX(height) FROM blocks") - result = cursor.fetchone() - max_height = result[0] if result[0] is not None else -1 - - if block.height != max_height + 1: - return False, f"Invalid height: expected {max_height + 1}, got {block.height}" - - # 4. Check prev hash - if block.height > 0: - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - cursor.execute( - "SELECT block_hash FROM blocks WHERE height = ?", - (block.height - 1,) - ) - result = cursor.fetchone() - if result and result[0] != block.header.prev_hash: - return False, f"Invalid prev_hash" - - # 5. Validate producer signature (if we have pubkey) - if producer_pubkey: - if not block.header.verify_signature(producer_pubkey): - return False, "Invalid producer signature" - - return True, "" - - -# ============================================================================= -# API ROUTES -# ============================================================================= - -def create_block_api_routes(app, producer: BlockProducer, validator: BlockValidator): - """Create Flask routes for block API""" - from flask import request, jsonify - - @app.route('/block/latest', methods=['GET']) - def get_latest_block(): - """Get latest block""" - latest = producer.get_latest_block() - if latest: - return jsonify(latest) - return jsonify({"error": "No blocks found"}), 404 - - @app.route('/block/', methods=['GET']) - def get_block_by_height(height: int): - """Get block by height""" - with sqlite3.connect(producer.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - cursor.execute("SELECT * FROM blocks WHERE height = ?", (height,)) - row = cursor.fetchone() - - if row: - return jsonify(dict(row)) - return jsonify({"error": "Block not found"}), 404 - - @app.route('/block/hash/', methods=['GET']) - def get_block_by_hash(block_hash: str): - """Get block by hash""" - with 
sqlite3.connect(producer.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - cursor.execute("SELECT * FROM blocks WHERE block_hash = ?", (block_hash,)) - row = cursor.fetchone() - - if row: - return jsonify(dict(row)) - return jsonify({"error": "Block not found"}), 404 - - @app.route('/block/slot', methods=['GET']) - def get_current_slot(): - """Get current slot info""" - slot = producer.get_current_slot() - expected_producer = producer.get_round_robin_producer(slot) - slot_start = producer.get_slot_start_time(slot) - slot_end = slot_start + BLOCK_TIME - - return jsonify({ - "slot": slot, - "expected_producer": expected_producer, - "slot_start": slot_start, - "slot_end": slot_end, - "time_remaining": max(0, slot_end - int(time.time())), - "is_my_turn": producer.is_my_turn(slot) - }) - - @app.route('/block/producers', methods=['GET']) - def list_producers(): - """List current block producers""" - current_ts = int(time.time()) - miners = producer.get_attested_miners(current_ts) - - return jsonify({ - "count": len(miners), - "producers": [ - { - "wallet": m[0], - "arch": m[1], - "device_info": m[2] - } - for m in miners - ] - }) - - -# ============================================================================= -# TESTING -# ============================================================================= - -if __name__ == "__main__": - import tempfile - import os - - print("=" * 70) - print("RustChain Block Producer - Test Suite") - print("=" * 70) - - # Create temporary database - with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as f: - db_path = f.name - - try: - # Initialize - tx_pool = TransactionPool(db_path) - - # Create test wallet - from rustchain_crypto import generate_wallet_keypair - - addr, pub, priv = generate_wallet_keypair() - signer = Ed25519Signer(bytes.fromhex(priv)) - - print(f"\n=== Test Wallet ===") - print(f"Address: {addr}") - - # Seed balance - with sqlite3.connect(db_path) as conn: - conn.execute( - "INSERT 
INTO balances (wallet, balance_urtc, wallet_nonce) VALUES (?, ?, ?)", - (addr, 1000_000_000_000, 0) # 10000 RTC - ) - - # Add fake attestation for this wallet - conn.execute(""" - CREATE TABLE IF NOT EXISTS miner_attest_recent ( - miner TEXT PRIMARY KEY, - device_arch TEXT, - device_family TEXT, - ts_ok INTEGER - ) - """) - conn.execute( - "INSERT INTO miner_attest_recent VALUES (?, ?, ?, ?)", - (addr, "test_arch", "Test Device", int(time.time())) - ) - - # Create producer - producer = BlockProducer( - db_path=db_path, - tx_pool=tx_pool, - signer=signer, - wallet_address=addr - ) - - print(f"\n=== Slot Info ===") - slot = producer.get_current_slot() - print(f"Current slot: {slot}") - print(f"Expected producer: {producer.get_round_robin_producer(slot)}") - print(f"Is my turn: {producer.is_my_turn()}") - - # Create a test transaction - print(f"\n=== Creating Test Transaction ===") - addr2, _, _ = generate_wallet_keypair() - - tx = SignedTransaction( - from_addr=addr, - to_addr=addr2, - amount_urtc=100_000_000, # 1 RTC - nonce=1, - timestamp=int(time.time() * 1000), - memo="Test" - ) - tx.sign(signer) - - success, result = tx_pool.submit_transaction(tx) - print(f"TX submitted: {success}, {result}") - - # Produce block - print(f"\n=== Producing Block ===") - block = producer.produce_block() - - if block: - print(f"Block height: {block.height}") - print(f"Block hash: {block.hash}") - print(f"Merkle root: {block.header.merkle_root}") - print(f"State root: {block.header.state_root}") - print(f"TX count: {len(block.body.transactions)}") - print(f"Attestation count: {len(block.body.attestations)}") - - # Save block - print(f"\n=== Saving Block ===") - saved = producer.save_block(block) - print(f"Saved: {saved}") - - # Validate - print(f"\n=== Validating Block ===") - validator = BlockValidator(db_path) - # Need to fake the expected producer since we only have one attester - is_valid, error = block.validate_structure() - print(f"Structure valid: {is_valid} {error}") - - # 
Check block in DB - latest = producer.get_latest_block() - print(f"\n=== Latest Block in DB ===") - print(f"Height: {latest['height']}") - print(f"Hash: {latest['block_hash'][:32]}...") - - else: - print("Block production failed (not our turn or error)") - - print("\n" + "=" * 70) - print("Tests complete!") - print("=" * 70) - - finally: - os.unlink(db_path) +#!/usr/bin/env python3 +""" +RustChain Block Producer - Mainnet Security +============================================ + +Phase 1 & 2 Implementation: +- Canonical block header construction +- Merkle tree for transaction body +- PoA round-robin block producer selection +- Block signing with Ed25519 + +Implements secure block production for Proof of Antiquity consensus. +""" + +import sqlite3 +import time +import threading +import logging +import json +from typing import Dict, List, Optional, Tuple +from dataclasses import dataclass, field + +from rustchain_crypto import ( + CanonicalBlockHeader, + MerkleTree, + SignedTransaction, + Ed25519Signer, + blake2b256_hex, + canonical_json +) +from rustchain_tx_handler import TransactionPool + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [BLOCK] %(levelname)s: %(message)s' +) +logger = logging.getLogger(__name__) + + +# ============================================================================= +# CONSTANTS +# ============================================================================= + +GENESIS_TIMESTAMP = 1728000000 # Oct 4, 2024 00:00:00 UTC +BLOCK_TIME = 600 # 10 minutes (600 seconds) +MAX_TXS_PER_BLOCK = 1000 +ATTESTATION_TTL = 600 # 10 minutes + + +# ============================================================================= +# BLOCK BODY +# ============================================================================= + +@dataclass +class BlockBody: + """ + Block body containing transactions and attestations. 
+ """ + transactions: List[SignedTransaction] = field(default_factory=list) + attestations: List[Dict] = field(default_factory=list) + _merkle_tree: Optional[MerkleTree] = None + + def add_transaction(self, tx: SignedTransaction): + """Add a transaction to the block""" + self.transactions.append(tx) + self._merkle_tree = None # Invalidate cache + + def add_attestation(self, attestation: Dict): + """Add an attestation to the block""" + self.attestations.append(attestation) + + @property + def merkle_root(self) -> str: + """Compute merkle root of transactions""" + if self._merkle_tree is None: + self._merkle_tree = MerkleTree() + for tx in self.transactions: + tx_hash = bytes.fromhex(tx.tx_hash) + self._merkle_tree.add_leaf_hash(tx_hash) + + return self._merkle_tree.root_hex + + def compute_attestations_hash(self) -> str: + """Compute hash of attestations""" + if not self.attestations: + return "0" * 64 + + # Canonical JSON of attestations + attestations_bytes = canonical_json(sorted( + self.attestations, + key=lambda a: a.get("miner", "") + )) + return blake2b256_hex(attestations_bytes) + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return { + "transactions": [tx.to_dict() for tx in self.transactions], + "attestations": self.attestations, + "merkle_root": self.merkle_root, + "attestations_hash": self.compute_attestations_hash(), + "tx_count": len(self.transactions), + "attestation_count": len(self.attestations) + } + + @classmethod + def from_dict(cls, d: Dict) -> "BlockBody": + """Create from dictionary""" + body = cls() + for tx_dict in d.get("transactions", []): + body.transactions.append(SignedTransaction.from_dict(tx_dict)) + body.attestations = d.get("attestations", []) + return body + + +# ============================================================================= +# FULL BLOCK +# ============================================================================= + +@dataclass +class Block: + """ + Complete block with header and body. 
+ """ + header: CanonicalBlockHeader + body: BlockBody + + @property + def hash(self) -> str: + """Get block hash""" + return self.header.compute_hash() + + @property + def height(self) -> int: + """Get block height""" + return self.header.height + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return { + "header": self.header.to_dict(), + "body": self.body.to_dict(), + "hash": self.hash + } + + @classmethod + def from_dict(cls, d: Dict) -> "Block": + """Create from dictionary""" + return cls( + header=CanonicalBlockHeader.from_dict(d["header"]), + body=BlockBody.from_dict(d["body"]) + ) + + def validate_structure(self) -> Tuple[bool, str]: + """ + Validate block structure (not consensus rules). + + Checks: + - Merkle root matches transactions + - Attestations hash matches + - All transactions have valid signatures + """ + # Check merkle root + if self.header.merkle_root != self.body.merkle_root: + return False, "Merkle root mismatch" + + # Check attestations hash + if self.header.attestations_hash != self.body.compute_attestations_hash(): + return False, "Attestations hash mismatch" + + # Check all transaction signatures + for tx in self.body.transactions: + if not tx.verify(): + return False, f"Invalid transaction signature: {tx.tx_hash}" + + return True, "" + + +# ============================================================================= +# BLOCK PRODUCER +# ============================================================================= + +class BlockProducer: + """ + Produces blocks in the PoA round-robin consensus. 
+ """ + + def __init__( + self, + db_path: str, + tx_pool: TransactionPool, + signer: Optional[Ed25519Signer] = None, + wallet_address: Optional[str] = None + ): + self.db_path = db_path + self.tx_pool = tx_pool + self.signer = signer + self.wallet_address = wallet_address + self._lock = threading.Lock() + + def get_current_slot(self) -> int: + """Get current slot number""" + now = int(time.time()) + return (now - GENESIS_TIMESTAMP) // BLOCK_TIME + + def get_slot_start_time(self, slot: int) -> int: + """Get start timestamp for a slot""" + return GENESIS_TIMESTAMP + (slot * BLOCK_TIME) + + def get_attested_miners(self, current_ts: int) -> List[Tuple[str, str, Dict]]: + """ + Get all currently attested miners (within TTL window). + + Returns: List of (miner_id, device_arch, device_info) tuples, sorted alphabetically + """ + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT miner, device_arch, device_family, device_model, device_year, ts_ok + FROM miner_attest_recent + WHERE ts_ok >= ? + ORDER BY miner ASC + """, (current_ts - ATTESTATION_TTL,)) + + results = [] + for row in cursor.fetchall(): + device_info = { + "arch": row["device_arch"] or "modern_x86", + "family": row["device_family"] or "", + "model": row["device_model"] if "device_model" in row.keys() else "", + "year": row["device_year"] if "device_year" in row.keys() else 2025 + } + results.append((row["miner"], row["device_arch"], device_info)) + + return results + + def get_round_robin_producer(self, slot: int) -> Optional[str]: + """ + Deterministic round-robin block producer selection. + + Returns wallet address of the selected producer for this slot. 
+ """ + current_ts = self.get_slot_start_time(slot) + attested_miners = self.get_attested_miners(current_ts) + + if not attested_miners: + return None + + producer_index = slot % len(attested_miners) + return attested_miners[producer_index][0] + + def is_my_turn(self, slot: int = None) -> bool: + """Check if it's this node's turn to produce a block""" + if not self.wallet_address: + return False + + if slot is None: + slot = self.get_current_slot() + + producer = self.get_round_robin_producer(slot) + return producer == self.wallet_address + + def get_latest_block(self) -> Optional[Dict]: + """Get the latest block from database""" + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT * FROM blocks + ORDER BY height DESC + LIMIT 1 + """) + + row = cursor.fetchone() + if row: + return dict(row) + return None + + def get_state_root(self) -> str: + """ + Compute current state root. + + State root is hash of all balances sorted by address. + """ + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT wallet, balance_urtc, wallet_nonce + FROM balances + ORDER BY wallet ASC + """) + + state = [] + for row in cursor.fetchall(): + state.append({ + "wallet": row["wallet"], + "balance": row["balance_urtc"], + "nonce": row["wallet_nonce"] if "wallet_nonce" in row.keys() else 0 + }) + + return blake2b256_hex(canonical_json(state)) + + def get_attestations_for_block(self) -> List[Dict]: + """Get attestations to include in block""" + current_ts = int(time.time()) + + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT miner, device_arch, device_family, ts_ok + FROM miner_attest_recent + WHERE ts_ok >= ? 
+ ORDER BY ts_ok DESC + """, (current_ts - ATTESTATION_TTL,)) + + return [ + { + "miner": row["miner"], + "arch": row["device_arch"], + "family": row["device_family"], + "timestamp": row["ts_ok"] + } + for row in cursor.fetchall() + ] + + def produce_block(self, slot: int = None) -> Optional[Block]: + """ + Produce a new block. + + Returns None if: + - Not this node's turn + - No signer configured + - Block production fails + """ + if slot is None: + slot = self.get_current_slot() + + # Check if it's our turn + expected_producer = self.get_round_robin_producer(slot) + if expected_producer != self.wallet_address: + logger.debug(f"Not our turn: slot {slot} belongs to {expected_producer}") + return None + + if not self.signer: + logger.error("No signer configured, cannot produce block") + return None + + with self._lock: + try: + # Get previous block + latest = self.get_latest_block() + prev_hash = latest["block_hash"] if latest else "0" * 64 + prev_height = latest["height"] if latest else -1 + + new_height = prev_height + 1 + + # Collect transactions + pending_txs = self.tx_pool.get_pending_transactions(MAX_TXS_PER_BLOCK) + + # Create block body + body = BlockBody() + for tx in pending_txs: + body.add_transaction(tx) + + # Add attestations + attestations = self.get_attestations_for_block() + for att in attestations: + body.add_attestation(att) + + # Compute state root + state_root = self.get_state_root() + + # Create header + header = CanonicalBlockHeader( + version=1, + height=new_height, + timestamp=int(time.time() * 1000), + prev_hash=prev_hash, + merkle_root=body.merkle_root, + state_root=state_root, + attestations_hash=body.compute_attestations_hash(), + producer=self.wallet_address + ) + + # Sign header + header.sign(self.signer) + + # Create block + block = Block(header=header, body=body) + + # Validate structure + is_valid, error = block.validate_structure() + if not is_valid: + logger.error(f"Block validation failed: {error}") + return None + + 
logger.info(f"Produced block {new_height}: {block.hash[:16]}... " + f"txs={len(body.transactions)} attestations={len(body.attestations)}") + + return block + + except Exception as e: + logger.error(f"Block production failed: {e}") + return None + + def save_block(self, block: Block) -> bool: + """Save a block to database""" + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + + try: + # Ensure blocks table exists + cursor.execute(""" + CREATE TABLE IF NOT EXISTS blocks ( + height INTEGER PRIMARY KEY, + block_hash TEXT UNIQUE NOT NULL, + prev_hash TEXT NOT NULL, + timestamp INTEGER NOT NULL, + merkle_root TEXT NOT NULL, + state_root TEXT NOT NULL, + attestations_hash TEXT NOT NULL, + producer TEXT NOT NULL, + producer_sig TEXT NOT NULL, + tx_count INTEGER NOT NULL, + attestation_count INTEGER NOT NULL, + body_json TEXT NOT NULL, + created_at INTEGER NOT NULL + ) + """) + + # Insert block + cursor.execute(""" + INSERT INTO blocks ( + height, block_hash, prev_hash, timestamp, + merkle_root, state_root, attestations_hash, + producer, producer_sig, tx_count, attestation_count, + body_json, created_at + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ """, ( + block.height, + block.hash, + block.header.prev_hash, + block.header.timestamp, + block.header.merkle_root, + block.header.state_root, + block.header.attestations_hash, + block.header.producer, + block.header.producer_sig, + len(block.body.transactions), + len(block.body.attestations), + json.dumps(block.body.to_dict()), + int(time.time()) + )) + + # Confirm transactions + for tx in block.body.transactions: + self.tx_pool.confirm_transaction( + tx.tx_hash, + block.height, + block.hash + ) + + conn.commit() + + logger.info(f"Saved block {block.height}: {block.hash[:16]}...") + return True + + except sqlite3.IntegrityError as e: + logger.warning(f"Block already exists: {e}") + return False + except Exception as e: + logger.error(f"Failed to save block: {e}") + return False + + +# ============================================================================= +# BLOCK VALIDATOR +# ============================================================================= + +class BlockValidator: + """ + Validates blocks according to consensus rules. + """ + + def __init__(self, db_path: str): + self.db_path = db_path + + def validate_block( + self, + block: Block, + expected_producer: str = None, + producer_pubkey: bytes = None + ) -> Tuple[bool, str]: + """ + Validate a block. + + Checks: + 1. Block structure (merkle root, signatures) + 2. Producer is correct for this slot + 3. Block height is sequential + 4. Prev hash is correct + 5. Producer signature is valid + """ + # 1. Validate structure + is_valid, error = block.validate_structure() + if not is_valid: + return False, f"Structure invalid: {error}" + + # 2. Check producer (if we know expected) + if expected_producer and block.header.producer != expected_producer: + return False, f"Wrong producer: expected {expected_producer}, got {block.header.producer}" + + # 3. 
Check height is sequential + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + cursor.execute("SELECT MAX(height) FROM blocks") + result = cursor.fetchone() + max_height = result[0] if result[0] is not None else -1 + + if block.height != max_height + 1: + return False, f"Invalid height: expected {max_height + 1}, got {block.height}" + + # 4. Check prev hash + if block.height > 0: + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + cursor.execute( + "SELECT block_hash FROM blocks WHERE height = ?", + (block.height - 1,) + ) + result = cursor.fetchone() + if result and result[0] != block.header.prev_hash: + return False, f"Invalid prev_hash" + + # 5. Validate producer signature (if we have pubkey) + if producer_pubkey: + if not block.header.verify_signature(producer_pubkey): + return False, "Invalid producer signature" + + return True, "" + + +# ============================================================================= +# API ROUTES +# ============================================================================= + +def create_block_api_routes(app, producer: BlockProducer, validator: BlockValidator): + """Create Flask routes for block API""" + from flask import request, jsonify + + @app.route('/block/latest', methods=['GET']) + def get_latest_block(): + """Get latest block""" + latest = producer.get_latest_block() + if latest: + return jsonify(latest) + return jsonify({"error": "No blocks found"}), 404 + + @app.route('/block/', methods=['GET']) + def get_block_by_height(height: int): + """Get block by height""" + with sqlite3.connect(producer.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + cursor.execute("SELECT * FROM blocks WHERE height = ?", (height,)) + row = cursor.fetchone() + + if row: + return jsonify(dict(row)) + return jsonify({"error": "Block not found"}), 404 + + @app.route('/block/hash/', methods=['GET']) + def get_block_by_hash(block_hash: str): + """Get block by hash""" + with 
sqlite3.connect(producer.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + cursor.execute("SELECT * FROM blocks WHERE block_hash = ?", (block_hash,)) + row = cursor.fetchone() + + if row: + return jsonify(dict(row)) + return jsonify({"error": "Block not found"}), 404 + + @app.route('/block/slot', methods=['GET']) + def get_current_slot(): + """Get current slot info""" + slot = producer.get_current_slot() + expected_producer = producer.get_round_robin_producer(slot) + slot_start = producer.get_slot_start_time(slot) + slot_end = slot_start + BLOCK_TIME + + return jsonify({ + "slot": slot, + "expected_producer": expected_producer, + "slot_start": slot_start, + "slot_end": slot_end, + "time_remaining": max(0, slot_end - int(time.time())), + "is_my_turn": producer.is_my_turn(slot) + }) + + @app.route('/block/producers', methods=['GET']) + def list_producers(): + """List current block producers""" + current_ts = int(time.time()) + miners = producer.get_attested_miners(current_ts) + + return jsonify({ + "count": len(miners), + "producers": [ + { + "wallet": m[0], + "arch": m[1], + "device_info": m[2] + } + for m in miners + ] + }) + + +# ============================================================================= +# TESTING +# ============================================================================= + +if __name__ == "__main__": + import tempfile + import os + + print("=" * 70) + print("RustChain Block Producer - Test Suite") + print("=" * 70) + + # Create temporary database + with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as f: + db_path = f.name + + try: + # Initialize + tx_pool = TransactionPool(db_path) + + # Create test wallet + from rustchain_crypto import generate_wallet_keypair + + addr, pub, priv = generate_wallet_keypair() + signer = Ed25519Signer(bytes.fromhex(priv)) + + print(f"\n=== Test Wallet ===") + print(f"Address: {addr}") + + # Seed balance + with sqlite3.connect(db_path) as conn: + conn.execute( + "INSERT 
INTO balances (wallet, balance_urtc, wallet_nonce) VALUES (?, ?, ?)", + (addr, 1000_000_000_000, 0) # 10000 RTC + ) + + # Add fake attestation for this wallet + conn.execute(""" + CREATE TABLE IF NOT EXISTS miner_attest_recent ( + miner TEXT PRIMARY KEY, + device_arch TEXT, + device_family TEXT, + ts_ok INTEGER + ) + """) + conn.execute( + "INSERT INTO miner_attest_recent VALUES (?, ?, ?, ?)", + (addr, "test_arch", "Test Device", int(time.time())) + ) + + # Create producer + producer = BlockProducer( + db_path=db_path, + tx_pool=tx_pool, + signer=signer, + wallet_address=addr + ) + + print(f"\n=== Slot Info ===") + slot = producer.get_current_slot() + print(f"Current slot: {slot}") + print(f"Expected producer: {producer.get_round_robin_producer(slot)}") + print(f"Is my turn: {producer.is_my_turn()}") + + # Create a test transaction + print(f"\n=== Creating Test Transaction ===") + addr2, _, _ = generate_wallet_keypair() + + tx = SignedTransaction( + from_addr=addr, + to_addr=addr2, + amount_urtc=100_000_000, # 1 RTC + nonce=1, + timestamp=int(time.time() * 1000), + memo="Test" + ) + tx.sign(signer) + + success, result = tx_pool.submit_transaction(tx) + print(f"TX submitted: {success}, {result}") + + # Produce block + print(f"\n=== Producing Block ===") + block = producer.produce_block() + + if block: + print(f"Block height: {block.height}") + print(f"Block hash: {block.hash}") + print(f"Merkle root: {block.header.merkle_root}") + print(f"State root: {block.header.state_root}") + print(f"TX count: {len(block.body.transactions)}") + print(f"Attestation count: {len(block.body.attestations)}") + + # Save block + print(f"\n=== Saving Block ===") + saved = producer.save_block(block) + print(f"Saved: {saved}") + + # Validate + print(f"\n=== Validating Block ===") + validator = BlockValidator(db_path) + # Need to fake the expected producer since we only have one attester + is_valid, error = block.validate_structure() + print(f"Structure valid: {is_valid} {error}") + + # 
Check block in DB + latest = producer.get_latest_block() + print(f"\n=== Latest Block in DB ===") + print(f"Height: {latest['height']}") + print(f"Hash: {latest['block_hash'][:32]}...") + + else: + print("Block production failed (not our turn or error)") + + print("\n" + "=" * 70) + print("Tests complete!") + print("=" * 70) + + finally: + os.unlink(db_path) diff --git a/node/rustchain_ergo_anchor.py b/node/rustchain_ergo_anchor.py index 4387f970..81067d31 100644 --- a/node/rustchain_ergo_anchor.py +++ b/node/rustchain_ergo_anchor.py @@ -1,579 +1,579 @@ -#!/usr/bin/env python3 -""" -RustChain Ergo Cross-Chain Anchoring -===================================== - -Phase 4 Implementation: -- Periodic anchoring of RustChain state to Ergo blockchain -- Merkle root commitment transactions -- Anchor verification and proof generation - -Provides finality by anchoring RustChain state to Ergo's PoW chain. -""" - -import os -import time -import json -import hashlib -import logging -import threading -import requests -from typing import Dict, List, Optional, Tuple -from dataclasses import dataclass - -from rustchain_crypto import blake2b256_hex, canonical_json, MerkleTree - -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s [ANCHOR] %(levelname)s: %(message)s' -) -logger = logging.getLogger(__name__) - - -# ============================================================================= -# CONFIGURATION -# ============================================================================= - -# Ergo node endpoints -ERGO_NODE_URL = os.environ.get("ERGO_NODE_URL", "http://localhost:9053") -ERGO_API_KEY = os.environ.get("ERGO_API_KEY", "") - -# Anchoring parameters -ANCHOR_INTERVAL_BLOCKS = 144 # Anchor every 144 RustChain blocks (~24 hours) -ANCHOR_CONFIRMATION_DEPTH = 6 # Wait for 6 Ergo confirmations - -# RustChain anchor wallet (holds ERG for anchor fees) -ANCHOR_WALLET_ADDRESS = os.environ.get("ANCHOR_WALLET", "") - - -# 
============================================================================= -# ANCHOR COMMITMENT -# ============================================================================= - -@dataclass -class AnchorCommitment: - """ - Commitment to be anchored to Ergo. - """ - rustchain_height: int # RustChain block height - rustchain_hash: str # RustChain block hash - state_root: str # State merkle root - attestations_root: str # Attestations merkle root - timestamp: int # Unix timestamp (ms) - commitment_hash: str = "" # Blake2b256 of all fields - - def compute_hash(self) -> str: - """Compute commitment hash""" - data = { - "rc_height": self.rustchain_height, - "rc_hash": self.rustchain_hash, - "state_root": self.state_root, - "attestations_root": self.attestations_root, - "timestamp": self.timestamp - } - return blake2b256_hex(canonical_json(data)) - - def to_dict(self) -> Dict: - """Convert to dictionary""" - if not self.commitment_hash: - self.commitment_hash = self.compute_hash() - return { - "rustchain_height": self.rustchain_height, - "rustchain_hash": self.rustchain_hash, - "state_root": self.state_root, - "attestations_root": self.attestations_root, - "timestamp": self.timestamp, - "commitment_hash": self.commitment_hash - } - - @classmethod - def from_dict(cls, d: Dict) -> "AnchorCommitment": - """Create from dictionary""" - return cls( - rustchain_height=d["rustchain_height"], - rustchain_hash=d["rustchain_hash"], - state_root=d["state_root"], - attestations_root=d["attestations_root"], - timestamp=d["timestamp"], - commitment_hash=d.get("commitment_hash", "") - ) - - -# ============================================================================= -# ERGO CLIENT -# ============================================================================= - -class ErgoClient: - """ - Client for interacting with Ergo node. 
- """ - - def __init__(self, node_url: str = ERGO_NODE_URL, api_key: str = ERGO_API_KEY): - self.node_url = node_url.rstrip('/') - self.api_key = api_key - self.session = requests.Session() - if api_key: - self.session.headers['api_key'] = api_key - - def _get(self, endpoint: str) -> Optional[Dict]: - """Make GET request to Ergo node""" - try: - resp = self.session.get(f"{self.node_url}{endpoint}", timeout=30) - if resp.status_code == 200: - return resp.json() - else: - logger.error(f"Ergo GET {endpoint} failed: {resp.status_code}") - return None - except Exception as e: - logger.error(f"Ergo GET {endpoint} error: {e}") - return None - - def _post(self, endpoint: str, data: Dict) -> Optional[Dict]: - """Make POST request to Ergo node""" - try: - resp = self.session.post( - f"{self.node_url}{endpoint}", - json=data, - timeout=30 - ) - if resp.status_code in [200, 201]: - return resp.json() - else: - logger.error(f"Ergo POST {endpoint} failed: {resp.status_code} - {resp.text}") - return None - except Exception as e: - logger.error(f"Ergo POST {endpoint} error: {e}") - return None - - def get_info(self) -> Optional[Dict]: - """Get node info""" - return self._get("/info") - - def get_height(self) -> int: - """Get current blockchain height""" - info = self.get_info() - return info.get("fullHeight", 0) if info else 0 - - def get_wallet_addresses(self) -> List[str]: - """Get wallet addresses""" - resp = self._get("/wallet/addresses") - return resp if resp else [] - - def get_wallet_balance(self) -> int: - """Get wallet balance in nanoERG""" - resp = self._get("/wallet/balances") - if resp: - return resp.get("balance", 0) - return 0 - - def create_anchor_transaction( - self, - commitment: AnchorCommitment, - fee_nano: int = 1_000_000 # 0.001 ERG - ) -> Optional[str]: - """ - Create an anchor transaction on Ergo. - - Stores commitment hash in a data output. - - Returns transaction ID if successful. 
- """ - commitment_bytes = bytes.fromhex(commitment.commitment_hash) - - # Build transaction request - tx_request = { - "requests": [ - { - "address": ANCHOR_WALLET_ADDRESS, # Send back to self - "value": 1_000_000, # 0.001 ERG (minimum box value) - "registers": { - # R4: RustChain height (Long) - "R4": f"05{commitment.rustchain_height:016x}", - # R5: Commitment hash (Coll[Byte]) - "R5": f"0e40{commitment.commitment_hash}", - # R6: Timestamp (Long) - "R6": f"05{commitment.timestamp:016x}" - } - } - ], - "fee": fee_nano, - "inputsRaw": [] - } - - # Generate transaction - resp = self._post("/wallet/transaction/generate", tx_request) - if not resp: - return None - - # Sign transaction - unsigned_tx = resp - signed = self._post("/wallet/transaction/sign", unsigned_tx) - if not signed: - return None - - # Send transaction - result = self._post("/transactions", signed) - if result: - tx_id = result.get("id") - logger.info(f"Anchor TX submitted: {tx_id}") - return tx_id - - return None - - def get_transaction(self, tx_id: str) -> Optional[Dict]: - """Get transaction by ID""" - return self._get(f"/transactions/{tx_id}") - - def get_transaction_confirmations(self, tx_id: str) -> int: - """Get number of confirmations for transaction""" - tx = self.get_transaction(tx_id) - if tx and "numConfirmations" in tx: - return tx["numConfirmations"] - - # Try getting from mempool or unconfirmed - unconfirmed = self._get(f"/transactions/unconfirmed/{tx_id}") - if unconfirmed: - return 0 - - return -1 # Transaction not found - - def verify_anchor(self, tx_id: str, commitment: AnchorCommitment) -> Tuple[bool, str]: - """ - Verify an anchor transaction contains the expected commitment. 
- - Returns (is_valid, error_message) - """ - tx = self.get_transaction(tx_id) - if not tx: - return False, "Transaction not found" - - # Check outputs for commitment - for output in tx.get("outputs", []): - registers = output.get("additionalRegisters", {}) - - # Check R5 for commitment hash - r5 = registers.get("R5", {}).get("serializedValue", "") - if r5: - # Remove prefix (0e40 = Coll[Byte] with 64 bytes) - if r5.startswith("0e40"): - stored_hash = r5[4:] - if stored_hash == commitment.commitment_hash: - return True, "" - - return False, "Commitment not found in transaction outputs" - - -# ============================================================================= -# ANCHOR SERVICE -# ============================================================================= - -class AnchorService: - """ - Service for managing RustChain -> Ergo anchoring. - """ - - def __init__( - self, - db_path: str, - ergo_client: ErgoClient = None, - interval_blocks: int = ANCHOR_INTERVAL_BLOCKS - ): - self.db_path = db_path - self.ergo = ergo_client or ErgoClient() - self.interval_blocks = interval_blocks - self._running = False - self._thread = None - - def get_last_anchor(self) -> Optional[Dict]: - """Get the last recorded anchor""" - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - # Ensure table exists - cursor.execute(""" - CREATE TABLE IF NOT EXISTS ergo_anchors ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - rustchain_height INTEGER NOT NULL, - rustchain_hash TEXT NOT NULL, - commitment_hash TEXT NOT NULL, - ergo_tx_id TEXT NOT NULL, - ergo_height INTEGER, - confirmations INTEGER DEFAULT 0, - status TEXT DEFAULT 'pending', - created_at INTEGER NOT NULL - ) - """) - - cursor.execute(""" - SELECT * FROM ergo_anchors - ORDER BY rustchain_height DESC - LIMIT 1 - """) - - row = cursor.fetchone() - return dict(row) if row else None - - def should_anchor(self, current_height: int) -> bool: - """Check if we should 
create a new anchor""" - last = self.get_last_anchor() - - if not last: - return current_height >= self.interval_blocks - - blocks_since = current_height - last["rustchain_height"] - return blocks_since >= self.interval_blocks - - def create_commitment(self, block: Dict) -> AnchorCommitment: - """Create an anchor commitment from a RustChain block""" - return AnchorCommitment( - rustchain_height=block["height"], - rustchain_hash=block["block_hash"], - state_root=block.get("state_root", "0" * 64), - attestations_root=block.get("attestations_hash", "0" * 64), - timestamp=int(time.time() * 1000) - ) - - def submit_anchor(self, commitment: AnchorCommitment) -> Optional[str]: - """Submit an anchor to Ergo""" - commitment.commitment_hash = commitment.compute_hash() - - logger.info(f"Submitting anchor for RC height {commitment.rustchain_height}") - logger.info(f"Commitment hash: {commitment.commitment_hash}") - - tx_id = self.ergo.create_anchor_transaction(commitment) - - if tx_id: - self._save_anchor(commitment, tx_id) - return tx_id - - return None - - def _save_anchor(self, commitment: AnchorCommitment, tx_id: str): - """Save anchor record to database""" - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - - cursor.execute(""" - INSERT INTO ergo_anchors - (rustchain_height, rustchain_hash, commitment_hash, - ergo_tx_id, status, created_at) - VALUES (?, ?, ?, ?, 'pending', ?) - """, ( - commitment.rustchain_height, - commitment.rustchain_hash, - commitment.commitment_hash, - tx_id, - int(time.time()) - )) - - def update_anchor_status(self, tx_id: str) -> Tuple[int, str]: - """ - Update anchor status based on Ergo confirmations. 
- - Returns (confirmations, status) - """ - confirmations = self.ergo.get_transaction_confirmations(tx_id) - - if confirmations < 0: - status = "not_found" - elif confirmations == 0: - status = "pending" - elif confirmations < ANCHOR_CONFIRMATION_DEPTH: - status = "confirming" - else: - status = "confirmed" - - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - cursor.execute(""" - UPDATE ergo_anchors - SET confirmations = ?, status = ? - WHERE ergo_tx_id = ? - """, (confirmations, status, tx_id)) - - return confirmations, status - - def get_anchor_proof(self, rustchain_height: int) -> Optional[Dict]: - """ - Get proof that a RustChain height was anchored to Ergo. - - Returns anchor details including Ergo transaction. - """ - import sqlite3 - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT * FROM ergo_anchors - WHERE rustchain_height <= ? - ORDER BY rustchain_height DESC - LIMIT 1 - """, (rustchain_height,)) - - row = cursor.fetchone() - if not row: - return None - - anchor = dict(row) - - # Get Ergo transaction details - tx = self.ergo.get_transaction(anchor["ergo_tx_id"]) - if tx: - anchor["ergo_transaction"] = tx - - return anchor - - def start(self, check_interval: int = 60): - """Start the anchor monitoring thread""" - if self._running: - return - - self._running = True - self._thread = threading.Thread( - target=self._monitor_loop, - args=(check_interval,), - daemon=True - ) - self._thread.start() - logger.info("Anchor service started") - - def stop(self): - """Stop the anchor monitoring thread""" - self._running = False - if self._thread: - self._thread.join(timeout=5) - logger.info("Anchor service stopped") - - def _monitor_loop(self, interval: int): - """Monitor pending anchors and update status""" - import sqlite3 - - while self._running: - try: - with sqlite3.connect(self.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = 
conn.cursor() - - # Get pending anchors - cursor.execute(""" - SELECT ergo_tx_id FROM ergo_anchors - WHERE status IN ('pending', 'confirming') - """) - - for row in cursor.fetchall(): - tx_id = row["ergo_tx_id"] - confs, status = self.update_anchor_status(tx_id) - logger.debug(f"Anchor {tx_id[:16]}... = {confs} confirmations ({status})") - - except Exception as e: - logger.error(f"Anchor monitor error: {e}") - - time.sleep(interval) - - -# ============================================================================= -# API ROUTES -# ============================================================================= - -def create_anchor_api_routes(app, anchor_service: AnchorService): - """Create Flask routes for anchor API""" - from flask import request, jsonify - - @app.route('/anchor/status', methods=['GET']) - def anchor_status(): - """Get anchoring service status""" - last = anchor_service.get_last_anchor() - ergo_height = anchor_service.ergo.get_height() - - return jsonify({ - "ergo_connected": ergo_height > 0, - "ergo_height": ergo_height, - "interval_blocks": anchor_service.interval_blocks, - "last_anchor": last - }) - - @app.route('/anchor/proof/', methods=['GET']) - def get_anchor_proof(height: int): - """Get anchor proof for a RustChain height""" - proof = anchor_service.get_anchor_proof(height) - if proof: - return jsonify(proof) - return jsonify({"error": "No anchor found for height"}), 404 - - @app.route('/anchor/list', methods=['GET']) - def list_anchors(): - """List all anchors""" - import sqlite3 - - limit = request.args.get('limit', 50, type=int) - offset = request.args.get('offset', 0, type=int) - - with sqlite3.connect(anchor_service.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute(""" - SELECT * FROM ergo_anchors - ORDER BY rustchain_height DESC - LIMIT ? OFFSET ? 
- """, (limit, offset)) - - anchors = [dict(row) for row in cursor.fetchall()] - - return jsonify({ - "count": len(anchors), - "anchors": anchors - }) - - -# ============================================================================= -# TESTING -# ============================================================================= - -if __name__ == "__main__": - print("=" * 70) - print("RustChain Ergo Anchoring - Test Suite") - print("=" * 70) - - # Test commitment creation - print("\n=== Commitment Creation ===") - commitment = AnchorCommitment( - rustchain_height=1000, - rustchain_hash="abc123" + "0" * 58, - state_root="def456" + "0" * 58, - attestations_root="789ghi" + "0" * 58, - timestamp=int(time.time() * 1000) - ) - - print(f"RC Height: {commitment.rustchain_height}") - print(f"RC Hash: {commitment.rustchain_hash[:16]}...") - print(f"Commitment Hash: {commitment.compute_hash()}") - - # Test serialization - print("\n=== Serialization ===") - d = commitment.to_dict() - print(f"Dict keys: {list(d.keys())}") - - restored = AnchorCommitment.from_dict(d) - print(f"Restored hash matches: {restored.compute_hash() == commitment.compute_hash()}") - - # Test Ergo client (if node available) - print("\n=== Ergo Client ===") - client = ErgoClient() - info = client.get_info() - - if info: - print(f"Connected to Ergo node") - print(f"Height: {info.get('fullHeight', 'N/A')}") - print(f"Network: {info.get('network', 'N/A')}") - else: - print("Could not connect to Ergo node (this is expected in testing)") - - print("\n" + "=" * 70) - print("Tests complete!") - print("=" * 70) +#!/usr/bin/env python3 +""" +RustChain Ergo Cross-Chain Anchoring +===================================== + +Phase 4 Implementation: +- Periodic anchoring of RustChain state to Ergo blockchain +- Merkle root commitment transactions +- Anchor verification and proof generation + +Provides finality by anchoring RustChain state to Ergo's PoW chain. 
+""" + +import os +import time +import json +import hashlib +import logging +import threading +import requests +from typing import Dict, List, Optional, Tuple +from dataclasses import dataclass + +from rustchain_crypto import blake2b256_hex, canonical_json, MerkleTree + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [ANCHOR] %(levelname)s: %(message)s' +) +logger = logging.getLogger(__name__) + + +# ============================================================================= +# CONFIGURATION +# ============================================================================= + +# Ergo node endpoints +ERGO_NODE_URL = os.environ.get("ERGO_NODE_URL", "http://localhost:9053") +ERGO_API_KEY = os.environ.get("ERGO_API_KEY", "") + +# Anchoring parameters +ANCHOR_INTERVAL_BLOCKS = 144 # Anchor every 144 RustChain blocks (~24 hours) +ANCHOR_CONFIRMATION_DEPTH = 6 # Wait for 6 Ergo confirmations + +# RustChain anchor wallet (holds ERG for anchor fees) +ANCHOR_WALLET_ADDRESS = os.environ.get("ANCHOR_WALLET", "") + + +# ============================================================================= +# ANCHOR COMMITMENT +# ============================================================================= + +@dataclass +class AnchorCommitment: + """ + Commitment to be anchored to Ergo. 
+ """ + rustchain_height: int # RustChain block height + rustchain_hash: str # RustChain block hash + state_root: str # State merkle root + attestations_root: str # Attestations merkle root + timestamp: int # Unix timestamp (ms) + commitment_hash: str = "" # Blake2b256 of all fields + + def compute_hash(self) -> str: + """Compute commitment hash""" + data = { + "rc_height": self.rustchain_height, + "rc_hash": self.rustchain_hash, + "state_root": self.state_root, + "attestations_root": self.attestations_root, + "timestamp": self.timestamp + } + return blake2b256_hex(canonical_json(data)) + + def to_dict(self) -> Dict: + """Convert to dictionary""" + if not self.commitment_hash: + self.commitment_hash = self.compute_hash() + return { + "rustchain_height": self.rustchain_height, + "rustchain_hash": self.rustchain_hash, + "state_root": self.state_root, + "attestations_root": self.attestations_root, + "timestamp": self.timestamp, + "commitment_hash": self.commitment_hash + } + + @classmethod + def from_dict(cls, d: Dict) -> "AnchorCommitment": + """Create from dictionary""" + return cls( + rustchain_height=d["rustchain_height"], + rustchain_hash=d["rustchain_hash"], + state_root=d["state_root"], + attestations_root=d["attestations_root"], + timestamp=d["timestamp"], + commitment_hash=d.get("commitment_hash", "") + ) + + +# ============================================================================= +# ERGO CLIENT +# ============================================================================= + +class ErgoClient: + """ + Client for interacting with Ergo node. 
+ """ + + def __init__(self, node_url: str = ERGO_NODE_URL, api_key: str = ERGO_API_KEY): + self.node_url = node_url.rstrip('/') + self.api_key = api_key + self.session = requests.Session() + if api_key: + self.session.headers['api_key'] = api_key + + def _get(self, endpoint: str) -> Optional[Dict]: + """Make GET request to Ergo node""" + try: + resp = self.session.get(f"{self.node_url}{endpoint}", timeout=30) + if resp.status_code == 200: + return resp.json() + else: + logger.error(f"Ergo GET {endpoint} failed: {resp.status_code}") + return None + except Exception as e: + logger.error(f"Ergo GET {endpoint} error: {e}") + return None + + def _post(self, endpoint: str, data: Dict) -> Optional[Dict]: + """Make POST request to Ergo node""" + try: + resp = self.session.post( + f"{self.node_url}{endpoint}", + json=data, + timeout=30 + ) + if resp.status_code in [200, 201]: + return resp.json() + else: + logger.error(f"Ergo POST {endpoint} failed: {resp.status_code} - {resp.text}") + return None + except Exception as e: + logger.error(f"Ergo POST {endpoint} error: {e}") + return None + + def get_info(self) -> Optional[Dict]: + """Get node info""" + return self._get("/info") + + def get_height(self) -> int: + """Get current blockchain height""" + info = self.get_info() + return info.get("fullHeight", 0) if info else 0 + + def get_wallet_addresses(self) -> List[str]: + """Get wallet addresses""" + resp = self._get("/wallet/addresses") + return resp if resp else [] + + def get_wallet_balance(self) -> int: + """Get wallet balance in nanoERG""" + resp = self._get("/wallet/balances") + if resp: + return resp.get("balance", 0) + return 0 + + def create_anchor_transaction( + self, + commitment: AnchorCommitment, + fee_nano: int = 1_000_000 # 0.001 ERG + ) -> Optional[str]: + """ + Create an anchor transaction on Ergo. + + Stores commitment hash in a data output. + + Returns transaction ID if successful. 
+ """ + commitment_bytes = bytes.fromhex(commitment.commitment_hash) + + # Build transaction request + tx_request = { + "requests": [ + { + "address": ANCHOR_WALLET_ADDRESS, # Send back to self + "value": 1_000_000, # 0.001 ERG (minimum box value) + "registers": { + # R4: RustChain height (Long) + "R4": f"05{commitment.rustchain_height:016x}", + # R5: Commitment hash (Coll[Byte]) + "R5": f"0e40{commitment.commitment_hash}", + # R6: Timestamp (Long) + "R6": f"05{commitment.timestamp:016x}" + } + } + ], + "fee": fee_nano, + "inputsRaw": [] + } + + # Generate transaction + resp = self._post("/wallet/transaction/generate", tx_request) + if not resp: + return None + + # Sign transaction + unsigned_tx = resp + signed = self._post("/wallet/transaction/sign", unsigned_tx) + if not signed: + return None + + # Send transaction + result = self._post("/transactions", signed) + if result: + tx_id = result.get("id") + logger.info(f"Anchor TX submitted: {tx_id}") + return tx_id + + return None + + def get_transaction(self, tx_id: str) -> Optional[Dict]: + """Get transaction by ID""" + return self._get(f"/transactions/{tx_id}") + + def get_transaction_confirmations(self, tx_id: str) -> int: + """Get number of confirmations for transaction""" + tx = self.get_transaction(tx_id) + if tx and "numConfirmations" in tx: + return tx["numConfirmations"] + + # Try getting from mempool or unconfirmed + unconfirmed = self._get(f"/transactions/unconfirmed/{tx_id}") + if unconfirmed: + return 0 + + return -1 # Transaction not found + + def verify_anchor(self, tx_id: str, commitment: AnchorCommitment) -> Tuple[bool, str]: + """ + Verify an anchor transaction contains the expected commitment. 
+ + Returns (is_valid, error_message) + """ + tx = self.get_transaction(tx_id) + if not tx: + return False, "Transaction not found" + + # Check outputs for commitment + for output in tx.get("outputs", []): + registers = output.get("additionalRegisters", {}) + + # Check R5 for commitment hash + r5 = registers.get("R5", {}).get("serializedValue", "") + if r5: + # Remove prefix (0e40 = Coll[Byte] with 64 bytes) + if r5.startswith("0e40"): + stored_hash = r5[4:] + if stored_hash == commitment.commitment_hash: + return True, "" + + return False, "Commitment not found in transaction outputs" + + +# ============================================================================= +# ANCHOR SERVICE +# ============================================================================= + +class AnchorService: + """ + Service for managing RustChain -> Ergo anchoring. + """ + + def __init__( + self, + db_path: str, + ergo_client: ErgoClient = None, + interval_blocks: int = ANCHOR_INTERVAL_BLOCKS + ): + self.db_path = db_path + self.ergo = ergo_client or ErgoClient() + self.interval_blocks = interval_blocks + self._running = False + self._thread = None + + def get_last_anchor(self) -> Optional[Dict]: + """Get the last recorded anchor""" + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + # Ensure table exists + cursor.execute(""" + CREATE TABLE IF NOT EXISTS ergo_anchors ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + rustchain_height INTEGER NOT NULL, + rustchain_hash TEXT NOT NULL, + commitment_hash TEXT NOT NULL, + ergo_tx_id TEXT NOT NULL, + ergo_height INTEGER, + confirmations INTEGER DEFAULT 0, + status TEXT DEFAULT 'pending', + created_at INTEGER NOT NULL + ) + """) + + cursor.execute(""" + SELECT * FROM ergo_anchors + ORDER BY rustchain_height DESC + LIMIT 1 + """) + + row = cursor.fetchone() + return dict(row) if row else None + + def should_anchor(self, current_height: int) -> bool: + """Check if we should 
create a new anchor""" + last = self.get_last_anchor() + + if not last: + return current_height >= self.interval_blocks + + blocks_since = current_height - last["rustchain_height"] + return blocks_since >= self.interval_blocks + + def create_commitment(self, block: Dict) -> AnchorCommitment: + """Create an anchor commitment from a RustChain block""" + return AnchorCommitment( + rustchain_height=block["height"], + rustchain_hash=block["block_hash"], + state_root=block.get("state_root", "0" * 64), + attestations_root=block.get("attestations_hash", "0" * 64), + timestamp=int(time.time() * 1000) + ) + + def submit_anchor(self, commitment: AnchorCommitment) -> Optional[str]: + """Submit an anchor to Ergo""" + commitment.commitment_hash = commitment.compute_hash() + + logger.info(f"Submitting anchor for RC height {commitment.rustchain_height}") + logger.info(f"Commitment hash: {commitment.commitment_hash}") + + tx_id = self.ergo.create_anchor_transaction(commitment) + + if tx_id: + self._save_anchor(commitment, tx_id) + return tx_id + + return None + + def _save_anchor(self, commitment: AnchorCommitment, tx_id: str): + """Save anchor record to database""" + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + + cursor.execute(""" + INSERT INTO ergo_anchors + (rustchain_height, rustchain_hash, commitment_hash, + ergo_tx_id, status, created_at) + VALUES (?, ?, ?, ?, 'pending', ?) + """, ( + commitment.rustchain_height, + commitment.rustchain_hash, + commitment.commitment_hash, + tx_id, + int(time.time()) + )) + + def update_anchor_status(self, tx_id: str) -> Tuple[int, str]: + """ + Update anchor status based on Ergo confirmations. 
+ + Returns (confirmations, status) + """ + confirmations = self.ergo.get_transaction_confirmations(tx_id) + + if confirmations < 0: + status = "not_found" + elif confirmations == 0: + status = "pending" + elif confirmations < ANCHOR_CONFIRMATION_DEPTH: + status = "confirming" + else: + status = "confirmed" + + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + cursor.execute(""" + UPDATE ergo_anchors + SET confirmations = ?, status = ? + WHERE ergo_tx_id = ? + """, (confirmations, status, tx_id)) + + return confirmations, status + + def get_anchor_proof(self, rustchain_height: int) -> Optional[Dict]: + """ + Get proof that a RustChain height was anchored to Ergo. + + Returns anchor details including Ergo transaction. + """ + import sqlite3 + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT * FROM ergo_anchors + WHERE rustchain_height <= ? + ORDER BY rustchain_height DESC + LIMIT 1 + """, (rustchain_height,)) + + row = cursor.fetchone() + if not row: + return None + + anchor = dict(row) + + # Get Ergo transaction details + tx = self.ergo.get_transaction(anchor["ergo_tx_id"]) + if tx: + anchor["ergo_transaction"] = tx + + return anchor + + def start(self, check_interval: int = 60): + """Start the anchor monitoring thread""" + if self._running: + return + + self._running = True + self._thread = threading.Thread( + target=self._monitor_loop, + args=(check_interval,), + daemon=True + ) + self._thread.start() + logger.info("Anchor service started") + + def stop(self): + """Stop the anchor monitoring thread""" + self._running = False + if self._thread: + self._thread.join(timeout=5) + logger.info("Anchor service stopped") + + def _monitor_loop(self, interval: int): + """Monitor pending anchors and update status""" + import sqlite3 + + while self._running: + try: + with sqlite3.connect(self.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = 
conn.cursor() + + # Get pending anchors + cursor.execute(""" + SELECT ergo_tx_id FROM ergo_anchors + WHERE status IN ('pending', 'confirming') + """) + + for row in cursor.fetchall(): + tx_id = row["ergo_tx_id"] + confs, status = self.update_anchor_status(tx_id) + logger.debug(f"Anchor {tx_id[:16]}... = {confs} confirmations ({status})") + + except Exception as e: + logger.error(f"Anchor monitor error: {e}") + + time.sleep(interval) + + +# ============================================================================= +# API ROUTES +# ============================================================================= + +def create_anchor_api_routes(app, anchor_service: AnchorService): + """Create Flask routes for anchor API""" + from flask import request, jsonify + + @app.route('/anchor/status', methods=['GET']) + def anchor_status(): + """Get anchoring service status""" + last = anchor_service.get_last_anchor() + ergo_height = anchor_service.ergo.get_height() + + return jsonify({ + "ergo_connected": ergo_height > 0, + "ergo_height": ergo_height, + "interval_blocks": anchor_service.interval_blocks, + "last_anchor": last + }) + + @app.route('/anchor/proof/', methods=['GET']) + def get_anchor_proof(height: int): + """Get anchor proof for a RustChain height""" + proof = anchor_service.get_anchor_proof(height) + if proof: + return jsonify(proof) + return jsonify({"error": "No anchor found for height"}), 404 + + @app.route('/anchor/list', methods=['GET']) + def list_anchors(): + """List all anchors""" + import sqlite3 + + limit = request.args.get('limit', 50, type=int) + offset = request.args.get('offset', 0, type=int) + + with sqlite3.connect(anchor_service.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute(""" + SELECT * FROM ergo_anchors + ORDER BY rustchain_height DESC + LIMIT ? OFFSET ? 
+ """, (limit, offset)) + + anchors = [dict(row) for row in cursor.fetchall()] + + return jsonify({ + "count": len(anchors), + "anchors": anchors + }) + + +# ============================================================================= +# TESTING +# ============================================================================= + +if __name__ == "__main__": + print("=" * 70) + print("RustChain Ergo Anchoring - Test Suite") + print("=" * 70) + + # Test commitment creation + print("\n=== Commitment Creation ===") + commitment = AnchorCommitment( + rustchain_height=1000, + rustchain_hash="abc123" + "0" * 58, + state_root="def456" + "0" * 58, + attestations_root="789ghi" + "0" * 58, + timestamp=int(time.time() * 1000) + ) + + print(f"RC Height: {commitment.rustchain_height}") + print(f"RC Hash: {commitment.rustchain_hash[:16]}...") + print(f"Commitment Hash: {commitment.compute_hash()}") + + # Test serialization + print("\n=== Serialization ===") + d = commitment.to_dict() + print(f"Dict keys: {list(d.keys())}") + + restored = AnchorCommitment.from_dict(d) + print(f"Restored hash matches: {restored.compute_hash() == commitment.compute_hash()}") + + # Test Ergo client (if node available) + print("\n=== Ergo Client ===") + client = ErgoClient() + info = client.get_info() + + if info: + print(f"Connected to Ergo node") + print(f"Height: {info.get('fullHeight', 'N/A')}") + print(f"Network: {info.get('network', 'N/A')}") + else: + print("Could not connect to Ergo node (this is expected in testing)") + + print("\n" + "=" * 70) + print("Tests complete!") + print("=" * 70) diff --git a/node/rustchain_hardware_database.py b/node/rustchain_hardware_database.py index db647b12..a8d2a4a1 100644 --- a/node/rustchain_hardware_database.py +++ b/node/rustchain_hardware_database.py @@ -1,940 +1,940 @@ -#!/usr/bin/env python3 -""" -RustChain Proof of Antiquity - Hardware Database -================================================ -Comprehensive database of vintage and rare hardware for PoA 
multiplier calculation. -Includes CPUID values, PVR codes, chipset IDs, and rarity bonuses. - -Reference databases used: -- Intel/AMD CPUID documentation -- IBM PowerPC Processor Version Register (PVR) values -- Amiga Hardware Reference Manual -- PCI ID Repository (pci-ids.ucw.cz) -- USB ID Repository -""" - -from dataclasses import dataclass -from typing import Optional, Dict, List, Tuple -import re - -@dataclass -class HardwareEntry: - """Single hardware entry in the database""" - id: str # Unique identifier (CPUID, PVR, chipset ID) - name: str # Human-readable name - family: str # Hardware family (x86, powerpc, m68k, etc.) - year: int # Release year (approximate) - base_multiplier: float # Base PoA multiplier - rarity_bonus: float # Additional bonus for rare hardware (0.0 - 1.0) - tier: str # MYTHIC, LEGENDARY, ANCIENT, VINTAGE, STANDARD, PENALTY - notes: str = "" # Additional notes - -# ============================================================================= -# x86 PROCESSOR DATABASE (by CPUID Family/Model/Stepping) -# Format: "family_model" or "family_model_stepping" -# ============================================================================= - -X86_CPUID_DATABASE: Dict[str, HardwareEntry] = { - # ============ MYTHIC TIER (4.0x) - Pre-486 ============ - # Intel 8086/8088 (1978-1979) - "8086": HardwareEntry("8086", "Intel 8086", "x86", 1978, 4.0, 0.5, "MYTHIC", "Original x86"), - "8088": HardwareEntry("8088", "Intel 8088", "x86", 1979, 4.0, 0.5, "MYTHIC", "IBM PC original"), - - # Intel 80186/80188 (1982) - "80186": HardwareEntry("80186", "Intel 80186", "x86", 1982, 4.0, 0.6, "MYTHIC", "Embedded variant"), - "80188": HardwareEntry("80188", "Intel 80188", "x86", 1982, 4.0, 0.6, "MYTHIC", "Embedded 8-bit bus"), - - # Intel 80286 (1982) - "2_0": HardwareEntry("2_0", "Intel 80286", "x86", 1982, 4.0, 0.4, "MYTHIC", "Protected mode"), - "286": HardwareEntry("286", "Intel 80286", "x86", 1982, 4.0, 0.4, "MYTHIC"), - - # Intel 80386 (1985) - "3_0": 
HardwareEntry("3_0", "Intel 80386DX", "x86", 1985, 4.0, 0.3, "MYTHIC", "32-bit x86"), - "3_2": HardwareEntry("3_2", "Intel 80386SX", "x86", 1988, 4.0, 0.25, "MYTHIC", "16-bit bus"), - "3_4": HardwareEntry("3_4", "Intel 80386SL", "x86", 1990, 4.0, 0.35, "MYTHIC", "Low power"), - "386": HardwareEntry("386", "Intel 80386", "x86", 1985, 4.0, 0.3, "MYTHIC"), - - # AMD Am386 variants - "amd_386": HardwareEntry("amd_386", "AMD Am386", "x86", 1991, 4.0, 0.35, "MYTHIC", "AMD clone"), - - # Cyrix 386 variants - "cyrix_386": HardwareEntry("cyrix_386", "Cyrix Cx486SLC", "x86", 1992, 4.0, 0.4, "MYTHIC", "386 pin-compatible"), - - # ============ LEGENDARY-HIGH TIER (3.8x) - 486 ============ - # Intel 486 (1989) - "4_0": HardwareEntry("4_0", "Intel 486DX", "x86", 1989, 3.8, 0.2, "LEGENDARY", "Integrated FPU"), - "4_1": HardwareEntry("4_1", "Intel 486DX-50", "x86", 1990, 3.8, 0.25, "LEGENDARY", "50MHz variant"), - "4_2": HardwareEntry("4_2", "Intel 486SX", "x86", 1991, 3.8, 0.15, "LEGENDARY", "No FPU"), - "4_3": HardwareEntry("4_3", "Intel 486DX2", "x86", 1992, 3.8, 0.2, "LEGENDARY", "Clock doubled"), - "4_4": HardwareEntry("4_4", "Intel 486SL", "x86", 1992, 3.8, 0.3, "LEGENDARY", "Mobile/low power"), - "4_5": HardwareEntry("4_5", "Intel 486SX2", "x86", 1994, 3.8, 0.2, "LEGENDARY"), - "4_7": HardwareEntry("4_7", "Intel 486DX2-WB", "x86", 1994, 3.8, 0.2, "LEGENDARY", "Write-back cache"), - "4_8": HardwareEntry("4_8", "Intel 486DX4", "x86", 1994, 3.8, 0.2, "LEGENDARY", "Clock tripled"), - "4_9": HardwareEntry("4_9", "Intel 486DX4-WB", "x86", 1994, 3.8, 0.2, "LEGENDARY"), - "486": HardwareEntry("486", "Intel 486", "x86", 1989, 3.8, 0.2, "LEGENDARY"), - - # AMD 486 variants (often higher clocks) - "amd_4_3": HardwareEntry("amd_4_3", "AMD Am486DX2", "x86", 1993, 3.8, 0.25, "LEGENDARY"), - "amd_4_7": HardwareEntry("amd_4_7", "AMD Am486DX4", "x86", 1994, 3.8, 0.25, "LEGENDARY"), - "amd_4_8": HardwareEntry("amd_4_8", "AMD Am5x86", "x86", 1995, 3.8, 0.3, "LEGENDARY", "486 socket, P75 
perf"), - "am5x86": HardwareEntry("am5x86", "AMD Am5x86", "x86", 1995, 3.8, 0.3, "LEGENDARY"), - - # Cyrix 486 variants - "cyrix_4_4": HardwareEntry("cyrix_4_4", "Cyrix Cx486DX2", "x86", 1993, 3.8, 0.35, "LEGENDARY", "Rare"), - "cyrix_4_9": HardwareEntry("cyrix_4_9", "Cyrix Cx5x86", "x86", 1995, 3.8, 0.4, "LEGENDARY", "Rare Cyrix"), - - # ============ LEGENDARY TIER (3.5x) - Pentium 1 ============ - # Intel Pentium (P5) (1993) - "5_1": HardwareEntry("5_1", "Intel Pentium 60/66", "x86", 1993, 3.5, 0.2, "LEGENDARY", "First Pentium"), - "5_2": HardwareEntry("5_2", "Intel Pentium 75-200", "x86", 1994, 3.5, 0.15, "LEGENDARY", "P54C"), - "5_3": HardwareEntry("5_3", "Intel Pentium OverDrive", "x86", 1995, 3.5, 0.3, "LEGENDARY", "Upgrade chip"), - "5_4": HardwareEntry("5_4", "Intel Pentium MMX", "x86", 1997, 3.5, 0.1, "LEGENDARY", "P55C with MMX"), - "5_7": HardwareEntry("5_7", "Intel Pentium MMX Mobile", "x86", 1997, 3.5, 0.2, "LEGENDARY"), - "5_8": HardwareEntry("5_8", "Intel Pentium MMX Mobile", "x86", 1998, 3.5, 0.2, "LEGENDARY"), - "pentium": HardwareEntry("pentium", "Intel Pentium", "x86", 1993, 3.5, 0.15, "LEGENDARY"), - "p5": HardwareEntry("p5", "Intel Pentium P5", "x86", 1993, 3.5, 0.15, "LEGENDARY"), - "p54c": HardwareEntry("p54c", "Intel Pentium P54C", "x86", 1994, 3.5, 0.15, "LEGENDARY"), - "p55c": HardwareEntry("p55c", "Intel Pentium MMX P55C", "x86", 1997, 3.5, 0.1, "LEGENDARY"), - - # AMD K5 (1996) - Pentium competitor - "amd_5_0": HardwareEntry("amd_5_0", "AMD K5 PR75-PR100", "x86", 1996, 3.5, 0.3, "LEGENDARY", "AMD's first x86"), - "amd_5_1": HardwareEntry("amd_5_1", "AMD K5 PR120-PR133", "x86", 1996, 3.5, 0.3, "LEGENDARY"), - "amd_5_2": HardwareEntry("amd_5_2", "AMD K5 PR150-PR200", "x86", 1996, 3.5, 0.3, "LEGENDARY"), - "k5": HardwareEntry("k5", "AMD K5", "x86", 1996, 3.5, 0.3, "LEGENDARY"), - - # Cyrix 6x86 (1996) - Pentium competitor (actually family 5 compatible) - "cyrix_5_2": HardwareEntry("cyrix_5_2", "Cyrix 6x86", "x86", 1996, 3.5, 0.4, 
"LEGENDARY", "Rare Cyrix"), - "cyrix_5_4": HardwareEntry("cyrix_5_4", "Cyrix 6x86MX", "x86", 1997, 3.5, 0.4, "LEGENDARY", "Rare"), - "6x86": HardwareEntry("6x86", "Cyrix 6x86", "x86", 1996, 3.5, 0.4, "LEGENDARY"), - - # IDT/Centaur WinChip (1997) - "idt_5_4": HardwareEntry("idt_5_4", "IDT WinChip C6", "x86", 1997, 3.5, 0.5, "LEGENDARY", "Very rare"), - "idt_5_8": HardwareEntry("idt_5_8", "IDT WinChip 2", "x86", 1998, 3.5, 0.5, "LEGENDARY", "Very rare"), - "winchip": HardwareEntry("winchip", "IDT WinChip", "x86", 1997, 3.5, 0.5, "LEGENDARY"), - - # NexGen Nx586 (1994) - Very rare - "nexgen_5": HardwareEntry("nexgen_5", "NexGen Nx586", "x86", 1994, 3.5, 0.7, "LEGENDARY", "Extremely rare"), - "nx586": HardwareEntry("nx586", "NexGen Nx586", "x86", 1994, 3.5, 0.7, "LEGENDARY"), - - # ============ LEGENDARY-LOW TIER (3.2x) - Pentium II / Celeron ============ - # Intel Pentium Pro (1995) - Actually family 6 - "6_1": HardwareEntry("6_1", "Intel Pentium Pro", "x86", 1995, 3.2, 0.2, "LEGENDARY", "P6 architecture"), - "ppro": HardwareEntry("ppro", "Intel Pentium Pro", "x86", 1995, 3.2, 0.2, "LEGENDARY"), - - # Intel Pentium II (1997) - "6_3": HardwareEntry("6_3", "Intel Pentium II Klamath", "x86", 1997, 3.2, 0.15, "LEGENDARY", "Slot 1"), - "6_5": HardwareEntry("6_5", "Intel Pentium II Deschutes", "x86", 1998, 3.2, 0.1, "LEGENDARY"), - "pii": HardwareEntry("pii", "Intel Pentium II", "x86", 1997, 3.2, 0.15, "LEGENDARY"), - "p2": HardwareEntry("p2", "Intel Pentium II", "x86", 1997, 3.2, 0.15, "LEGENDARY"), - "klamath": HardwareEntry("klamath", "Intel Pentium II Klamath", "x86", 1997, 3.2, 0.15, "LEGENDARY"), - - # Intel Celeron (1998) - "6_6": HardwareEntry("6_6", "Intel Celeron Mendocino", "x86", 1998, 3.2, 0.1, "LEGENDARY"), - "celeron_slot1": HardwareEntry("celeron_slot1", "Intel Celeron (Slot 1)", "x86", 1998, 3.2, 0.15, "LEGENDARY"), - "mendocino": HardwareEntry("mendocino", "Intel Celeron Mendocino", "x86", 1998, 3.2, 0.1, "LEGENDARY"), - - # AMD K6 (1997) - "amd_6_6": 
HardwareEntry("amd_6_6", "AMD K6", "x86", 1997, 3.2, 0.2, "LEGENDARY"), - "amd_6_8": HardwareEntry("amd_6_8", "AMD K6-2", "x86", 1998, 3.2, 0.15, "LEGENDARY", "3DNow!"), - "amd_6_9": HardwareEntry("amd_6_9", "AMD K6-III", "x86", 1999, 3.2, 0.2, "LEGENDARY", "Triple cache"), - "k6": HardwareEntry("k6", "AMD K6", "x86", 1997, 3.2, 0.2, "LEGENDARY"), - "k6-2": HardwareEntry("k6-2", "AMD K6-2", "x86", 1998, 3.2, 0.15, "LEGENDARY"), - "k6-3": HardwareEntry("k6-3", "AMD K6-III", "x86", 1999, 3.2, 0.2, "LEGENDARY"), - - # ============ LEGENDARY-LOW TIER (3.0x) - Pentium III / Athlon ============ - # Intel Pentium III (1999) - "6_7": HardwareEntry("6_7", "Intel Pentium III Katmai", "x86", 1999, 3.0, 0.1, "LEGENDARY", "SSE"), - "6_8": HardwareEntry("6_8", "Intel Pentium III Coppermine", "x86", 1999, 3.0, 0.05, "LEGENDARY"), - "6_10": HardwareEntry("6_10", "Intel Pentium III Coppermine-T", "x86", 2000, 3.0, 0.05, "LEGENDARY"), - "6_11": HardwareEntry("6_11", "Intel Pentium III Tualatin", "x86", 2001, 3.0, 0.1, "LEGENDARY"), - "piii": HardwareEntry("piii", "Intel Pentium III", "x86", 1999, 3.0, 0.1, "LEGENDARY"), - "p3": HardwareEntry("p3", "Intel Pentium III", "x86", 1999, 3.0, 0.1, "LEGENDARY"), - "katmai": HardwareEntry("katmai", "Intel Pentium III Katmai", "x86", 1999, 3.0, 0.1, "LEGENDARY"), - "coppermine": HardwareEntry("coppermine", "Intel Pentium III Coppermine", "x86", 1999, 3.0, 0.05, "LEGENDARY"), - "tualatin": HardwareEntry("tualatin", "Intel Pentium III Tualatin", "x86", 2001, 3.0, 0.1, "LEGENDARY"), - - # AMD Athlon (1999) - "amd_6_1": HardwareEntry("amd_6_1", "AMD Athlon (K7)", "x86", 1999, 3.0, 0.1, "LEGENDARY", "Slot A"), - "amd_6_2": HardwareEntry("amd_6_2", "AMD Athlon (K75)", "x86", 1999, 3.0, 0.1, "LEGENDARY"), - "amd_6_4": HardwareEntry("amd_6_4", "AMD Athlon Thunderbird", "x86", 2000, 3.0, 0.05, "LEGENDARY"), - "amd_6_6_xp": HardwareEntry("amd_6_6_xp", "AMD Athlon XP Palomino", "x86", 2001, 3.0, 0.05, "LEGENDARY"), - "amd_6_8_xp": 
HardwareEntry("amd_6_8_xp", "AMD Athlon XP Thoroughbred", "x86", 2002, 3.0, 0.05, "LEGENDARY"), - "amd_6_10_xp": HardwareEntry("amd_6_10_xp", "AMD Athlon XP Barton", "x86", 2003, 3.0, 0.1, "LEGENDARY", "512K L2"), - "athlon": HardwareEntry("athlon", "AMD Athlon", "x86", 1999, 3.0, 0.1, "LEGENDARY"), - "athlon_xp": HardwareEntry("athlon_xp", "AMD Athlon XP", "x86", 2001, 3.0, 0.05, "LEGENDARY"), - "thunderbird": HardwareEntry("thunderbird", "AMD Athlon Thunderbird", "x86", 2000, 3.0, 0.05, "LEGENDARY"), - "barton": HardwareEntry("barton", "AMD Athlon XP Barton", "x86", 2003, 3.0, 0.1, "LEGENDARY"), - - # VIA C3 (2001) - Rare - "via_6_7": HardwareEntry("via_6_7", "VIA C3 Samuel", "x86", 2001, 3.0, 0.4, "LEGENDARY", "Rare VIA"), - "via_6_8": HardwareEntry("via_6_8", "VIA C3 Ezra", "x86", 2001, 3.0, 0.4, "LEGENDARY", "Rare"), - "via_6_9": HardwareEntry("via_6_9", "VIA C3 Nehemiah", "x86", 2003, 3.0, 0.4, "LEGENDARY", "Rare"), - "c3": HardwareEntry("c3", "VIA C3", "x86", 2001, 3.0, 0.4, "LEGENDARY"), - - # Transmeta Crusoe (2000) - Very rare - "transmeta_5_4": HardwareEntry("transmeta_5_4", "Transmeta Crusoe TM5400", "x86", 2000, 3.0, 0.6, "LEGENDARY", "Code morphing"), - "transmeta_5_5": HardwareEntry("transmeta_5_5", "Transmeta Crusoe TM5600", "x86", 2000, 3.0, 0.6, "LEGENDARY"), - "transmeta_15": HardwareEntry("transmeta_15", "Transmeta Efficeon", "x86", 2003, 3.0, 0.6, "LEGENDARY"), - "crusoe": HardwareEntry("crusoe", "Transmeta Crusoe", "x86", 2000, 3.0, 0.6, "LEGENDARY"), - "efficeon": HardwareEntry("efficeon", "Transmeta Efficeon", "x86", 2003, 3.0, 0.6, "LEGENDARY"), - - # ============ ANCIENT TIER (2.5x) - Pentium 4 / Athlon 64 ============ - # Intel Pentium 4 (2000) - "15_0": HardwareEntry("15_0", "Intel Pentium 4 Willamette", "x86", 2000, 2.5, 0.1, "ANCIENT", "NetBurst"), - "15_1": HardwareEntry("15_1", "Intel Pentium 4 Willamette-2", "x86", 2001, 2.5, 0.1, "ANCIENT"), - "15_2": HardwareEntry("15_2", "Intel Pentium 4 Northwood", "x86", 2002, 2.5, 0.05, 
"ANCIENT", "130nm"), - "15_3": HardwareEntry("15_3", "Intel Pentium 4 Prescott", "x86", 2004, 2.5, 0.05, "ANCIENT", "90nm"), - "15_4": HardwareEntry("15_4", "Intel Pentium 4 Prescott-2M", "x86", 2005, 2.5, 0.05, "ANCIENT"), - "15_6": HardwareEntry("15_6", "Intel Pentium D", "x86", 2005, 2.5, 0.1, "ANCIENT", "Dual Prescott"), - "p4": HardwareEntry("p4", "Intel Pentium 4", "x86", 2000, 2.5, 0.05, "ANCIENT"), - "pentium4": HardwareEntry("pentium4", "Intel Pentium 4", "x86", 2000, 2.5, 0.05, "ANCIENT"), - "willamette": HardwareEntry("willamette", "Intel Pentium 4 Willamette", "x86", 2000, 2.5, 0.1, "ANCIENT"), - "northwood": HardwareEntry("northwood", "Intel Pentium 4 Northwood", "x86", 2002, 2.5, 0.05, "ANCIENT"), - "prescott": HardwareEntry("prescott", "Intel Pentium 4 Prescott", "x86", 2004, 2.5, 0.05, "ANCIENT"), - - # Intel Pentium M (2003) - "6_9": HardwareEntry("6_9", "Intel Pentium M Banias", "x86", 2003, 2.5, 0.15, "ANCIENT", "Mobile P6"), - "6_13": HardwareEntry("6_13", "Intel Pentium M Dothan", "x86", 2004, 2.5, 0.1, "ANCIENT"), - "pentium_m": HardwareEntry("pentium_m", "Intel Pentium M", "x86", 2003, 2.5, 0.1, "ANCIENT"), - "banias": HardwareEntry("banias", "Intel Pentium M Banias", "x86", 2003, 2.5, 0.15, "ANCIENT"), - "dothan": HardwareEntry("dothan", "Intel Pentium M Dothan", "x86", 2004, 2.5, 0.1, "ANCIENT"), - - # AMD Athlon 64 (2003) - "amd_15_4": HardwareEntry("amd_15_4", "AMD Athlon 64 Clawhammer", "x86", 2003, 2.5, 0.1, "ANCIENT", "x86-64"), - "amd_15_5": HardwareEntry("amd_15_5", "AMD Opteron", "x86", 2003, 2.5, 0.15, "ANCIENT", "Server"), - "amd_15_7": HardwareEntry("amd_15_7", "AMD Athlon 64 San Diego", "x86", 2005, 2.5, 0.1, "ANCIENT"), - "amd_15_11": HardwareEntry("amd_15_11", "AMD Athlon 64 Orleans", "x86", 2006, 2.5, 0.05, "ANCIENT"), - "amd_15_35": HardwareEntry("amd_15_35", "AMD Athlon 64 X2", "x86", 2005, 2.5, 0.1, "ANCIENT", "Dual core"), - "athlon64": HardwareEntry("athlon64", "AMD Athlon 64", "x86", 2003, 2.5, 0.1, "ANCIENT"), - 
"athlon64_x2": HardwareEntry("athlon64_x2", "AMD Athlon 64 X2", "x86", 2005, 2.5, 0.1, "ANCIENT"), - "opteron": HardwareEntry("opteron", "AMD Opteron", "x86", 2003, 2.5, 0.15, "ANCIENT"), - - # ============ ANCIENT TIER (2.0x) - Core Duo / Early Core ============ - # Intel Core (2006) - "6_14": HardwareEntry("6_14", "Intel Core Yonah", "x86", 2006, 2.0, 0.1, "ANCIENT", "Core Duo/Solo"), - "core_duo": HardwareEntry("core_duo", "Intel Core Duo", "x86", 2006, 2.0, 0.1, "ANCIENT"), - "core_solo": HardwareEntry("core_solo", "Intel Core Solo", "x86", 2006, 2.0, 0.1, "ANCIENT"), - "yonah": HardwareEntry("yonah", "Intel Core Yonah", "x86", 2006, 2.0, 0.1, "ANCIENT"), - - # Intel Pentium D - "pentium_d": HardwareEntry("pentium_d", "Intel Pentium D", "x86", 2005, 2.0, 0.1, "ANCIENT"), - - # AMD Athlon X2 (socket 939/AM2) - "amd_15_67": HardwareEntry("amd_15_67", "AMD Athlon X2 Brisbane", "x86", 2007, 2.0, 0.05, "ANCIENT"), - - # ============ VINTAGE TIER (1.5x) - Core 2 ============ - # Intel Core 2 (2006) - "6_15": HardwareEntry("6_15", "Intel Core 2 Merom/Conroe", "x86", 2006, 1.5, 0.05, "VINTAGE", "Core 2 Duo"), - "6_22": HardwareEntry("6_22", "Intel Core 2 Merom-L", "x86", 2007, 1.5, 0.05, "VINTAGE"), - "6_23": HardwareEntry("6_23", "Intel Core 2 Penryn", "x86", 2008, 1.5, 0.05, "VINTAGE", "45nm"), - "6_29": HardwareEntry("6_29", "Intel Xeon Dunnington", "x86", 2008, 1.5, 0.1, "VINTAGE", "6-core"), - "core2": HardwareEntry("core2", "Intel Core 2", "x86", 2006, 1.5, 0.05, "VINTAGE"), - "core2_duo": HardwareEntry("core2_duo", "Intel Core 2 Duo", "x86", 2006, 1.5, 0.05, "VINTAGE"), - "core2_quad": HardwareEntry("core2_quad", "Intel Core 2 Quad", "x86", 2007, 1.5, 0.05, "VINTAGE"), - "conroe": HardwareEntry("conroe", "Intel Core 2 Conroe", "x86", 2006, 1.5, 0.05, "VINTAGE"), - "merom": HardwareEntry("merom", "Intel Core 2 Merom", "x86", 2006, 1.5, 0.05, "VINTAGE"), - "penryn": HardwareEntry("penryn", "Intel Core 2 Penryn", "x86", 2008, 1.5, 0.05, "VINTAGE"), - - # AMD Phenom 
(2007) - "amd_16_2": HardwareEntry("amd_16_2", "AMD Phenom X4 Agena", "x86", 2007, 1.5, 0.1, "VINTAGE"), - "amd_16_4": HardwareEntry("amd_16_4", "AMD Phenom II X4 Deneb", "x86", 2009, 1.5, 0.05, "VINTAGE"), - "amd_16_6": HardwareEntry("amd_16_6", "AMD Phenom II X6 Thuban", "x86", 2010, 1.5, 0.1, "VINTAGE", "6-core"), - "phenom": HardwareEntry("phenom", "AMD Phenom", "x86", 2007, 1.5, 0.1, "VINTAGE"), - "phenom_ii": HardwareEntry("phenom_ii", "AMD Phenom II", "x86", 2009, 1.5, 0.05, "VINTAGE"), - - # AMD FX (2011) - "amd_21_1": HardwareEntry("amd_21_1", "AMD FX Bulldozer", "x86", 2011, 1.5, 0.1, "VINTAGE"), - "amd_21_2": HardwareEntry("amd_21_2", "AMD FX Piledriver", "x86", 2012, 1.5, 0.1, "VINTAGE"), - "fx": HardwareEntry("fx", "AMD FX", "x86", 2011, 1.5, 0.1, "VINTAGE"), - "bulldozer": HardwareEntry("bulldozer", "AMD FX Bulldozer", "x86", 2011, 1.5, 0.1, "VINTAGE"), - "piledriver": HardwareEntry("piledriver", "AMD FX Piledriver", "x86", 2012, 1.5, 0.1, "VINTAGE"), - - # ============ STANDARD TIER (1.0x) - Nehalem through Haswell ============ - "6_26": HardwareEntry("6_26", "Intel Core i7 Nehalem", "x86", 2008, 1.0, 0.0, "STANDARD"), - "6_30": HardwareEntry("6_30", "Intel Core i7 Lynnfield", "x86", 2009, 1.0, 0.0, "STANDARD"), - "6_37": HardwareEntry("6_37", "Intel Core Westmere", "x86", 2010, 1.0, 0.0, "STANDARD"), - "6_42": HardwareEntry("6_42", "Intel Core Sandy Bridge", "x86", 2011, 1.0, 0.0, "STANDARD"), - "6_58": HardwareEntry("6_58", "Intel Core Ivy Bridge", "x86", 2012, 1.0, 0.0, "STANDARD"), - "6_60": HardwareEntry("6_60", "Intel Core Haswell", "x86", 2013, 1.0, 0.0, "STANDARD"), - "nehalem": HardwareEntry("nehalem", "Intel Core Nehalem", "x86", 2008, 1.0, 0.0, "STANDARD"), - "sandy_bridge": HardwareEntry("sandy_bridge", "Intel Core Sandy Bridge", "x86", 2011, 1.0, 0.0, "STANDARD"), - "ivy_bridge": HardwareEntry("ivy_bridge", "Intel Core Ivy Bridge", "x86", 2012, 1.0, 0.0, "STANDARD"), - "haswell": HardwareEntry("haswell", "Intel Core Haswell", "x86", 
2013, 1.0, 0.0, "STANDARD"), - - # ============ PENALTY TIER (0.8x) - Modern x86-64 ============ - "6_61": HardwareEntry("6_61", "Intel Core Broadwell", "x86", 2014, 0.8, 0.0, "PENALTY"), - "6_78": HardwareEntry("6_78", "Intel Core Skylake", "x86", 2015, 0.8, 0.0, "PENALTY"), - "6_142": HardwareEntry("6_142", "Intel Core Kaby Lake", "x86", 2016, 0.8, 0.0, "PENALTY"), - "6_158": HardwareEntry("6_158", "Intel Core Coffee Lake", "x86", 2017, 0.8, 0.0, "PENALTY"), - "skylake": HardwareEntry("skylake", "Intel Core Skylake", "x86", 2015, 0.8, 0.0, "PENALTY"), - "kaby_lake": HardwareEntry("kaby_lake", "Intel Core Kaby Lake", "x86", 2016, 0.8, 0.0, "PENALTY"), - "coffee_lake": HardwareEntry("coffee_lake", "Intel Core Coffee Lake", "x86", 2017, 0.8, 0.0, "PENALTY"), - "alder_lake": HardwareEntry("alder_lake", "Intel Core Alder Lake", "x86", 2021, 0.8, 0.0, "PENALTY"), - "raptor_lake": HardwareEntry("raptor_lake", "Intel Core Raptor Lake", "x86", 2022, 0.8, 0.0, "PENALTY"), - - # AMD Ryzen (Modern - Penalty) - "amd_23_1": HardwareEntry("amd_23_1", "AMD Ryzen Zen", "x86", 2017, 0.8, 0.0, "PENALTY"), - "amd_23_8": HardwareEntry("amd_23_8", "AMD Ryzen Zen+", "x86", 2018, 0.8, 0.0, "PENALTY"), - "amd_23_49": HardwareEntry("amd_23_49", "AMD Ryzen Zen 2", "x86", 2019, 0.8, 0.0, "PENALTY"), - "amd_25_33": HardwareEntry("amd_25_33", "AMD Ryzen Zen 3", "x86", 2020, 0.8, 0.0, "PENALTY"), - "amd_25_97": HardwareEntry("amd_25_97", "AMD Ryzen Zen 4", "x86", 2022, 0.8, 0.0, "PENALTY"), - "ryzen": HardwareEntry("ryzen", "AMD Ryzen", "x86", 2017, 0.8, 0.0, "PENALTY"), - "zen": HardwareEntry("zen", "AMD Ryzen Zen", "x86", 2017, 0.8, 0.0, "PENALTY"), - "zen2": HardwareEntry("zen2", "AMD Ryzen Zen 2", "x86", 2019, 0.8, 0.0, "PENALTY"), - "zen3": HardwareEntry("zen3", "AMD Ryzen Zen 3", "x86", 2020, 0.8, 0.0, "PENALTY"), - "zen4": HardwareEntry("zen4", "AMD Ryzen Zen 4", "x86", 2022, 0.8, 0.0, "PENALTY"), -} - -# ============================================================================= -# 
POWERPC PROCESSOR DATABASE (by PVR - Processor Version Register) -# ============================================================================= - -POWERPC_PVR_DATABASE: Dict[str, HardwareEntry] = { - # ============ MYTHIC TIER (4.0x) - POWER1 / PowerPC 601 ============ - "0x0001": HardwareEntry("0x0001", "PowerPC 601", "powerpc", 1993, 4.0, 0.4, "MYTHIC", "First PowerPC"), - "0x0003": HardwareEntry("0x0003", "PowerPC 603", "powerpc", 1994, 3.5, 0.2, "LEGENDARY", "Low power"), - "0x0004": HardwareEntry("0x0004", "PowerPC 604", "powerpc", 1994, 3.5, 0.2, "LEGENDARY", "High performance"), - "0x0006": HardwareEntry("0x0006", "PowerPC 603e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), - "0x0007": HardwareEntry("0x0007", "PowerPC 603ev", "powerpc", 1997, 3.5, 0.15, "LEGENDARY"), - "0x0009": HardwareEntry("0x0009", "PowerPC 604e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), - "0x000A": HardwareEntry("0x000A", "PowerPC 604ev", "powerpc", 1997, 3.5, 0.15, "LEGENDARY"), - "601": HardwareEntry("601", "PowerPC 601", "powerpc", 1993, 4.0, 0.4, "MYTHIC"), - "603": HardwareEntry("603", "PowerPC 603", "powerpc", 1994, 3.5, 0.2, "LEGENDARY"), - "603e": HardwareEntry("603e", "PowerPC 603e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), - "604": HardwareEntry("604", "PowerPC 604", "powerpc", 1994, 3.5, 0.2, "LEGENDARY"), - "604e": HardwareEntry("604e", "PowerPC 604e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), - - # ============ LEGENDARY TIER (3.2x) - PowerPC G3 ============ - "0x0008": HardwareEntry("0x0008", "PowerPC 750 (G3)", "powerpc", 1997, 3.2, 0.1, "LEGENDARY", "G3"), - "0x7000": HardwareEntry("0x7000", "PowerPC 750CX (G3)", "powerpc", 1999, 3.2, 0.1, "LEGENDARY"), - "0x7002": HardwareEntry("0x7002", "PowerPC 750CXe (G3)", "powerpc", 2000, 3.2, 0.1, "LEGENDARY"), - "0x7003": HardwareEntry("0x7003", "PowerPC 750FX (G3)", "powerpc", 2002, 3.2, 0.15, "LEGENDARY"), - "0x7004": HardwareEntry("0x7004", "PowerPC 750GX (G3)", "powerpc", 2004, 3.2, 0.15, "LEGENDARY"), - "750": 
HardwareEntry("750", "PowerPC 750 (G3)", "powerpc", 1997, 3.2, 0.1, "LEGENDARY"), - "g3": HardwareEntry("g3", "PowerPC G3", "powerpc", 1997, 3.2, 0.1, "LEGENDARY"), - "750cx": HardwareEntry("750cx", "PowerPC 750CX (G3)", "powerpc", 1999, 3.2, 0.1, "LEGENDARY"), - "750fx": HardwareEntry("750fx", "PowerPC 750FX (G3)", "powerpc", 2002, 3.2, 0.15, "LEGENDARY"), - "750gx": HardwareEntry("750gx", "PowerPC 750GX (G3)", "powerpc", 2004, 3.2, 0.15, "LEGENDARY"), - - # ============ ANCIENT TIER (2.5x) - PowerPC G4 ============ - "0x000C": HardwareEntry("0x000C", "PowerPC 7400 (G4)", "powerpc", 1999, 2.5, 0.1, "ANCIENT", "AltiVec"), - "0x800C": HardwareEntry("0x800C", "PowerPC 7410 (G4)", "powerpc", 2000, 2.5, 0.1, "ANCIENT"), - "0x8000": HardwareEntry("0x8000", "PowerPC 7450 (G4)", "powerpc", 2001, 2.5, 0.1, "ANCIENT", "Improved G4"), - "0x8001": HardwareEntry("0x8001", "PowerPC 7445 (G4)", "powerpc", 2003, 2.5, 0.1, "ANCIENT"), - "0x8002": HardwareEntry("0x8002", "PowerPC 7455 (G4)", "powerpc", 2002, 2.5, 0.1, "ANCIENT"), - "0x8003": HardwareEntry("0x8003", "PowerPC 7447 (G4)", "powerpc", 2003, 2.5, 0.1, "ANCIENT"), - "0x8004": HardwareEntry("0x8004", "PowerPC 7448 (G4)", "powerpc", 2005, 2.5, 0.15, "ANCIENT", "Last G4"), - "7400": HardwareEntry("7400", "PowerPC 7400 (G4)", "powerpc", 1999, 2.5, 0.1, "ANCIENT"), - "7410": HardwareEntry("7410", "PowerPC 7410 (G4)", "powerpc", 2000, 2.5, 0.1, "ANCIENT"), - "7450": HardwareEntry("7450", "PowerPC 7450 (G4)", "powerpc", 2001, 2.5, 0.1, "ANCIENT"), - "7455": HardwareEntry("7455", "PowerPC 7455 (G4)", "powerpc", 2002, 2.5, 0.1, "ANCIENT"), - "7447": HardwareEntry("7447", "PowerPC 7447 (G4)", "powerpc", 2003, 2.5, 0.1, "ANCIENT"), - "7448": HardwareEntry("7448", "PowerPC 7448 (G4)", "powerpc", 2005, 2.5, 0.15, "ANCIENT"), - "g4": HardwareEntry("g4", "PowerPC G4", "powerpc", 1999, 2.5, 0.1, "ANCIENT"), - - # ============ ANCIENT TIER (2.0x) - PowerPC G5 ============ - "0x0039": HardwareEntry("0x0039", "PowerPC 970 (G5)", "powerpc", 
2003, 2.0, 0.1, "ANCIENT", "First G5"), - "0x003C": HardwareEntry("0x003C", "PowerPC 970FX (G5)", "powerpc", 2004, 2.0, 0.1, "ANCIENT", "90nm"), - "0x0044": HardwareEntry("0x0044", "PowerPC 970MP (G5)", "powerpc", 2005, 2.0, 0.15, "ANCIENT", "Dual-core"), - "970": HardwareEntry("970", "PowerPC 970 (G5)", "powerpc", 2003, 2.0, 0.1, "ANCIENT"), - "970fx": HardwareEntry("970fx", "PowerPC 970FX (G5)", "powerpc", 2004, 2.0, 0.1, "ANCIENT"), - "970mp": HardwareEntry("970mp", "PowerPC 970MP (G5)", "powerpc", 2005, 2.0, 0.15, "ANCIENT"), - "g5": HardwareEntry("g5", "PowerPC G5", "powerpc", 2003, 2.0, 0.1, "ANCIENT"), - - # ============ RARE POWERPC VARIANTS ============ - # IBM POWER series (Servers) - "power1": HardwareEntry("power1", "IBM POWER1", "powerpc", 1990, 4.0, 0.7, "MYTHIC", "Extremely rare"), - "power2": HardwareEntry("power2", "IBM POWER2", "powerpc", 1993, 4.0, 0.6, "MYTHIC", "Very rare"), - "power3": HardwareEntry("power3", "IBM POWER3", "powerpc", 1998, 3.5, 0.5, "LEGENDARY", "Rare server"), - "power4": HardwareEntry("power4", "IBM POWER4", "powerpc", 2001, 3.0, 0.4, "LEGENDARY", "First GHz"), - "power5": HardwareEntry("power5", "IBM POWER5", "powerpc", 2004, 2.5, 0.3, "ANCIENT"), - - # Freescale/NXP embedded PowerPC - "mpc5xx": HardwareEntry("mpc5xx", "Freescale MPC5xx", "powerpc", 1996, 3.5, 0.5, "LEGENDARY", "Automotive"), - "mpc8xx": HardwareEntry("mpc8xx", "Freescale MPC8xx", "powerpc", 1997, 3.5, 0.4, "LEGENDARY", "Networking"), - "e300": HardwareEntry("e300", "Freescale e300", "powerpc", 2004, 3.0, 0.3, "LEGENDARY"), - "e500": HardwareEntry("e500", "Freescale e500", "powerpc", 2003, 2.5, 0.3, "ANCIENT"), - "e600": HardwareEntry("e600", "Freescale e600", "powerpc", 2005, 2.5, 0.3, "ANCIENT"), - - # AMCC PowerPC - "ppc405": HardwareEntry("ppc405", "AMCC PPC405", "powerpc", 1999, 3.2, 0.4, "LEGENDARY", "Embedded"), - "ppc440": HardwareEntry("ppc440", "AMCC PPC440", "powerpc", 2002, 3.0, 0.3, "LEGENDARY"), - "ppc460": HardwareEntry("ppc460", "AMCC 
PPC460", "powerpc", 2006, 2.5, 0.3, "ANCIENT"), -} - -# ============================================================================= -# MOTOROLA 68K PROCESSOR DATABASE -# ============================================================================= - -M68K_DATABASE: Dict[str, HardwareEntry] = { - # ============ MYTHIC TIER (4.0x) ============ - "68000": HardwareEntry("68000", "Motorola 68000", "m68k", 1979, 4.0, 0.3, "MYTHIC", "Original Mac/Amiga"), - "68008": HardwareEntry("68008", "Motorola 68008", "m68k", 1982, 4.0, 0.4, "MYTHIC", "8-bit bus variant"), - "68010": HardwareEntry("68010", "Motorola 68010", "m68k", 1982, 4.0, 0.35, "MYTHIC", "Virtual memory"), - "68012": HardwareEntry("68012", "Motorola 68012", "m68k", 1983, 4.0, 0.6, "MYTHIC", "Very rare"), - "68020": HardwareEntry("68020", "Motorola 68020", "m68k", 1984, 4.0, 0.25, "MYTHIC", "32-bit"), - "68030": HardwareEntry("68030", "Motorola 68030", "m68k", 1987, 4.0, 0.2, "MYTHIC", "Integrated MMU"), - - # ============ LEGENDARY-HIGH TIER (3.8x) ============ - "68040": HardwareEntry("68040", "Motorola 68040", "m68k", 1990, 3.8, 0.2, "LEGENDARY", "Integrated FPU"), - "68lc040": HardwareEntry("68lc040", "Motorola 68LC040", "m68k", 1991, 3.8, 0.25, "LEGENDARY", "No FPU"), - "68060": HardwareEntry("68060", "Motorola 68060", "m68k", 1994, 3.8, 0.3, "LEGENDARY", "Final 68K"), - "68lc060": HardwareEntry("68lc060", "Motorola 68LC060", "m68k", 1995, 3.8, 0.35, "LEGENDARY"), - - # ============ RARE VARIANTS ============ - "cpu32": HardwareEntry("cpu32", "Motorola CPU32", "m68k", 1990, 3.8, 0.5, "LEGENDARY", "Embedded 68K"), - "coldfire": HardwareEntry("coldfire", "Freescale ColdFire", "m68k", 1994, 3.5, 0.3, "LEGENDARY", "68K compatible"), - "dragonball": HardwareEntry("dragonball", "Motorola DragonBall", "m68k", 1995, 3.5, 0.4, "LEGENDARY", "Palm PDAs"), -} - -# ============================================================================= -# CLASSIC COMPUTER CHIPSET DATABASE (Amiga, Atari, C64, etc.) 
-# ============================================================================= - -CLASSIC_CHIPSET_DATABASE: Dict[str, HardwareEntry] = { - # ============ AMIGA CHIPSETS (MYTHIC) ============ - "ocs": HardwareEntry("ocs", "Amiga OCS (Original Chip Set)", "amiga", 1985, 4.0, 0.3, "MYTHIC", "A1000/A500/A2000"), - "ecs": HardwareEntry("ecs", "Amiga ECS (Enhanced Chip Set)", "amiga", 1990, 4.0, 0.25, "MYTHIC", "A500+/A600/A3000"), - "aga": HardwareEntry("aga", "Amiga AGA (Advanced Graphics)", "amiga", 1992, 4.0, 0.2, "MYTHIC", "A1200/A4000"), - "agnus_8361": HardwareEntry("agnus_8361", "Agnus 8361 (PAL-A)", "amiga", 1985, 4.0, 0.35, "MYTHIC"), - "agnus_8367": HardwareEntry("agnus_8367", "Agnus 8367 (NTSC-A)", "amiga", 1985, 4.0, 0.35, "MYTHIC"), - "agnus_8370": HardwareEntry("agnus_8370", "Fat Agnus 8370", "amiga", 1987, 4.0, 0.3, "MYTHIC", "1MB"), - "agnus_8372": HardwareEntry("agnus_8372", "Fat Agnus 8372", "amiga", 1988, 4.0, 0.3, "MYTHIC", "ECS"), - "agnus_8375": HardwareEntry("agnus_8375", "Alice 8375", "amiga", 1992, 4.0, 0.25, "MYTHIC", "AGA"), - "denise_8362": HardwareEntry("denise_8362", "Denise 8362", "amiga", 1985, 4.0, 0.35, "MYTHIC", "OCS"), - "denise_8373": HardwareEntry("denise_8373", "Super Denise 8373", "amiga", 1990, 4.0, 0.3, "MYTHIC", "ECS"), - "lisa_8364": HardwareEntry("lisa_8364", "Lisa 8364", "amiga", 1992, 4.0, 0.25, "MYTHIC", "AGA"), - "paula_8364": HardwareEntry("paula_8364", "Paula 8364", "amiga", 1985, 4.0, 0.35, "MYTHIC", "Sound/IO"), - - # Amiga Accelerator Cards (RARE!) 
- "blizzard_1230": HardwareEntry("blizzard_1230", "Blizzard 1230 (68030)", "amiga", 1995, 4.0, 0.5, "MYTHIC", "A1200 accelerator"), - "blizzard_1260": HardwareEntry("blizzard_1260", "Blizzard 1260 (68060)", "amiga", 1997, 4.0, 0.6, "MYTHIC", "Very rare"), - "cyberstorm_060": HardwareEntry("cyberstorm_060", "CyberStorm 68060", "amiga", 1996, 4.0, 0.6, "MYTHIC", "A4000 accelerator"), - "apollo_68080": HardwareEntry("apollo_68080", "Apollo 68080 FPGA", "amiga", 2017, 3.5, 0.7, "LEGENDARY", "Modern retro"), - - # ============ ATARI CHIPSETS (MYTHIC) ============ - "shifter": HardwareEntry("shifter", "Atari ST Shifter", "atari", 1985, 4.0, 0.35, "MYTHIC", "Video"), - "glue": HardwareEntry("glue", "Atari ST GLUE", "atari", 1985, 4.0, 0.35, "MYTHIC", "Bus controller"), - "mmu": HardwareEntry("mmu", "Atari ST MMU", "atari", 1985, 4.0, 0.35, "MYTHIC", "Memory management"), - "blitter": HardwareEntry("blitter", "Atari ST BLiTTER", "atari", 1987, 4.0, 0.4, "MYTHIC", "STE/Mega ST"), - "videl": HardwareEntry("videl", "Atari Falcon VIDEL", "atari", 1992, 4.0, 0.5, "MYTHIC", "Falcon030 only"), - "dsp56001": HardwareEntry("dsp56001", "Motorola DSP56001", "atari", 1992, 4.0, 0.5, "MYTHIC", "Falcon030 DSP"), - - # ============ COMMODORE 64/128 (MYTHIC) ============ - "vic_ii": HardwareEntry("vic_ii", "MOS 6569 VIC-II (PAL)", "c64", 1982, 4.0, 0.25, "MYTHIC", "C64 video"), - "vic_ii_ntsc": HardwareEntry("vic_ii_ntsc", "MOS 6567 VIC-II (NTSC)", "c64", 1982, 4.0, 0.25, "MYTHIC"), - "sid_6581": HardwareEntry("sid_6581", "MOS 6581 SID", "c64", 1982, 4.0, 0.3, "MYTHIC", "C64 sound"), - "sid_8580": HardwareEntry("sid_8580", "MOS 8580 SID", "c64", 1986, 4.0, 0.35, "MYTHIC", "C64C sound"), - "cia_6526": HardwareEntry("cia_6526", "MOS 6526 CIA", "c64", 1982, 4.0, 0.25, "MYTHIC", "I/O"), - "pla_906114": HardwareEntry("pla_906114", "MOS 906114-01 PLA", "c64", 1982, 4.0, 0.3, "MYTHIC"), - "vdc_8563": HardwareEntry("vdc_8563", "MOS 8563 VDC", "c64", 1985, 4.0, 0.45, "MYTHIC", "C128 80-col"), - 
"mmu_8722": HardwareEntry("mmu_8722", "MOS 8722 MMU", "c64", 1985, 4.0, 0.45, "MYTHIC", "C128 only"), - - # ============ APPLE II (MYTHIC) ============ - "iou": HardwareEntry("iou", "Apple IOU", "apple2", 1977, 4.0, 0.4, "MYTHIC", "I/O controller"), - "mmu_apple2": HardwareEntry("mmu_apple2", "Apple II MMU", "apple2", 1983, 4.0, 0.4, "MYTHIC", "IIe/IIc"), - "iigs_mega2": HardwareEntry("iigs_mega2", "Apple IIgs Mega II", "apple2", 1986, 4.0, 0.5, "MYTHIC", "IIgs"), - "iigs_fpi": HardwareEntry("iigs_fpi", "Apple IIgs FPI", "apple2", 1986, 4.0, 0.5, "MYTHIC"), - - # ============ RARE/OBSCURE SYSTEMS (HIGH BONUS) ============ - # Sinclair ZX Spectrum - "ula_spectrum": HardwareEntry("ula_spectrum", "Ferranti ULA", "spectrum", 1982, 4.0, 0.35, "MYTHIC", "ZX Spectrum"), - - # BBC Micro - "bbc_video_ula": HardwareEntry("bbc_video_ula", "BBC Video ULA", "bbc", 1981, 4.0, 0.5, "MYTHIC"), - - # MSX - "v9938": HardwareEntry("v9938", "Yamaha V9938 VDP", "msx", 1985, 4.0, 0.4, "MYTHIC", "MSX2"), - "v9958": HardwareEntry("v9958", "Yamaha V9958 VDP", "msx", 1988, 4.0, 0.45, "MYTHIC", "MSX2+"), - - # TI-99/4A - "tms9900": HardwareEntry("tms9900", "TI TMS9900", "ti99", 1976, 4.0, 0.6, "MYTHIC", "16-bit!"), - "tms9918a": HardwareEntry("tms9918a", "TI TMS9918A VDP", "ti99", 1979, 4.0, 0.5, "MYTHIC"), - - # Tandy/Radio Shack - "coco_sam": HardwareEntry("coco_sam", "TRS-80 CoCo SAM", "tandy", 1980, 4.0, 0.5, "MYTHIC"), - "gime": HardwareEntry("gime", "GIME (CoCo 3)", "tandy", 1986, 4.0, 0.55, "MYTHIC", "Rare"), - - # Acorn Archimedes - "vidc1": HardwareEntry("vidc1", "ARM VIDC1", "acorn", 1987, 4.0, 0.6, "MYTHIC", "Archimedes"), - "memc1": HardwareEntry("memc1", "ARM MEMC1", "acorn", 1987, 4.0, 0.6, "MYTHIC"), - "ioc": HardwareEntry("ioc", "ARM IOC", "acorn", 1987, 4.0, 0.6, "MYTHIC"), -} - -# ============================================================================= -# WORKSTATION/SERVER PROCESSORS (SPARC, PA-RISC, Alpha, MIPS) -# 
============================================================================= - -WORKSTATION_DATABASE: Dict[str, HardwareEntry] = { - # ============ DEC ALPHA (LEGENDARY) ============ - "ev4": HardwareEntry("ev4", "DEC Alpha 21064 (EV4)", "alpha", 1992, 3.0, 0.5, "LEGENDARY", "First Alpha"), - "ev45": HardwareEntry("ev45", "DEC Alpha 21064A (EV45)", "alpha", 1994, 3.0, 0.45, "LEGENDARY"), - "ev5": HardwareEntry("ev5", "DEC Alpha 21164 (EV5)", "alpha", 1995, 3.0, 0.4, "LEGENDARY"), - "ev56": HardwareEntry("ev56", "DEC Alpha 21164A (EV56)", "alpha", 1996, 3.0, 0.35, "LEGENDARY"), - "pca56": HardwareEntry("pca56", "DEC Alpha 21164PC (PCA56)", "alpha", 1997, 3.0, 0.4, "LEGENDARY", "Low cost"), - "ev6": HardwareEntry("ev6", "DEC Alpha 21264 (EV6)", "alpha", 1998, 3.0, 0.35, "LEGENDARY"), - "ev67": HardwareEntry("ev67", "DEC Alpha 21264A (EV67)", "alpha", 1999, 3.0, 0.3, "LEGENDARY"), - "ev68": HardwareEntry("ev68", "DEC Alpha 21264C (EV68)", "alpha", 2001, 3.0, 0.35, "LEGENDARY"), - "ev7": HardwareEntry("ev7", "DEC Alpha 21364 (EV7)", "alpha", 2003, 3.0, 0.5, "LEGENDARY", "Final Alpha"), - "alpha": HardwareEntry("alpha", "DEC Alpha", "alpha", 1992, 3.0, 0.4, "LEGENDARY"), - - # ============ SUN SPARC (LEGENDARY) ============ - "sparc_v7": HardwareEntry("sparc_v7", "SPARC V7", "sparc", 1987, 3.0, 0.5, "LEGENDARY", "Sun-4"), - "sparc_v8": HardwareEntry("sparc_v8", "SPARC V8 (SuperSPARC)", "sparc", 1992, 3.0, 0.4, "LEGENDARY"), - "ultrasparc_i": HardwareEntry("ultrasparc_i", "UltraSPARC I", "sparc", 1995, 3.0, 0.35, "LEGENDARY"), - "ultrasparc_ii": HardwareEntry("ultrasparc_ii", "UltraSPARC II", "sparc", 1997, 3.0, 0.3, "LEGENDARY"), - "ultrasparc_iii": HardwareEntry("ultrasparc_iii", "UltraSPARC III", "sparc", 2001, 2.5, 0.3, "ANCIENT"), - "ultrasparc_iv": HardwareEntry("ultrasparc_iv", "UltraSPARC IV", "sparc", 2004, 2.5, 0.25, "ANCIENT"), - "sparc64": HardwareEntry("sparc64", "Fujitsu SPARC64", "sparc", 1995, 3.0, 0.4, "LEGENDARY"), - "sparc": HardwareEntry("sparc", 
"SPARC", "sparc", 1987, 3.0, 0.4, "LEGENDARY"), - - # ============ HP PA-RISC (LEGENDARY) ============ - "pa7000": HardwareEntry("pa7000", "HP PA-7000", "parisc", 1991, 3.0, 0.5, "LEGENDARY"), - "pa7100": HardwareEntry("pa7100", "HP PA-7100", "parisc", 1992, 3.0, 0.45, "LEGENDARY"), - "pa7200": HardwareEntry("pa7200", "HP PA-7200", "parisc", 1994, 3.0, 0.4, "LEGENDARY"), - "pa8000": HardwareEntry("pa8000", "HP PA-8000", "parisc", 1996, 3.0, 0.35, "LEGENDARY"), - "pa8200": HardwareEntry("pa8200", "HP PA-8200", "parisc", 1997, 3.0, 0.35, "LEGENDARY"), - "pa8500": HardwareEntry("pa8500", "HP PA-8500", "parisc", 1998, 3.0, 0.35, "LEGENDARY"), - "pa8600": HardwareEntry("pa8600", "HP PA-8600", "parisc", 2000, 2.5, 0.35, "ANCIENT"), - "pa8700": HardwareEntry("pa8700", "HP PA-8700", "parisc", 2001, 2.5, 0.35, "ANCIENT"), - "pa8800": HardwareEntry("pa8800", "HP PA-8800", "parisc", 2003, 2.5, 0.4, "ANCIENT", "Final PA-RISC"), - "parisc": HardwareEntry("parisc", "HP PA-RISC", "parisc", 1986, 3.0, 0.4, "LEGENDARY"), - - # ============ SGI MIPS (LEGENDARY) ============ - "r2000": HardwareEntry("r2000", "MIPS R2000", "mips", 1985, 3.5, 0.5, "LEGENDARY", "First MIPS"), - "r3000": HardwareEntry("r3000", "MIPS R3000", "mips", 1988, 3.5, 0.45, "LEGENDARY"), - "r4000": HardwareEntry("r4000", "MIPS R4000", "mips", 1991, 3.0, 0.4, "LEGENDARY", "64-bit"), - "r4400": HardwareEntry("r4400", "MIPS R4400", "mips", 1992, 3.0, 0.35, "LEGENDARY"), - "r4600": HardwareEntry("r4600", "MIPS R4600 Orion", "mips", 1994, 3.0, 0.3, "LEGENDARY"), - "r5000": HardwareEntry("r5000", "MIPS R5000", "mips", 1996, 3.0, 0.3, "LEGENDARY"), - "r8000": HardwareEntry("r8000", "MIPS R8000", "mips", 1994, 3.0, 0.5, "LEGENDARY", "Superscalar"), - "r10000": HardwareEntry("r10000", "MIPS R10000", "mips", 1996, 3.0, 0.35, "LEGENDARY"), - "r12000": HardwareEntry("r12000", "MIPS R12000", "mips", 1998, 3.0, 0.35, "LEGENDARY"), - "r14000": HardwareEntry("r14000", "MIPS R14000", "mips", 2001, 2.5, 0.35, "ANCIENT"), - 
"r16000": HardwareEntry("r16000", "MIPS R16000", "mips", 2002, 2.5, 0.4, "ANCIENT", "Final SGI MIPS"), - "mips": HardwareEntry("mips", "MIPS", "mips", 1985, 3.0, 0.4, "LEGENDARY"), - - # ============ IBM mainframes (VERY RARE) ============ - "s390": HardwareEntry("s390", "IBM S/390", "ibm", 1990, 3.0, 0.8, "LEGENDARY", "Mainframe"), - "z900": HardwareEntry("z900", "IBM zSeries z900", "ibm", 2000, 2.5, 0.6, "ANCIENT"), - "z990": HardwareEntry("z990", "IBM zSeries z990", "ibm", 2003, 2.5, 0.5, "ANCIENT"), -} - -# ============================================================================= -# ARM PROCESSORS (Vintage through Modern) -# ============================================================================= - -ARM_DATABASE: Dict[str, HardwareEntry] = { - # ============ LEGENDARY TIER (3.0x) - Early ARM ============ - "arm2": HardwareEntry("arm2", "ARM2", "arm", 1987, 4.0, 0.6, "MYTHIC", "Acorn Archimedes"), - "arm3": HardwareEntry("arm3", "ARM3", "arm", 1989, 4.0, 0.5, "MYTHIC"), - "arm6": HardwareEntry("arm6", "ARM6/ARM610", "arm", 1992, 3.5, 0.4, "LEGENDARY"), - "arm7": HardwareEntry("arm7", "ARM7", "arm", 1994, 3.5, 0.3, "LEGENDARY"), - "arm7tdmi": HardwareEntry("arm7tdmi", "ARM7TDMI", "arm", 1995, 3.5, 0.25, "LEGENDARY", "GBA"), - "strongarm": HardwareEntry("strongarm", "StrongARM SA-110", "arm", 1996, 3.0, 0.3, "LEGENDARY", "DEC/Intel"), - "sa1100": HardwareEntry("sa1100", "StrongARM SA-1100", "arm", 1998, 3.0, 0.3, "LEGENDARY", "iPAQ"), - "xscale": HardwareEntry("xscale", "Intel XScale", "arm", 2000, 2.5, 0.25, "ANCIENT", "PDAs"), - - # ============ ANCIENT TIER (2.0-2.5x) - ARM9/ARM11 ============ - "arm9": HardwareEntry("arm9", "ARM9", "arm", 1998, 2.5, 0.2, "ANCIENT"), - "arm926ej": HardwareEntry("arm926ej", "ARM926EJ-S", "arm", 2001, 2.5, 0.2, "ANCIENT"), - "arm11": HardwareEntry("arm11", "ARM11", "arm", 2003, 2.0, 0.15, "ANCIENT", "iPhone 1"), - "arm1176": HardwareEntry("arm1176", "ARM1176JZF-S", "arm", 2003, 2.0, 0.15, "ANCIENT", "RPi 1"), - - # 
============ VINTAGE TIER (1.5x) - Cortex-A ============ - "cortex_a8": HardwareEntry("cortex_a8", "ARM Cortex-A8", "arm", 2005, 1.5, 0.1, "VINTAGE", "iPhone 3GS"), - "cortex_a9": HardwareEntry("cortex_a9", "ARM Cortex-A9", "arm", 2007, 1.5, 0.05, "VINTAGE"), - "cortex_a15": HardwareEntry("cortex_a15", "ARM Cortex-A15", "arm", 2010, 1.5, 0.05, "VINTAGE"), - - # ============ PENALTY TIER (0.8x) - Modern ARM ============ - "cortex_a53": HardwareEntry("cortex_a53", "ARM Cortex-A53", "arm", 2012, 1.0, 0.0, "STANDARD"), - "cortex_a72": HardwareEntry("cortex_a72", "ARM Cortex-A72", "arm", 2015, 0.8, 0.0, "PENALTY"), - "cortex_a76": HardwareEntry("cortex_a76", "ARM Cortex-A76", "arm", 2018, 0.8, 0.0, "PENALTY"), - "cortex_x1": HardwareEntry("cortex_x1", "ARM Cortex-X1", "arm", 2020, 0.8, 0.0, "PENALTY"), - - # Apple Silicon (PENALTY) - "m1": HardwareEntry("m1", "Apple M1", "arm", 2020, 0.8, 0.0, "PENALTY", "Modern ARM"), - "m1_pro": HardwareEntry("m1_pro", "Apple M1 Pro", "arm", 2021, 0.8, 0.0, "PENALTY"), - "m1_max": HardwareEntry("m1_max", "Apple M1 Max", "arm", 2021, 0.8, 0.0, "PENALTY"), - "m1_ultra": HardwareEntry("m1_ultra", "Apple M1 Ultra", "arm", 2022, 0.8, 0.0, "PENALTY"), - "m2": HardwareEntry("m2", "Apple M2", "arm", 2022, 0.8, 0.0, "PENALTY"), - "m2_pro": HardwareEntry("m2_pro", "Apple M2 Pro", "arm", 2023, 0.8, 0.0, "PENALTY"), - "m2_max": HardwareEntry("m2_max", "Apple M2 Max", "arm", 2023, 0.8, 0.0, "PENALTY"), - "m3": HardwareEntry("m3", "Apple M3", "arm", 2023, 0.8, 0.0, "PENALTY"), - "m3_pro": HardwareEntry("m3_pro", "Apple M3 Pro", "arm", 2023, 0.8, 0.0, "PENALTY"), - "m3_max": HardwareEntry("m3_max", "Apple M3 Max", "arm", 2023, 0.8, 0.0, "PENALTY"), - "apple_silicon": HardwareEntry("apple_silicon", "Apple Silicon", "arm", 2020, 0.8, 0.0, "PENALTY"), -} - -# ============================================================================= -# VINTAGE GRAPHICS CARDS (BONUS MULTIPLIERS!) 
-# ============================================================================= - -GRAPHICS_DATABASE: Dict[str, HardwareEntry] = { - # ============ MYTHIC/LEGENDARY GRAPHICS ============ - # 3dfx Voodoo (MYTHIC!) - "voodoo1": HardwareEntry("voodoo1", "3dfx Voodoo Graphics", "gpu", 1996, 0.0, 0.5, "MYTHIC", "First 3D accelerator"), - "voodoo2": HardwareEntry("voodoo2", "3dfx Voodoo2", "gpu", 1998, 0.0, 0.4, "MYTHIC", "SLI!"), - "voodoo_banshee": HardwareEntry("voodoo_banshee", "3dfx Voodoo Banshee", "gpu", 1998, 0.0, 0.35, "LEGENDARY"), - "voodoo3": HardwareEntry("voodoo3", "3dfx Voodoo3", "gpu", 1999, 0.0, 0.3, "LEGENDARY"), - "voodoo4": HardwareEntry("voodoo4", "3dfx Voodoo4", "gpu", 2000, 0.0, 0.4, "LEGENDARY", "Rare"), - "voodoo5": HardwareEntry("voodoo5", "3dfx Voodoo5", "gpu", 2000, 0.0, 0.5, "LEGENDARY", "Very rare"), - "voodoo5_6000": HardwareEntry("voodoo5_6000", "3dfx Voodoo5 6000", "gpu", 2000, 0.0, 0.9, "LEGENDARY", "Extremely rare"), - - # S3 (MYTHIC/LEGENDARY) - "virge": HardwareEntry("virge", "S3 ViRGE", "gpu", 1995, 0.0, 0.35, "MYTHIC", "First consumer 3D"), - "virge_dx": HardwareEntry("virge_dx", "S3 ViRGE/DX", "gpu", 1996, 0.0, 0.3, "MYTHIC"), - "savage3d": HardwareEntry("savage3d", "S3 Savage3D", "gpu", 1998, 0.0, 0.3, "LEGENDARY"), - "savage4": HardwareEntry("savage4", "S3 Savage4", "gpu", 1999, 0.0, 0.25, "LEGENDARY"), - "savage2000": HardwareEntry("savage2000", "S3 Savage2000", "gpu", 1999, 0.0, 0.35, "LEGENDARY", "Rare"), - - # ATI Rage (LEGENDARY) - "rage_pro": HardwareEntry("rage_pro", "ATI Rage Pro", "gpu", 1997, 0.0, 0.25, "LEGENDARY"), - "rage_128": HardwareEntry("rage_128", "ATI Rage 128", "gpu", 1999, 0.0, 0.2, "LEGENDARY"), - "rage_fury": HardwareEntry("rage_fury", "ATI Rage Fury MAXX", "gpu", 1999, 0.0, 0.4, "LEGENDARY", "Dual GPU"), - "radeon_ddr": HardwareEntry("radeon_ddr", "ATI Radeon DDR", "gpu", 2000, 0.0, 0.2, "LEGENDARY"), - "radeon_7200": HardwareEntry("radeon_7200", "ATI Radeon 7200", "gpu", 2001, 0.0, 0.15, "LEGENDARY"), - 
- # NVIDIA (LEGENDARY/ANCIENT) - "riva_128": HardwareEntry("riva_128", "NVIDIA RIVA 128", "gpu", 1997, 0.0, 0.35, "LEGENDARY"), - "riva_tnt": HardwareEntry("riva_tnt", "NVIDIA RIVA TNT", "gpu", 1998, 0.0, 0.3, "LEGENDARY"), - "tnt2": HardwareEntry("tnt2", "NVIDIA TNT2", "gpu", 1999, 0.0, 0.25, "LEGENDARY"), - "geforce_256": HardwareEntry("geforce_256", "NVIDIA GeForce 256", "gpu", 1999, 0.0, 0.25, "LEGENDARY", "First GeForce"), - "geforce2": HardwareEntry("geforce2", "NVIDIA GeForce2", "gpu", 2000, 0.0, 0.2, "LEGENDARY"), - "geforce3": HardwareEntry("geforce3", "NVIDIA GeForce3", "gpu", 2001, 0.0, 0.15, "ANCIENT"), - "geforce4": HardwareEntry("geforce4", "NVIDIA GeForce4", "gpu", 2002, 0.0, 0.15, "ANCIENT"), - - # Matrox (RARE!) - "millennium": HardwareEntry("millennium", "Matrox Millennium", "gpu", 1995, 0.0, 0.5, "LEGENDARY", "Professional"), - "mystique": HardwareEntry("mystique", "Matrox Mystique", "gpu", 1996, 0.0, 0.4, "LEGENDARY"), - "g200": HardwareEntry("g200", "Matrox G200", "gpu", 1998, 0.0, 0.35, "LEGENDARY"), - "g400": HardwareEntry("g400", "Matrox G400", "gpu", 1999, 0.0, 0.35, "LEGENDARY", "Best 2D"), - "parhelia": HardwareEntry("parhelia", "Matrox Parhelia", "gpu", 2002, 0.0, 0.5, "LEGENDARY", "Triple-head"), - - # Number Nine (VERY RARE!) - "imagine_128": HardwareEntry("imagine_128", "Number Nine Imagine 128", "gpu", 1995, 0.0, 0.6, "LEGENDARY", "Very rare"), - "revolution_3d": HardwareEntry("revolution_3d", "Number Nine Revolution 3D", "gpu", 1997, 0.0, 0.7, "LEGENDARY", "Extremely rare"), - "revolution_iv": HardwareEntry("revolution_iv", "Number Nine Revolution IV", "gpu", 1998, 0.0, 0.7, "LEGENDARY"), - - # Rendition (MYTHIC - VERY RARE!) 
- "verite_v1000": HardwareEntry("verite_v1000", "Rendition Verite V1000", "gpu", 1995, 0.0, 0.7, "MYTHIC", "Extremely rare"), - "verite_v2100": HardwareEntry("verite_v2100", "Rendition Verite V2100", "gpu", 1997, 0.0, 0.6, "MYTHIC", "Very rare"), - "verite_v2200": HardwareEntry("verite_v2200", "Rendition Verite V2200", "gpu", 1998, 0.0, 0.6, "MYTHIC", "Very rare"), - - # PowerVR (RARE!) - "pcx1": HardwareEntry("pcx1", "NEC PowerVR PCX1", "gpu", 1996, 0.0, 0.6, "LEGENDARY", "Tile-based"), - "pcx2": HardwareEntry("pcx2", "NEC PowerVR PCX2", "gpu", 1997, 0.0, 0.5, "LEGENDARY"), - "kyro": HardwareEntry("kyro", "PowerVR Kyro", "gpu", 2000, 0.0, 0.4, "LEGENDARY"), - "kyro_ii": HardwareEntry("kyro_ii", "PowerVR Kyro II", "gpu", 2001, 0.0, 0.35, "LEGENDARY"), -} - - -# ============================================================================= -# HARDWARE LOOKUP FUNCTIONS -# ============================================================================= - -def normalize_id(hw_id: str) -> str: - """Normalize hardware ID for lookup""" - return hw_id.lower().strip().replace(" ", "_").replace("-", "_") - -def lookup_hardware(hw_id: str, family: Optional[str] = None) -> Optional[HardwareEntry]: - """ - Look up hardware by ID with optional family hint. - Returns the HardwareEntry if found, None otherwise. 
- """ - norm_id = normalize_id(hw_id) - - # Try specific databases based on family hint - databases = [] - if family: - family_lower = family.lower() - if "x86" in family_lower or "intel" in family_lower or "amd" in family_lower: - databases.append(X86_CPUID_DATABASE) - elif "powerpc" in family_lower or "ppc" in family_lower: - databases.append(POWERPC_PVR_DATABASE) - elif "m68k" in family_lower or "68" in family_lower or "motorola" in family_lower: - databases.append(M68K_DATABASE) - elif "arm" in family_lower or "apple" in family_lower: - databases.append(ARM_DATABASE) - elif any(x in family_lower for x in ["sparc", "alpha", "mips", "parisc", "ibm"]): - databases.append(WORKSTATION_DATABASE) - elif any(x in family_lower for x in ["amiga", "atari", "c64", "commodore", "apple2", "spectrum", "msx"]): - databases.append(CLASSIC_CHIPSET_DATABASE) - elif any(x in family_lower for x in ["gpu", "voodoo", "geforce", "radeon", "matrox"]): - databases.append(GRAPHICS_DATABASE) - - # Add all databases as fallback - databases.extend([ - X86_CPUID_DATABASE, - POWERPC_PVR_DATABASE, - M68K_DATABASE, - ARM_DATABASE, - WORKSTATION_DATABASE, - CLASSIC_CHIPSET_DATABASE, - GRAPHICS_DATABASE, - ]) - - # Search through databases - for db in databases: - if norm_id in db: - return db[norm_id] - - # Try partial matching for common variants - for key, entry in db.items(): - if norm_id in key or key in norm_id: - return entry - - return None - -def calculate_poa_multiplier( - device_family: str, - device_arch: str, - device_model: Optional[str] = None, - chipset_ids: Optional[List[str]] = None, - gpu_id: Optional[str] = None, -) -> Tuple[float, str, float, str]: - """ - Calculate PoA multiplier based on hardware detection. 
- - Returns: - Tuple of (base_multiplier, tier_name, rarity_bonus, hardware_name) - """ - family_lower = device_family.lower() if device_family else "" - arch_lower = device_arch.lower() if device_arch else "" - model_lower = device_model.lower() if device_model else "" - - # Default values - base_mult = 1.0 - tier = "STANDARD" - rarity = 0.0 - hw_name = "Unknown Hardware" - - # Try to look up the exact hardware - entry = None - - # Try arch first - if device_arch: - entry = lookup_hardware(device_arch, device_family) - - # Try model if no match - if not entry and device_model: - entry = lookup_hardware(device_model, device_family) - - # Try chipset IDs - if not entry and chipset_ids: - for chip_id in chipset_ids: - entry = lookup_hardware(chip_id, device_family) - if entry: - break - - # If found in database, use those values - if entry: - base_mult = entry.base_multiplier - tier = entry.tier - rarity = entry.rarity_bonus - hw_name = entry.name - else: - # Fallback to family-based detection - if "m68k" in family_lower or "68" in arch_lower or "motorola" in family_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.2, "Motorola 68K" - elif "amiga" in family_lower or "amiga" in arch_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.3, "Amiga" - elif "atari" in family_lower or "atari" in arch_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.35, "Atari ST" - elif "c64" in family_lower or "commodore" in family_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.25, "Commodore 64" - elif "386" in arch_lower or "i386" in arch_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.3, "Intel 386" - elif "286" in arch_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.4, "Intel 286" - elif "486" in arch_lower or "i486" in arch_lower: - base_mult, tier, rarity, hw_name = 3.8, "LEGENDARY", 0.2, "Intel 486" - elif "pentium" in arch_lower and any(x in arch_lower for x in ["mmx", "p5", "p54", "p55", " 1", "_1"]): - 
base_mult, tier, rarity, hw_name = 3.5, "LEGENDARY", 0.15, "Pentium 1" - elif "pentium" in arch_lower and any(x in arch_lower for x in [" 2", "_2", "ii", "klamath", "deschutes"]): - base_mult, tier, rarity, hw_name = 3.2, "LEGENDARY", 0.1, "Pentium II" - elif "pentium" in arch_lower and any(x in arch_lower for x in [" 3", "_3", "iii", "katmai", "coppermine"]): - base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.1, "Pentium III" - elif "pentium" in arch_lower and any(x in arch_lower for x in [" 4", "_4", "iv", "willamette", "northwood"]): - base_mult, tier, rarity, hw_name = 2.5, "ANCIENT", 0.05, "Pentium 4" - elif "powerpc" in family_lower or "ppc" in family_lower: - if "601" in arch_lower: - base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.4, "PowerPC 601" - elif "603" in arch_lower or "604" in arch_lower: - base_mult, tier, rarity, hw_name = 3.5, "LEGENDARY", 0.15, "PowerPC 603/604" - elif "g3" in arch_lower or "750" in arch_lower: - base_mult, tier, rarity, hw_name = 3.2, "LEGENDARY", 0.1, "PowerPC G3" - elif "g4" in arch_lower or "74" in arch_lower: - base_mult, tier, rarity, hw_name = 2.5, "ANCIENT", 0.1, "PowerPC G4" - elif "g5" in arch_lower or "970" in arch_lower: - base_mult, tier, rarity, hw_name = 2.0, "ANCIENT", 0.1, "PowerPC G5" - else: - base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.2, "PowerPC" - elif "alpha" in family_lower: - base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "DEC Alpha" - elif "sparc" in family_lower: - base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "SPARC" - elif "mips" in family_lower: - base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "MIPS" - elif "parisc" in family_lower or "pa-risc" in family_lower: - base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "PA-RISC" - elif "core2" in arch_lower or "core 2" in arch_lower: - base_mult, tier, rarity, hw_name = 1.5, "VINTAGE", 0.05, "Core 2" - elif "core" in arch_lower and "duo" in arch_lower: - base_mult, tier, rarity, hw_name = 
2.0, "ANCIENT", 0.1, "Core Duo" - elif any(x in arch_lower for x in ["m1", "m2", "m3", "apple_silicon", "apple silicon"]): - base_mult, tier, rarity, hw_name = 0.8, "PENALTY", 0.0, "Apple Silicon" - elif "arm" in family_lower and any(x in arch_lower for x in ["aarch64", "armv8", "cortex-a7"]): - base_mult, tier, rarity, hw_name = 0.8, "PENALTY", 0.0, "Modern ARM" - elif any(x in arch_lower for x in ["ryzen", "zen", "skylake", "alder", "raptor"]): - base_mult, tier, rarity, hw_name = 0.8, "PENALTY", 0.0, "Modern x86-64" - - # Check for GPU bonus - if gpu_id: - gpu_entry = lookup_hardware(gpu_id, "gpu") - if gpu_entry and gpu_entry.rarity_bonus > 0: - rarity += gpu_entry.rarity_bonus * 0.5 # 50% of GPU rarity bonus added - - return (base_mult, tier, rarity, hw_name) - -def get_total_multiplier(base_mult: float, rarity_bonus: float) -> float: - """Calculate total multiplier including rarity bonus""" - return base_mult + (base_mult * rarity_bonus) - - -# ============================================================================= -# CONVENIENCE FUNCTIONS FOR RIP SERVICE -# ============================================================================= - -def get_poa_info_for_miner(signals: dict) -> dict: - """ - Process miner attestation signals and return PoA info. 
- - Args: - signals: Dict containing device info from attestation - - Returns: - Dict with multiplier info for database storage - """ - device = signals.get("device", {}) - device_family = device.get("family", signals.get("device_family", "")) - device_arch = device.get("arch", signals.get("device_arch", "")) - device_model = device.get("model", signals.get("device_model", "")) - - # Get chipset IDs if available - chipset_ids = [] - if "chipset" in signals: - chipset_ids.append(signals["chipset"]) - if "pci_ids" in signals: - chipset_ids.extend(signals["pci_ids"]) - if "cpu_id" in signals: - chipset_ids.append(signals["cpu_id"]) - - # Get GPU ID if available - gpu_id = signals.get("gpu", signals.get("gpu_id")) - - base_mult, tier, rarity, hw_name = calculate_poa_multiplier( - device_family, device_arch, device_model, chipset_ids, gpu_id - ) - - total_mult = get_total_multiplier(base_mult, rarity) - - return { - "antiquity_multiplier": round(total_mult, 2), - "base_multiplier": base_mult, - "rarity_bonus": round(rarity, 3), - "tier": tier, - "hardware_type": hw_name, - "device_family": device_family, - "device_arch": device_arch, - } - - -# ============================================================================= -# STATISTICS AND REPORTING -# ============================================================================= - -def get_database_stats() -> dict: - """Get statistics about the hardware database""" - all_dbs = { - "x86": X86_CPUID_DATABASE, - "powerpc": POWERPC_PVR_DATABASE, - "m68k": M68K_DATABASE, - "classic": CLASSIC_CHIPSET_DATABASE, - "workstation": WORKSTATION_DATABASE, - "arm": ARM_DATABASE, - "graphics": GRAPHICS_DATABASE, - } - - stats = { - "total_entries": 0, - "by_family": {}, - "by_tier": { - "MYTHIC": 0, - "LEGENDARY": 0, - "ANCIENT": 0, - "VINTAGE": 0, - "STANDARD": 0, - "PENALTY": 0, - }, - "rarest_hardware": [], - } - - all_entries = [] - for db_name, db in all_dbs.items(): - stats["by_family"][db_name] = len(db) - stats["total_entries"] 
+= len(db) - - for entry in db.values(): - stats["by_tier"][entry.tier] += 1 - all_entries.append(entry) - - # Find rarest hardware (highest rarity bonus) - all_entries.sort(key=lambda x: x.rarity_bonus, reverse=True) - stats["rarest_hardware"] = [ - {"name": e.name, "rarity": e.rarity_bonus, "tier": e.tier} - for e in all_entries[:20] - ] - - return stats - - -if __name__ == "__main__": - # Print database statistics - stats = get_database_stats() - print("=" * 60) - print("RustChain PoA Hardware Database Statistics") - print("=" * 60) - print(f"\nTotal hardware entries: {stats['total_entries']}") - print("\nBy family:") - for family, count in stats['by_family'].items(): - print(f" {family:15} {count:4} entries") - print("\nBy tier:") - for tier, count in stats['by_tier'].items(): - print(f" {tier:12} {count:4} entries") - print("\nTop 10 rarest hardware (highest bonus):") - for i, hw in enumerate(stats['rarest_hardware'][:10], 1): - print(f" {i:2}. {hw['name']:35} +{hw['rarity']*100:.0f}% ({hw['tier']})") - - # Test some lookups - print("\n" + "=" * 60) - print("Test Lookups") - print("=" * 60) - - test_cases = [ - ("PowerPC", "G4"), - ("x86", "486"), - ("x86", "Pentium"), - ("m68k", "68030"), - ("powerpc", "601"), - ("arm", "m1"), - ("x86", "ryzen"), - ] - - for family, arch in test_cases: - base, tier, rarity, name = calculate_poa_multiplier(family, arch) - total = get_total_multiplier(base, rarity) - print(f"\n{family}/{arch}:") - print(f" Hardware: {name}") - print(f" Tier: {tier}") - print(f" Base: {base}x, Rarity: +{rarity*100:.0f}%, Total: {total:.2f}x") +#!/usr/bin/env python3 +""" +RustChain Proof of Antiquity - Hardware Database +================================================ +Comprehensive database of vintage and rare hardware for PoA multiplier calculation. +Includes CPUID values, PVR codes, chipset IDs, and rarity bonuses. 
+ +Reference databases used: +- Intel/AMD CPUID documentation +- IBM PowerPC Processor Version Register (PVR) values +- Amiga Hardware Reference Manual +- PCI ID Repository (pci-ids.ucw.cz) +- USB ID Repository +""" + +from dataclasses import dataclass +from typing import Optional, Dict, List, Tuple +import re + +@dataclass +class HardwareEntry: + """Single hardware entry in the database""" + id: str # Unique identifier (CPUID, PVR, chipset ID) + name: str # Human-readable name + family: str # Hardware family (x86, powerpc, m68k, etc.) + year: int # Release year (approximate) + base_multiplier: float # Base PoA multiplier + rarity_bonus: float # Additional bonus for rare hardware (0.0 - 1.0) + tier: str # MYTHIC, LEGENDARY, ANCIENT, VINTAGE, STANDARD, PENALTY + notes: str = "" # Additional notes + +# ============================================================================= +# x86 PROCESSOR DATABASE (by CPUID Family/Model/Stepping) +# Format: "family_model" or "family_model_stepping" +# ============================================================================= + +X86_CPUID_DATABASE: Dict[str, HardwareEntry] = { + # ============ MYTHIC TIER (4.0x) - Pre-486 ============ + # Intel 8086/8088 (1978-1979) + "8086": HardwareEntry("8086", "Intel 8086", "x86", 1978, 4.0, 0.5, "MYTHIC", "Original x86"), + "8088": HardwareEntry("8088", "Intel 8088", "x86", 1979, 4.0, 0.5, "MYTHIC", "IBM PC original"), + + # Intel 80186/80188 (1982) + "80186": HardwareEntry("80186", "Intel 80186", "x86", 1982, 4.0, 0.6, "MYTHIC", "Embedded variant"), + "80188": HardwareEntry("80188", "Intel 80188", "x86", 1982, 4.0, 0.6, "MYTHIC", "Embedded 8-bit bus"), + + # Intel 80286 (1982) + "2_0": HardwareEntry("2_0", "Intel 80286", "x86", 1982, 4.0, 0.4, "MYTHIC", "Protected mode"), + "286": HardwareEntry("286", "Intel 80286", "x86", 1982, 4.0, 0.4, "MYTHIC"), + + # Intel 80386 (1985) + "3_0": HardwareEntry("3_0", "Intel 80386DX", "x86", 1985, 4.0, 0.3, "MYTHIC", "32-bit x86"), + "3_2": 
HardwareEntry("3_2", "Intel 80386SX", "x86", 1988, 4.0, 0.25, "MYTHIC", "16-bit bus"), + "3_4": HardwareEntry("3_4", "Intel 80386SL", "x86", 1990, 4.0, 0.35, "MYTHIC", "Low power"), + "386": HardwareEntry("386", "Intel 80386", "x86", 1985, 4.0, 0.3, "MYTHIC"), + + # AMD Am386 variants + "amd_386": HardwareEntry("amd_386", "AMD Am386", "x86", 1991, 4.0, 0.35, "MYTHIC", "AMD clone"), + + # Cyrix 386 variants + "cyrix_386": HardwareEntry("cyrix_386", "Cyrix Cx486SLC", "x86", 1992, 4.0, 0.4, "MYTHIC", "386 pin-compatible"), + + # ============ LEGENDARY-HIGH TIER (3.8x) - 486 ============ + # Intel 486 (1989) + "4_0": HardwareEntry("4_0", "Intel 486DX", "x86", 1989, 3.8, 0.2, "LEGENDARY", "Integrated FPU"), + "4_1": HardwareEntry("4_1", "Intel 486DX-50", "x86", 1990, 3.8, 0.25, "LEGENDARY", "50MHz variant"), + "4_2": HardwareEntry("4_2", "Intel 486SX", "x86", 1991, 3.8, 0.15, "LEGENDARY", "No FPU"), + "4_3": HardwareEntry("4_3", "Intel 486DX2", "x86", 1992, 3.8, 0.2, "LEGENDARY", "Clock doubled"), + "4_4": HardwareEntry("4_4", "Intel 486SL", "x86", 1992, 3.8, 0.3, "LEGENDARY", "Mobile/low power"), + "4_5": HardwareEntry("4_5", "Intel 486SX2", "x86", 1994, 3.8, 0.2, "LEGENDARY"), + "4_7": HardwareEntry("4_7", "Intel 486DX2-WB", "x86", 1994, 3.8, 0.2, "LEGENDARY", "Write-back cache"), + "4_8": HardwareEntry("4_8", "Intel 486DX4", "x86", 1994, 3.8, 0.2, "LEGENDARY", "Clock tripled"), + "4_9": HardwareEntry("4_9", "Intel 486DX4-WB", "x86", 1994, 3.8, 0.2, "LEGENDARY"), + "486": HardwareEntry("486", "Intel 486", "x86", 1989, 3.8, 0.2, "LEGENDARY"), + + # AMD 486 variants (often higher clocks) + "amd_4_3": HardwareEntry("amd_4_3", "AMD Am486DX2", "x86", 1993, 3.8, 0.25, "LEGENDARY"), + "amd_4_7": HardwareEntry("amd_4_7", "AMD Am486DX4", "x86", 1994, 3.8, 0.25, "LEGENDARY"), + "amd_4_8": HardwareEntry("amd_4_8", "AMD Am5x86", "x86", 1995, 3.8, 0.3, "LEGENDARY", "486 socket, P75 perf"), + "am5x86": HardwareEntry("am5x86", "AMD Am5x86", "x86", 1995, 3.8, 0.3, "LEGENDARY"), + + # 
Cyrix 486 variants + "cyrix_4_4": HardwareEntry("cyrix_4_4", "Cyrix Cx486DX2", "x86", 1993, 3.8, 0.35, "LEGENDARY", "Rare"), + "cyrix_4_9": HardwareEntry("cyrix_4_9", "Cyrix Cx5x86", "x86", 1995, 3.8, 0.4, "LEGENDARY", "Rare Cyrix"), + + # ============ LEGENDARY TIER (3.5x) - Pentium 1 ============ + # Intel Pentium (P5) (1993) + "5_1": HardwareEntry("5_1", "Intel Pentium 60/66", "x86", 1993, 3.5, 0.2, "LEGENDARY", "First Pentium"), + "5_2": HardwareEntry("5_2", "Intel Pentium 75-200", "x86", 1994, 3.5, 0.15, "LEGENDARY", "P54C"), + "5_3": HardwareEntry("5_3", "Intel Pentium OverDrive", "x86", 1995, 3.5, 0.3, "LEGENDARY", "Upgrade chip"), + "5_4": HardwareEntry("5_4", "Intel Pentium MMX", "x86", 1997, 3.5, 0.1, "LEGENDARY", "P55C with MMX"), + "5_7": HardwareEntry("5_7", "Intel Pentium MMX Mobile", "x86", 1997, 3.5, 0.2, "LEGENDARY"), + "5_8": HardwareEntry("5_8", "Intel Pentium MMX Mobile", "x86", 1998, 3.5, 0.2, "LEGENDARY"), + "pentium": HardwareEntry("pentium", "Intel Pentium", "x86", 1993, 3.5, 0.15, "LEGENDARY"), + "p5": HardwareEntry("p5", "Intel Pentium P5", "x86", 1993, 3.5, 0.15, "LEGENDARY"), + "p54c": HardwareEntry("p54c", "Intel Pentium P54C", "x86", 1994, 3.5, 0.15, "LEGENDARY"), + "p55c": HardwareEntry("p55c", "Intel Pentium MMX P55C", "x86", 1997, 3.5, 0.1, "LEGENDARY"), + + # AMD K5 (1996) - Pentium competitor + "amd_5_0": HardwareEntry("amd_5_0", "AMD K5 PR75-PR100", "x86", 1996, 3.5, 0.3, "LEGENDARY", "AMD's first x86"), + "amd_5_1": HardwareEntry("amd_5_1", "AMD K5 PR120-PR133", "x86", 1996, 3.5, 0.3, "LEGENDARY"), + "amd_5_2": HardwareEntry("amd_5_2", "AMD K5 PR150-PR200", "x86", 1996, 3.5, 0.3, "LEGENDARY"), + "k5": HardwareEntry("k5", "AMD K5", "x86", 1996, 3.5, 0.3, "LEGENDARY"), + + # Cyrix 6x86 (1996) - Pentium competitor (actually family 5 compatible) + "cyrix_5_2": HardwareEntry("cyrix_5_2", "Cyrix 6x86", "x86", 1996, 3.5, 0.4, "LEGENDARY", "Rare Cyrix"), + "cyrix_5_4": HardwareEntry("cyrix_5_4", "Cyrix 6x86MX", "x86", 1997, 3.5, 0.4, 
"LEGENDARY", "Rare"), + "6x86": HardwareEntry("6x86", "Cyrix 6x86", "x86", 1996, 3.5, 0.4, "LEGENDARY"), + + # IDT/Centaur WinChip (1997) + "idt_5_4": HardwareEntry("idt_5_4", "IDT WinChip C6", "x86", 1997, 3.5, 0.5, "LEGENDARY", "Very rare"), + "idt_5_8": HardwareEntry("idt_5_8", "IDT WinChip 2", "x86", 1998, 3.5, 0.5, "LEGENDARY", "Very rare"), + "winchip": HardwareEntry("winchip", "IDT WinChip", "x86", 1997, 3.5, 0.5, "LEGENDARY"), + + # NexGen Nx586 (1994) - Very rare + "nexgen_5": HardwareEntry("nexgen_5", "NexGen Nx586", "x86", 1994, 3.5, 0.7, "LEGENDARY", "Extremely rare"), + "nx586": HardwareEntry("nx586", "NexGen Nx586", "x86", 1994, 3.5, 0.7, "LEGENDARY"), + + # ============ LEGENDARY-LOW TIER (3.2x) - Pentium II / Celeron ============ + # Intel Pentium Pro (1995) - Actually family 6 + "6_1": HardwareEntry("6_1", "Intel Pentium Pro", "x86", 1995, 3.2, 0.2, "LEGENDARY", "P6 architecture"), + "ppro": HardwareEntry("ppro", "Intel Pentium Pro", "x86", 1995, 3.2, 0.2, "LEGENDARY"), + + # Intel Pentium II (1997) + "6_3": HardwareEntry("6_3", "Intel Pentium II Klamath", "x86", 1997, 3.2, 0.15, "LEGENDARY", "Slot 1"), + "6_5": HardwareEntry("6_5", "Intel Pentium II Deschutes", "x86", 1998, 3.2, 0.1, "LEGENDARY"), + "pii": HardwareEntry("pii", "Intel Pentium II", "x86", 1997, 3.2, 0.15, "LEGENDARY"), + "p2": HardwareEntry("p2", "Intel Pentium II", "x86", 1997, 3.2, 0.15, "LEGENDARY"), + "klamath": HardwareEntry("klamath", "Intel Pentium II Klamath", "x86", 1997, 3.2, 0.15, "LEGENDARY"), + + # Intel Celeron (1998) + "6_6": HardwareEntry("6_6", "Intel Celeron Mendocino", "x86", 1998, 3.2, 0.1, "LEGENDARY"), + "celeron_slot1": HardwareEntry("celeron_slot1", "Intel Celeron (Slot 1)", "x86", 1998, 3.2, 0.15, "LEGENDARY"), + "mendocino": HardwareEntry("mendocino", "Intel Celeron Mendocino", "x86", 1998, 3.2, 0.1, "LEGENDARY"), + + # AMD K6 (1997) + "amd_6_6": HardwareEntry("amd_6_6", "AMD K6", "x86", 1997, 3.2, 0.2, "LEGENDARY"), + "amd_6_8": HardwareEntry("amd_6_8", 
"AMD K6-2", "x86", 1998, 3.2, 0.15, "LEGENDARY", "3DNow!"), + "amd_6_9": HardwareEntry("amd_6_9", "AMD K6-III", "x86", 1999, 3.2, 0.2, "LEGENDARY", "Triple cache"), + "k6": HardwareEntry("k6", "AMD K6", "x86", 1997, 3.2, 0.2, "LEGENDARY"), + "k6-2": HardwareEntry("k6-2", "AMD K6-2", "x86", 1998, 3.2, 0.15, "LEGENDARY"), + "k6-3": HardwareEntry("k6-3", "AMD K6-III", "x86", 1999, 3.2, 0.2, "LEGENDARY"), + + # ============ LEGENDARY-LOW TIER (3.0x) - Pentium III / Athlon ============ + # Intel Pentium III (1999) + "6_7": HardwareEntry("6_7", "Intel Pentium III Katmai", "x86", 1999, 3.0, 0.1, "LEGENDARY", "SSE"), + "6_8": HardwareEntry("6_8", "Intel Pentium III Coppermine", "x86", 1999, 3.0, 0.05, "LEGENDARY"), + "6_10": HardwareEntry("6_10", "Intel Pentium III Coppermine-T", "x86", 2000, 3.0, 0.05, "LEGENDARY"), + "6_11": HardwareEntry("6_11", "Intel Pentium III Tualatin", "x86", 2001, 3.0, 0.1, "LEGENDARY"), + "piii": HardwareEntry("piii", "Intel Pentium III", "x86", 1999, 3.0, 0.1, "LEGENDARY"), + "p3": HardwareEntry("p3", "Intel Pentium III", "x86", 1999, 3.0, 0.1, "LEGENDARY"), + "katmai": HardwareEntry("katmai", "Intel Pentium III Katmai", "x86", 1999, 3.0, 0.1, "LEGENDARY"), + "coppermine": HardwareEntry("coppermine", "Intel Pentium III Coppermine", "x86", 1999, 3.0, 0.05, "LEGENDARY"), + "tualatin": HardwareEntry("tualatin", "Intel Pentium III Tualatin", "x86", 2001, 3.0, 0.1, "LEGENDARY"), + + # AMD Athlon (1999) + "amd_6_1": HardwareEntry("amd_6_1", "AMD Athlon (K7)", "x86", 1999, 3.0, 0.1, "LEGENDARY", "Slot A"), + "amd_6_2": HardwareEntry("amd_6_2", "AMD Athlon (K75)", "x86", 1999, 3.0, 0.1, "LEGENDARY"), + "amd_6_4": HardwareEntry("amd_6_4", "AMD Athlon Thunderbird", "x86", 2000, 3.0, 0.05, "LEGENDARY"), + "amd_6_6_xp": HardwareEntry("amd_6_6_xp", "AMD Athlon XP Palomino", "x86", 2001, 3.0, 0.05, "LEGENDARY"), + "amd_6_8_xp": HardwareEntry("amd_6_8_xp", "AMD Athlon XP Thoroughbred", "x86", 2002, 3.0, 0.05, "LEGENDARY"), + "amd_6_10_xp": 
HardwareEntry("amd_6_10_xp", "AMD Athlon XP Barton", "x86", 2003, 3.0, 0.1, "LEGENDARY", "512K L2"), + "athlon": HardwareEntry("athlon", "AMD Athlon", "x86", 1999, 3.0, 0.1, "LEGENDARY"), + "athlon_xp": HardwareEntry("athlon_xp", "AMD Athlon XP", "x86", 2001, 3.0, 0.05, "LEGENDARY"), + "thunderbird": HardwareEntry("thunderbird", "AMD Athlon Thunderbird", "x86", 2000, 3.0, 0.05, "LEGENDARY"), + "barton": HardwareEntry("barton", "AMD Athlon XP Barton", "x86", 2003, 3.0, 0.1, "LEGENDARY"), + + # VIA C3 (2001) - Rare + "via_6_7": HardwareEntry("via_6_7", "VIA C3 Samuel", "x86", 2001, 3.0, 0.4, "LEGENDARY", "Rare VIA"), + "via_6_8": HardwareEntry("via_6_8", "VIA C3 Ezra", "x86", 2001, 3.0, 0.4, "LEGENDARY", "Rare"), + "via_6_9": HardwareEntry("via_6_9", "VIA C3 Nehemiah", "x86", 2003, 3.0, 0.4, "LEGENDARY", "Rare"), + "c3": HardwareEntry("c3", "VIA C3", "x86", 2001, 3.0, 0.4, "LEGENDARY"), + + # Transmeta Crusoe (2000) - Very rare + "transmeta_5_4": HardwareEntry("transmeta_5_4", "Transmeta Crusoe TM5400", "x86", 2000, 3.0, 0.6, "LEGENDARY", "Code morphing"), + "transmeta_5_5": HardwareEntry("transmeta_5_5", "Transmeta Crusoe TM5600", "x86", 2000, 3.0, 0.6, "LEGENDARY"), + "transmeta_15": HardwareEntry("transmeta_15", "Transmeta Efficeon", "x86", 2003, 3.0, 0.6, "LEGENDARY"), + "crusoe": HardwareEntry("crusoe", "Transmeta Crusoe", "x86", 2000, 3.0, 0.6, "LEGENDARY"), + "efficeon": HardwareEntry("efficeon", "Transmeta Efficeon", "x86", 2003, 3.0, 0.6, "LEGENDARY"), + + # ============ ANCIENT TIER (2.5x) - Pentium 4 / Athlon 64 ============ + # Intel Pentium 4 (2000) + "15_0": HardwareEntry("15_0", "Intel Pentium 4 Willamette", "x86", 2000, 2.5, 0.1, "ANCIENT", "NetBurst"), + "15_1": HardwareEntry("15_1", "Intel Pentium 4 Willamette-2", "x86", 2001, 2.5, 0.1, "ANCIENT"), + "15_2": HardwareEntry("15_2", "Intel Pentium 4 Northwood", "x86", 2002, 2.5, 0.05, "ANCIENT", "130nm"), + "15_3": HardwareEntry("15_3", "Intel Pentium 4 Prescott", "x86", 2004, 2.5, 0.05, "ANCIENT", 
"90nm"), + "15_4": HardwareEntry("15_4", "Intel Pentium 4 Prescott-2M", "x86", 2005, 2.5, 0.05, "ANCIENT"), + "15_6": HardwareEntry("15_6", "Intel Pentium D", "x86", 2005, 2.5, 0.1, "ANCIENT", "Dual Prescott"), + "p4": HardwareEntry("p4", "Intel Pentium 4", "x86", 2000, 2.5, 0.05, "ANCIENT"), + "pentium4": HardwareEntry("pentium4", "Intel Pentium 4", "x86", 2000, 2.5, 0.05, "ANCIENT"), + "willamette": HardwareEntry("willamette", "Intel Pentium 4 Willamette", "x86", 2000, 2.5, 0.1, "ANCIENT"), + "northwood": HardwareEntry("northwood", "Intel Pentium 4 Northwood", "x86", 2002, 2.5, 0.05, "ANCIENT"), + "prescott": HardwareEntry("prescott", "Intel Pentium 4 Prescott", "x86", 2004, 2.5, 0.05, "ANCIENT"), + + # Intel Pentium M (2003) + "6_9": HardwareEntry("6_9", "Intel Pentium M Banias", "x86", 2003, 2.5, 0.15, "ANCIENT", "Mobile P6"), + "6_13": HardwareEntry("6_13", "Intel Pentium M Dothan", "x86", 2004, 2.5, 0.1, "ANCIENT"), + "pentium_m": HardwareEntry("pentium_m", "Intel Pentium M", "x86", 2003, 2.5, 0.1, "ANCIENT"), + "banias": HardwareEntry("banias", "Intel Pentium M Banias", "x86", 2003, 2.5, 0.15, "ANCIENT"), + "dothan": HardwareEntry("dothan", "Intel Pentium M Dothan", "x86", 2004, 2.5, 0.1, "ANCIENT"), + + # AMD Athlon 64 (2003) + "amd_15_4": HardwareEntry("amd_15_4", "AMD Athlon 64 Clawhammer", "x86", 2003, 2.5, 0.1, "ANCIENT", "x86-64"), + "amd_15_5": HardwareEntry("amd_15_5", "AMD Opteron", "x86", 2003, 2.5, 0.15, "ANCIENT", "Server"), + "amd_15_7": HardwareEntry("amd_15_7", "AMD Athlon 64 San Diego", "x86", 2005, 2.5, 0.1, "ANCIENT"), + "amd_15_11": HardwareEntry("amd_15_11", "AMD Athlon 64 Orleans", "x86", 2006, 2.5, 0.05, "ANCIENT"), + "amd_15_35": HardwareEntry("amd_15_35", "AMD Athlon 64 X2", "x86", 2005, 2.5, 0.1, "ANCIENT", "Dual core"), + "athlon64": HardwareEntry("athlon64", "AMD Athlon 64", "x86", 2003, 2.5, 0.1, "ANCIENT"), + "athlon64_x2": HardwareEntry("athlon64_x2", "AMD Athlon 64 X2", "x86", 2005, 2.5, 0.1, "ANCIENT"), + "opteron": 
HardwareEntry("opteron", "AMD Opteron", "x86", 2003, 2.5, 0.15, "ANCIENT"), + + # ============ ANCIENT TIER (2.0x) - Core Duo / Early Core ============ + # Intel Core (2006) + "6_14": HardwareEntry("6_14", "Intel Core Yonah", "x86", 2006, 2.0, 0.1, "ANCIENT", "Core Duo/Solo"), + "core_duo": HardwareEntry("core_duo", "Intel Core Duo", "x86", 2006, 2.0, 0.1, "ANCIENT"), + "core_solo": HardwareEntry("core_solo", "Intel Core Solo", "x86", 2006, 2.0, 0.1, "ANCIENT"), + "yonah": HardwareEntry("yonah", "Intel Core Yonah", "x86", 2006, 2.0, 0.1, "ANCIENT"), + + # Intel Pentium D + "pentium_d": HardwareEntry("pentium_d", "Intel Pentium D", "x86", 2005, 2.0, 0.1, "ANCIENT"), + + # AMD Athlon X2 (socket 939/AM2) + "amd_15_67": HardwareEntry("amd_15_67", "AMD Athlon X2 Brisbane", "x86", 2007, 2.0, 0.05, "ANCIENT"), + + # ============ VINTAGE TIER (1.5x) - Core 2 ============ + # Intel Core 2 (2006) + "6_15": HardwareEntry("6_15", "Intel Core 2 Merom/Conroe", "x86", 2006, 1.5, 0.05, "VINTAGE", "Core 2 Duo"), + "6_22": HardwareEntry("6_22", "Intel Core 2 Merom-L", "x86", 2007, 1.5, 0.05, "VINTAGE"), + "6_23": HardwareEntry("6_23", "Intel Core 2 Penryn", "x86", 2008, 1.5, 0.05, "VINTAGE", "45nm"), + "6_29": HardwareEntry("6_29", "Intel Xeon Dunnington", "x86", 2008, 1.5, 0.1, "VINTAGE", "6-core"), + "core2": HardwareEntry("core2", "Intel Core 2", "x86", 2006, 1.5, 0.05, "VINTAGE"), + "core2_duo": HardwareEntry("core2_duo", "Intel Core 2 Duo", "x86", 2006, 1.5, 0.05, "VINTAGE"), + "core2_quad": HardwareEntry("core2_quad", "Intel Core 2 Quad", "x86", 2007, 1.5, 0.05, "VINTAGE"), + "conroe": HardwareEntry("conroe", "Intel Core 2 Conroe", "x86", 2006, 1.5, 0.05, "VINTAGE"), + "merom": HardwareEntry("merom", "Intel Core 2 Merom", "x86", 2006, 1.5, 0.05, "VINTAGE"), + "penryn": HardwareEntry("penryn", "Intel Core 2 Penryn", "x86", 2008, 1.5, 0.05, "VINTAGE"), + + # AMD Phenom (2007) + "amd_16_2": HardwareEntry("amd_16_2", "AMD Phenom X4 Agena", "x86", 2007, 1.5, 0.1, "VINTAGE"), + 
"amd_16_4": HardwareEntry("amd_16_4", "AMD Phenom II X4 Deneb", "x86", 2009, 1.5, 0.05, "VINTAGE"), + "amd_16_6": HardwareEntry("amd_16_6", "AMD Phenom II X6 Thuban", "x86", 2010, 1.5, 0.1, "VINTAGE", "6-core"), + "phenom": HardwareEntry("phenom", "AMD Phenom", "x86", 2007, 1.5, 0.1, "VINTAGE"), + "phenom_ii": HardwareEntry("phenom_ii", "AMD Phenom II", "x86", 2009, 1.5, 0.05, "VINTAGE"), + + # AMD FX (2011) + "amd_21_1": HardwareEntry("amd_21_1", "AMD FX Bulldozer", "x86", 2011, 1.5, 0.1, "VINTAGE"), + "amd_21_2": HardwareEntry("amd_21_2", "AMD FX Piledriver", "x86", 2012, 1.5, 0.1, "VINTAGE"), + "fx": HardwareEntry("fx", "AMD FX", "x86", 2011, 1.5, 0.1, "VINTAGE"), + "bulldozer": HardwareEntry("bulldozer", "AMD FX Bulldozer", "x86", 2011, 1.5, 0.1, "VINTAGE"), + "piledriver": HardwareEntry("piledriver", "AMD FX Piledriver", "x86", 2012, 1.5, 0.1, "VINTAGE"), + + # ============ STANDARD TIER (1.0x) - Nehalem through Haswell ============ + "6_26": HardwareEntry("6_26", "Intel Core i7 Nehalem", "x86", 2008, 1.0, 0.0, "STANDARD"), + "6_30": HardwareEntry("6_30", "Intel Core i7 Lynnfield", "x86", 2009, 1.0, 0.0, "STANDARD"), + "6_37": HardwareEntry("6_37", "Intel Core Westmere", "x86", 2010, 1.0, 0.0, "STANDARD"), + "6_42": HardwareEntry("6_42", "Intel Core Sandy Bridge", "x86", 2011, 1.0, 0.0, "STANDARD"), + "6_58": HardwareEntry("6_58", "Intel Core Ivy Bridge", "x86", 2012, 1.0, 0.0, "STANDARD"), + "6_60": HardwareEntry("6_60", "Intel Core Haswell", "x86", 2013, 1.0, 0.0, "STANDARD"), + "nehalem": HardwareEntry("nehalem", "Intel Core Nehalem", "x86", 2008, 1.0, 0.0, "STANDARD"), + "sandy_bridge": HardwareEntry("sandy_bridge", "Intel Core Sandy Bridge", "x86", 2011, 1.0, 0.0, "STANDARD"), + "ivy_bridge": HardwareEntry("ivy_bridge", "Intel Core Ivy Bridge", "x86", 2012, 1.0, 0.0, "STANDARD"), + "haswell": HardwareEntry("haswell", "Intel Core Haswell", "x86", 2013, 1.0, 0.0, "STANDARD"), + + # ============ PENALTY TIER (0.8x) - Modern x86-64 ============ + "6_61": 
HardwareEntry("6_61", "Intel Core Broadwell", "x86", 2014, 0.8, 0.0, "PENALTY"), + "6_78": HardwareEntry("6_78", "Intel Core Skylake", "x86", 2015, 0.8, 0.0, "PENALTY"), + "6_142": HardwareEntry("6_142", "Intel Core Kaby Lake", "x86", 2016, 0.8, 0.0, "PENALTY"), + "6_158": HardwareEntry("6_158", "Intel Core Coffee Lake", "x86", 2017, 0.8, 0.0, "PENALTY"), + "skylake": HardwareEntry("skylake", "Intel Core Skylake", "x86", 2015, 0.8, 0.0, "PENALTY"), + "kaby_lake": HardwareEntry("kaby_lake", "Intel Core Kaby Lake", "x86", 2016, 0.8, 0.0, "PENALTY"), + "coffee_lake": HardwareEntry("coffee_lake", "Intel Core Coffee Lake", "x86", 2017, 0.8, 0.0, "PENALTY"), + "alder_lake": HardwareEntry("alder_lake", "Intel Core Alder Lake", "x86", 2021, 0.8, 0.0, "PENALTY"), + "raptor_lake": HardwareEntry("raptor_lake", "Intel Core Raptor Lake", "x86", 2022, 0.8, 0.0, "PENALTY"), + + # AMD Ryzen (Modern - Penalty) + "amd_23_1": HardwareEntry("amd_23_1", "AMD Ryzen Zen", "x86", 2017, 0.8, 0.0, "PENALTY"), + "amd_23_8": HardwareEntry("amd_23_8", "AMD Ryzen Zen+", "x86", 2018, 0.8, 0.0, "PENALTY"), + "amd_23_49": HardwareEntry("amd_23_49", "AMD Ryzen Zen 2", "x86", 2019, 0.8, 0.0, "PENALTY"), + "amd_25_33": HardwareEntry("amd_25_33", "AMD Ryzen Zen 3", "x86", 2020, 0.8, 0.0, "PENALTY"), + "amd_25_97": HardwareEntry("amd_25_97", "AMD Ryzen Zen 4", "x86", 2022, 0.8, 0.0, "PENALTY"), + "ryzen": HardwareEntry("ryzen", "AMD Ryzen", "x86", 2017, 0.8, 0.0, "PENALTY"), + "zen": HardwareEntry("zen", "AMD Ryzen Zen", "x86", 2017, 0.8, 0.0, "PENALTY"), + "zen2": HardwareEntry("zen2", "AMD Ryzen Zen 2", "x86", 2019, 0.8, 0.0, "PENALTY"), + "zen3": HardwareEntry("zen3", "AMD Ryzen Zen 3", "x86", 2020, 0.8, 0.0, "PENALTY"), + "zen4": HardwareEntry("zen4", "AMD Ryzen Zen 4", "x86", 2022, 0.8, 0.0, "PENALTY"), +} + +# ============================================================================= +# POWERPC PROCESSOR DATABASE (by PVR - Processor Version Register) +# 
============================================================================= + +POWERPC_PVR_DATABASE: Dict[str, HardwareEntry] = { + # ============ MYTHIC TIER (4.0x) - POWER1 / PowerPC 601 ============ + "0x0001": HardwareEntry("0x0001", "PowerPC 601", "powerpc", 1993, 4.0, 0.4, "MYTHIC", "First PowerPC"), + "0x0003": HardwareEntry("0x0003", "PowerPC 603", "powerpc", 1994, 3.5, 0.2, "LEGENDARY", "Low power"), + "0x0004": HardwareEntry("0x0004", "PowerPC 604", "powerpc", 1994, 3.5, 0.2, "LEGENDARY", "High performance"), + "0x0006": HardwareEntry("0x0006", "PowerPC 603e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), + "0x0007": HardwareEntry("0x0007", "PowerPC 603ev", "powerpc", 1997, 3.5, 0.15, "LEGENDARY"), + "0x0009": HardwareEntry("0x0009", "PowerPC 604e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), + "0x000A": HardwareEntry("0x000A", "PowerPC 604ev", "powerpc", 1997, 3.5, 0.15, "LEGENDARY"), + "601": HardwareEntry("601", "PowerPC 601", "powerpc", 1993, 4.0, 0.4, "MYTHIC"), + "603": HardwareEntry("603", "PowerPC 603", "powerpc", 1994, 3.5, 0.2, "LEGENDARY"), + "603e": HardwareEntry("603e", "PowerPC 603e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), + "604": HardwareEntry("604", "PowerPC 604", "powerpc", 1994, 3.5, 0.2, "LEGENDARY"), + "604e": HardwareEntry("604e", "PowerPC 604e", "powerpc", 1996, 3.5, 0.15, "LEGENDARY"), + + # ============ LEGENDARY TIER (3.2x) - PowerPC G3 ============ + "0x0008": HardwareEntry("0x0008", "PowerPC 750 (G3)", "powerpc", 1997, 3.2, 0.1, "LEGENDARY", "G3"), + "0x7000": HardwareEntry("0x7000", "PowerPC 750CX (G3)", "powerpc", 1999, 3.2, 0.1, "LEGENDARY"), + "0x7002": HardwareEntry("0x7002", "PowerPC 750CXe (G3)", "powerpc", 2000, 3.2, 0.1, "LEGENDARY"), + "0x7003": HardwareEntry("0x7003", "PowerPC 750FX (G3)", "powerpc", 2002, 3.2, 0.15, "LEGENDARY"), + "0x7004": HardwareEntry("0x7004", "PowerPC 750GX (G3)", "powerpc", 2004, 3.2, 0.15, "LEGENDARY"), + "750": HardwareEntry("750", "PowerPC 750 (G3)", "powerpc", 1997, 3.2, 0.1, "LEGENDARY"), 
+ "g3": HardwareEntry("g3", "PowerPC G3", "powerpc", 1997, 3.2, 0.1, "LEGENDARY"), + "750cx": HardwareEntry("750cx", "PowerPC 750CX (G3)", "powerpc", 1999, 3.2, 0.1, "LEGENDARY"), + "750fx": HardwareEntry("750fx", "PowerPC 750FX (G3)", "powerpc", 2002, 3.2, 0.15, "LEGENDARY"), + "750gx": HardwareEntry("750gx", "PowerPC 750GX (G3)", "powerpc", 2004, 3.2, 0.15, "LEGENDARY"), + + # ============ ANCIENT TIER (2.5x) - PowerPC G4 ============ + "0x000C": HardwareEntry("0x000C", "PowerPC 7400 (G4)", "powerpc", 1999, 2.5, 0.1, "ANCIENT", "AltiVec"), + "0x800C": HardwareEntry("0x800C", "PowerPC 7410 (G4)", "powerpc", 2000, 2.5, 0.1, "ANCIENT"), + "0x8000": HardwareEntry("0x8000", "PowerPC 7450 (G4)", "powerpc", 2001, 2.5, 0.1, "ANCIENT", "Improved G4"), + "0x8001": HardwareEntry("0x8001", "PowerPC 7445 (G4)", "powerpc", 2003, 2.5, 0.1, "ANCIENT"), + "0x8002": HardwareEntry("0x8002", "PowerPC 7455 (G4)", "powerpc", 2002, 2.5, 0.1, "ANCIENT"), + "0x8003": HardwareEntry("0x8003", "PowerPC 7447 (G4)", "powerpc", 2003, 2.5, 0.1, "ANCIENT"), + "0x8004": HardwareEntry("0x8004", "PowerPC 7448 (G4)", "powerpc", 2005, 2.5, 0.15, "ANCIENT", "Last G4"), + "7400": HardwareEntry("7400", "PowerPC 7400 (G4)", "powerpc", 1999, 2.5, 0.1, "ANCIENT"), + "7410": HardwareEntry("7410", "PowerPC 7410 (G4)", "powerpc", 2000, 2.5, 0.1, "ANCIENT"), + "7450": HardwareEntry("7450", "PowerPC 7450 (G4)", "powerpc", 2001, 2.5, 0.1, "ANCIENT"), + "7455": HardwareEntry("7455", "PowerPC 7455 (G4)", "powerpc", 2002, 2.5, 0.1, "ANCIENT"), + "7447": HardwareEntry("7447", "PowerPC 7447 (G4)", "powerpc", 2003, 2.5, 0.1, "ANCIENT"), + "7448": HardwareEntry("7448", "PowerPC 7448 (G4)", "powerpc", 2005, 2.5, 0.15, "ANCIENT"), + "g4": HardwareEntry("g4", "PowerPC G4", "powerpc", 1999, 2.5, 0.1, "ANCIENT"), + + # ============ ANCIENT TIER (2.0x) - PowerPC G5 ============ + "0x0039": HardwareEntry("0x0039", "PowerPC 970 (G5)", "powerpc", 2003, 2.0, 0.1, "ANCIENT", "First G5"), + "0x003C": HardwareEntry("0x003C", 
"PowerPC 970FX (G5)", "powerpc", 2004, 2.0, 0.1, "ANCIENT", "90nm"), + "0x0044": HardwareEntry("0x0044", "PowerPC 970MP (G5)", "powerpc", 2005, 2.0, 0.15, "ANCIENT", "Dual-core"), + "970": HardwareEntry("970", "PowerPC 970 (G5)", "powerpc", 2003, 2.0, 0.1, "ANCIENT"), + "970fx": HardwareEntry("970fx", "PowerPC 970FX (G5)", "powerpc", 2004, 2.0, 0.1, "ANCIENT"), + "970mp": HardwareEntry("970mp", "PowerPC 970MP (G5)", "powerpc", 2005, 2.0, 0.15, "ANCIENT"), + "g5": HardwareEntry("g5", "PowerPC G5", "powerpc", 2003, 2.0, 0.1, "ANCIENT"), + + # ============ RARE POWERPC VARIANTS ============ + # IBM POWER series (Servers) + "power1": HardwareEntry("power1", "IBM POWER1", "powerpc", 1990, 4.0, 0.7, "MYTHIC", "Extremely rare"), + "power2": HardwareEntry("power2", "IBM POWER2", "powerpc", 1993, 4.0, 0.6, "MYTHIC", "Very rare"), + "power3": HardwareEntry("power3", "IBM POWER3", "powerpc", 1998, 3.5, 0.5, "LEGENDARY", "Rare server"), + "power4": HardwareEntry("power4", "IBM POWER4", "powerpc", 2001, 3.0, 0.4, "LEGENDARY", "First GHz"), + "power5": HardwareEntry("power5", "IBM POWER5", "powerpc", 2004, 2.5, 0.3, "ANCIENT"), + + # Freescale/NXP embedded PowerPC + "mpc5xx": HardwareEntry("mpc5xx", "Freescale MPC5xx", "powerpc", 1996, 3.5, 0.5, "LEGENDARY", "Automotive"), + "mpc8xx": HardwareEntry("mpc8xx", "Freescale MPC8xx", "powerpc", 1997, 3.5, 0.4, "LEGENDARY", "Networking"), + "e300": HardwareEntry("e300", "Freescale e300", "powerpc", 2004, 3.0, 0.3, "LEGENDARY"), + "e500": HardwareEntry("e500", "Freescale e500", "powerpc", 2003, 2.5, 0.3, "ANCIENT"), + "e600": HardwareEntry("e600", "Freescale e600", "powerpc", 2005, 2.5, 0.3, "ANCIENT"), + + # AMCC PowerPC + "ppc405": HardwareEntry("ppc405", "AMCC PPC405", "powerpc", 1999, 3.2, 0.4, "LEGENDARY", "Embedded"), + "ppc440": HardwareEntry("ppc440", "AMCC PPC440", "powerpc", 2002, 3.0, 0.3, "LEGENDARY"), + "ppc460": HardwareEntry("ppc460", "AMCC PPC460", "powerpc", 2006, 2.5, 0.3, "ANCIENT"), +} + +# 
============================================================================= +# MOTOROLA 68K PROCESSOR DATABASE +# ============================================================================= + +M68K_DATABASE: Dict[str, HardwareEntry] = { + # ============ MYTHIC TIER (4.0x) ============ + "68000": HardwareEntry("68000", "Motorola 68000", "m68k", 1979, 4.0, 0.3, "MYTHIC", "Original Mac/Amiga"), + "68008": HardwareEntry("68008", "Motorola 68008", "m68k", 1982, 4.0, 0.4, "MYTHIC", "8-bit bus variant"), + "68010": HardwareEntry("68010", "Motorola 68010", "m68k", 1982, 4.0, 0.35, "MYTHIC", "Virtual memory"), + "68012": HardwareEntry("68012", "Motorola 68012", "m68k", 1983, 4.0, 0.6, "MYTHIC", "Very rare"), + "68020": HardwareEntry("68020", "Motorola 68020", "m68k", 1984, 4.0, 0.25, "MYTHIC", "32-bit"), + "68030": HardwareEntry("68030", "Motorola 68030", "m68k", 1987, 4.0, 0.2, "MYTHIC", "Integrated MMU"), + + # ============ LEGENDARY-HIGH TIER (3.8x) ============ + "68040": HardwareEntry("68040", "Motorola 68040", "m68k", 1990, 3.8, 0.2, "LEGENDARY", "Integrated FPU"), + "68lc040": HardwareEntry("68lc040", "Motorola 68LC040", "m68k", 1991, 3.8, 0.25, "LEGENDARY", "No FPU"), + "68060": HardwareEntry("68060", "Motorola 68060", "m68k", 1994, 3.8, 0.3, "LEGENDARY", "Final 68K"), + "68lc060": HardwareEntry("68lc060", "Motorola 68LC060", "m68k", 1995, 3.8, 0.35, "LEGENDARY"), + + # ============ RARE VARIANTS ============ + "cpu32": HardwareEntry("cpu32", "Motorola CPU32", "m68k", 1990, 3.8, 0.5, "LEGENDARY", "Embedded 68K"), + "coldfire": HardwareEntry("coldfire", "Freescale ColdFire", "m68k", 1994, 3.5, 0.3, "LEGENDARY", "68K compatible"), + "dragonball": HardwareEntry("dragonball", "Motorola DragonBall", "m68k", 1995, 3.5, 0.4, "LEGENDARY", "Palm PDAs"), +} + +# ============================================================================= +# CLASSIC COMPUTER CHIPSET DATABASE (Amiga, Atari, C64, etc.) 
+# ============================================================================= + +CLASSIC_CHIPSET_DATABASE: Dict[str, HardwareEntry] = { + # ============ AMIGA CHIPSETS (MYTHIC) ============ + "ocs": HardwareEntry("ocs", "Amiga OCS (Original Chip Set)", "amiga", 1985, 4.0, 0.3, "MYTHIC", "A1000/A500/A2000"), + "ecs": HardwareEntry("ecs", "Amiga ECS (Enhanced Chip Set)", "amiga", 1990, 4.0, 0.25, "MYTHIC", "A500+/A600/A3000"), + "aga": HardwareEntry("aga", "Amiga AGA (Advanced Graphics)", "amiga", 1992, 4.0, 0.2, "MYTHIC", "A1200/A4000"), + "agnus_8361": HardwareEntry("agnus_8361", "Agnus 8361 (PAL-A)", "amiga", 1985, 4.0, 0.35, "MYTHIC"), + "agnus_8367": HardwareEntry("agnus_8367", "Agnus 8367 (NTSC-A)", "amiga", 1985, 4.0, 0.35, "MYTHIC"), + "agnus_8370": HardwareEntry("agnus_8370", "Fat Agnus 8370", "amiga", 1987, 4.0, 0.3, "MYTHIC", "1MB"), + "agnus_8372": HardwareEntry("agnus_8372", "Fat Agnus 8372", "amiga", 1988, 4.0, 0.3, "MYTHIC", "ECS"), + "agnus_8375": HardwareEntry("agnus_8375", "Alice 8375", "amiga", 1992, 4.0, 0.25, "MYTHIC", "AGA"), + "denise_8362": HardwareEntry("denise_8362", "Denise 8362", "amiga", 1985, 4.0, 0.35, "MYTHIC", "OCS"), + "denise_8373": HardwareEntry("denise_8373", "Super Denise 8373", "amiga", 1990, 4.0, 0.3, "MYTHIC", "ECS"), + "lisa_8364": HardwareEntry("lisa_8364", "Lisa 8364", "amiga", 1992, 4.0, 0.25, "MYTHIC", "AGA"), + "paula_8364": HardwareEntry("paula_8364", "Paula 8364", "amiga", 1985, 4.0, 0.35, "MYTHIC", "Sound/IO"), + + # Amiga Accelerator Cards (RARE!) 
+ "blizzard_1230": HardwareEntry("blizzard_1230", "Blizzard 1230 (68030)", "amiga", 1995, 4.0, 0.5, "MYTHIC", "A1200 accelerator"), + "blizzard_1260": HardwareEntry("blizzard_1260", "Blizzard 1260 (68060)", "amiga", 1997, 4.0, 0.6, "MYTHIC", "Very rare"), + "cyberstorm_060": HardwareEntry("cyberstorm_060", "CyberStorm 68060", "amiga", 1996, 4.0, 0.6, "MYTHIC", "A4000 accelerator"), + "apollo_68080": HardwareEntry("apollo_68080", "Apollo 68080 FPGA", "amiga", 2017, 3.5, 0.7, "LEGENDARY", "Modern retro"), + + # ============ ATARI CHIPSETS (MYTHIC) ============ + "shifter": HardwareEntry("shifter", "Atari ST Shifter", "atari", 1985, 4.0, 0.35, "MYTHIC", "Video"), + "glue": HardwareEntry("glue", "Atari ST GLUE", "atari", 1985, 4.0, 0.35, "MYTHIC", "Bus controller"), + "mmu": HardwareEntry("mmu", "Atari ST MMU", "atari", 1985, 4.0, 0.35, "MYTHIC", "Memory management"), + "blitter": HardwareEntry("blitter", "Atari ST BLiTTER", "atari", 1987, 4.0, 0.4, "MYTHIC", "STE/Mega ST"), + "videl": HardwareEntry("videl", "Atari Falcon VIDEL", "atari", 1992, 4.0, 0.5, "MYTHIC", "Falcon030 only"), + "dsp56001": HardwareEntry("dsp56001", "Motorola DSP56001", "atari", 1992, 4.0, 0.5, "MYTHIC", "Falcon030 DSP"), + + # ============ COMMODORE 64/128 (MYTHIC) ============ + "vic_ii": HardwareEntry("vic_ii", "MOS 6569 VIC-II (PAL)", "c64", 1982, 4.0, 0.25, "MYTHIC", "C64 video"), + "vic_ii_ntsc": HardwareEntry("vic_ii_ntsc", "MOS 6567 VIC-II (NTSC)", "c64", 1982, 4.0, 0.25, "MYTHIC"), + "sid_6581": HardwareEntry("sid_6581", "MOS 6581 SID", "c64", 1982, 4.0, 0.3, "MYTHIC", "C64 sound"), + "sid_8580": HardwareEntry("sid_8580", "MOS 8580 SID", "c64", 1986, 4.0, 0.35, "MYTHIC", "C64C sound"), + "cia_6526": HardwareEntry("cia_6526", "MOS 6526 CIA", "c64", 1982, 4.0, 0.25, "MYTHIC", "I/O"), + "pla_906114": HardwareEntry("pla_906114", "MOS 906114-01 PLA", "c64", 1982, 4.0, 0.3, "MYTHIC"), + "vdc_8563": HardwareEntry("vdc_8563", "MOS 8563 VDC", "c64", 1985, 4.0, 0.45, "MYTHIC", "C128 80-col"), + 
"mmu_8722": HardwareEntry("mmu_8722", "MOS 8722 MMU", "c64", 1985, 4.0, 0.45, "MYTHIC", "C128 only"), + + # ============ APPLE II (MYTHIC) ============ + "iou": HardwareEntry("iou", "Apple IOU", "apple2", 1977, 4.0, 0.4, "MYTHIC", "I/O controller"), + "mmu_apple2": HardwareEntry("mmu_apple2", "Apple II MMU", "apple2", 1983, 4.0, 0.4, "MYTHIC", "IIe/IIc"), + "iigs_mega2": HardwareEntry("iigs_mega2", "Apple IIgs Mega II", "apple2", 1986, 4.0, 0.5, "MYTHIC", "IIgs"), + "iigs_fpi": HardwareEntry("iigs_fpi", "Apple IIgs FPI", "apple2", 1986, 4.0, 0.5, "MYTHIC"), + + # ============ RARE/OBSCURE SYSTEMS (HIGH BONUS) ============ + # Sinclair ZX Spectrum + "ula_spectrum": HardwareEntry("ula_spectrum", "Ferranti ULA", "spectrum", 1982, 4.0, 0.35, "MYTHIC", "ZX Spectrum"), + + # BBC Micro + "bbc_video_ula": HardwareEntry("bbc_video_ula", "BBC Video ULA", "bbc", 1981, 4.0, 0.5, "MYTHIC"), + + # MSX + "v9938": HardwareEntry("v9938", "Yamaha V9938 VDP", "msx", 1985, 4.0, 0.4, "MYTHIC", "MSX2"), + "v9958": HardwareEntry("v9958", "Yamaha V9958 VDP", "msx", 1988, 4.0, 0.45, "MYTHIC", "MSX2+"), + + # TI-99/4A + "tms9900": HardwareEntry("tms9900", "TI TMS9900", "ti99", 1976, 4.0, 0.6, "MYTHIC", "16-bit!"), + "tms9918a": HardwareEntry("tms9918a", "TI TMS9918A VDP", "ti99", 1979, 4.0, 0.5, "MYTHIC"), + + # Tandy/Radio Shack + "coco_sam": HardwareEntry("coco_sam", "TRS-80 CoCo SAM", "tandy", 1980, 4.0, 0.5, "MYTHIC"), + "gime": HardwareEntry("gime", "GIME (CoCo 3)", "tandy", 1986, 4.0, 0.55, "MYTHIC", "Rare"), + + # Acorn Archimedes + "vidc1": HardwareEntry("vidc1", "ARM VIDC1", "acorn", 1987, 4.0, 0.6, "MYTHIC", "Archimedes"), + "memc1": HardwareEntry("memc1", "ARM MEMC1", "acorn", 1987, 4.0, 0.6, "MYTHIC"), + "ioc": HardwareEntry("ioc", "ARM IOC", "acorn", 1987, 4.0, 0.6, "MYTHIC"), +} + +# ============================================================================= +# WORKSTATION/SERVER PROCESSORS (SPARC, PA-RISC, Alpha, MIPS) +# 
============================================================================= + +WORKSTATION_DATABASE: Dict[str, HardwareEntry] = { + # ============ DEC ALPHA (LEGENDARY) ============ + "ev4": HardwareEntry("ev4", "DEC Alpha 21064 (EV4)", "alpha", 1992, 3.0, 0.5, "LEGENDARY", "First Alpha"), + "ev45": HardwareEntry("ev45", "DEC Alpha 21064A (EV45)", "alpha", 1994, 3.0, 0.45, "LEGENDARY"), + "ev5": HardwareEntry("ev5", "DEC Alpha 21164 (EV5)", "alpha", 1995, 3.0, 0.4, "LEGENDARY"), + "ev56": HardwareEntry("ev56", "DEC Alpha 21164A (EV56)", "alpha", 1996, 3.0, 0.35, "LEGENDARY"), + "pca56": HardwareEntry("pca56", "DEC Alpha 21164PC (PCA56)", "alpha", 1997, 3.0, 0.4, "LEGENDARY", "Low cost"), + "ev6": HardwareEntry("ev6", "DEC Alpha 21264 (EV6)", "alpha", 1998, 3.0, 0.35, "LEGENDARY"), + "ev67": HardwareEntry("ev67", "DEC Alpha 21264A (EV67)", "alpha", 1999, 3.0, 0.3, "LEGENDARY"), + "ev68": HardwareEntry("ev68", "DEC Alpha 21264C (EV68)", "alpha", 2001, 3.0, 0.35, "LEGENDARY"), + "ev7": HardwareEntry("ev7", "DEC Alpha 21364 (EV7)", "alpha", 2003, 3.0, 0.5, "LEGENDARY", "Final Alpha"), + "alpha": HardwareEntry("alpha", "DEC Alpha", "alpha", 1992, 3.0, 0.4, "LEGENDARY"), + + # ============ SUN SPARC (LEGENDARY) ============ + "sparc_v7": HardwareEntry("sparc_v7", "SPARC V7", "sparc", 1987, 3.0, 0.5, "LEGENDARY", "Sun-4"), + "sparc_v8": HardwareEntry("sparc_v8", "SPARC V8 (SuperSPARC)", "sparc", 1992, 3.0, 0.4, "LEGENDARY"), + "ultrasparc_i": HardwareEntry("ultrasparc_i", "UltraSPARC I", "sparc", 1995, 3.0, 0.35, "LEGENDARY"), + "ultrasparc_ii": HardwareEntry("ultrasparc_ii", "UltraSPARC II", "sparc", 1997, 3.0, 0.3, "LEGENDARY"), + "ultrasparc_iii": HardwareEntry("ultrasparc_iii", "UltraSPARC III", "sparc", 2001, 2.5, 0.3, "ANCIENT"), + "ultrasparc_iv": HardwareEntry("ultrasparc_iv", "UltraSPARC IV", "sparc", 2004, 2.5, 0.25, "ANCIENT"), + "sparc64": HardwareEntry("sparc64", "Fujitsu SPARC64", "sparc", 1995, 3.0, 0.4, "LEGENDARY"), + "sparc": HardwareEntry("sparc", 
"SPARC", "sparc", 1987, 3.0, 0.4, "LEGENDARY"), + + # ============ HP PA-RISC (LEGENDARY) ============ + "pa7000": HardwareEntry("pa7000", "HP PA-7000", "parisc", 1991, 3.0, 0.5, "LEGENDARY"), + "pa7100": HardwareEntry("pa7100", "HP PA-7100", "parisc", 1992, 3.0, 0.45, "LEGENDARY"), + "pa7200": HardwareEntry("pa7200", "HP PA-7200", "parisc", 1994, 3.0, 0.4, "LEGENDARY"), + "pa8000": HardwareEntry("pa8000", "HP PA-8000", "parisc", 1996, 3.0, 0.35, "LEGENDARY"), + "pa8200": HardwareEntry("pa8200", "HP PA-8200", "parisc", 1997, 3.0, 0.35, "LEGENDARY"), + "pa8500": HardwareEntry("pa8500", "HP PA-8500", "parisc", 1998, 3.0, 0.35, "LEGENDARY"), + "pa8600": HardwareEntry("pa8600", "HP PA-8600", "parisc", 2000, 2.5, 0.35, "ANCIENT"), + "pa8700": HardwareEntry("pa8700", "HP PA-8700", "parisc", 2001, 2.5, 0.35, "ANCIENT"), + "pa8800": HardwareEntry("pa8800", "HP PA-8800", "parisc", 2003, 2.5, 0.4, "ANCIENT", "Final PA-RISC"), + "parisc": HardwareEntry("parisc", "HP PA-RISC", "parisc", 1986, 3.0, 0.4, "LEGENDARY"), + + # ============ SGI MIPS (LEGENDARY) ============ + "r2000": HardwareEntry("r2000", "MIPS R2000", "mips", 1985, 3.5, 0.5, "LEGENDARY", "First MIPS"), + "r3000": HardwareEntry("r3000", "MIPS R3000", "mips", 1988, 3.5, 0.45, "LEGENDARY"), + "r4000": HardwareEntry("r4000", "MIPS R4000", "mips", 1991, 3.0, 0.4, "LEGENDARY", "64-bit"), + "r4400": HardwareEntry("r4400", "MIPS R4400", "mips", 1992, 3.0, 0.35, "LEGENDARY"), + "r4600": HardwareEntry("r4600", "MIPS R4600 Orion", "mips", 1994, 3.0, 0.3, "LEGENDARY"), + "r5000": HardwareEntry("r5000", "MIPS R5000", "mips", 1996, 3.0, 0.3, "LEGENDARY"), + "r8000": HardwareEntry("r8000", "MIPS R8000", "mips", 1994, 3.0, 0.5, "LEGENDARY", "Superscalar"), + "r10000": HardwareEntry("r10000", "MIPS R10000", "mips", 1996, 3.0, 0.35, "LEGENDARY"), + "r12000": HardwareEntry("r12000", "MIPS R12000", "mips", 1998, 3.0, 0.35, "LEGENDARY"), + "r14000": HardwareEntry("r14000", "MIPS R14000", "mips", 2001, 2.5, 0.35, "ANCIENT"), + 
"r16000": HardwareEntry("r16000", "MIPS R16000", "mips", 2002, 2.5, 0.4, "ANCIENT", "Final SGI MIPS"), + "mips": HardwareEntry("mips", "MIPS", "mips", 1985, 3.0, 0.4, "LEGENDARY"), + + # ============ IBM mainframes (VERY RARE) ============ + "s390": HardwareEntry("s390", "IBM S/390", "ibm", 1990, 3.0, 0.8, "LEGENDARY", "Mainframe"), + "z900": HardwareEntry("z900", "IBM zSeries z900", "ibm", 2000, 2.5, 0.6, "ANCIENT"), + "z990": HardwareEntry("z990", "IBM zSeries z990", "ibm", 2003, 2.5, 0.5, "ANCIENT"), +} + +# ============================================================================= +# ARM PROCESSORS (Vintage through Modern) +# ============================================================================= + +ARM_DATABASE: Dict[str, HardwareEntry] = { + # ============ LEGENDARY TIER (3.0x) - Early ARM ============ + "arm2": HardwareEntry("arm2", "ARM2", "arm", 1987, 4.0, 0.6, "MYTHIC", "Acorn Archimedes"), + "arm3": HardwareEntry("arm3", "ARM3", "arm", 1989, 4.0, 0.5, "MYTHIC"), + "arm6": HardwareEntry("arm6", "ARM6/ARM610", "arm", 1992, 3.5, 0.4, "LEGENDARY"), + "arm7": HardwareEntry("arm7", "ARM7", "arm", 1994, 3.5, 0.3, "LEGENDARY"), + "arm7tdmi": HardwareEntry("arm7tdmi", "ARM7TDMI", "arm", 1995, 3.5, 0.25, "LEGENDARY", "GBA"), + "strongarm": HardwareEntry("strongarm", "StrongARM SA-110", "arm", 1996, 3.0, 0.3, "LEGENDARY", "DEC/Intel"), + "sa1100": HardwareEntry("sa1100", "StrongARM SA-1100", "arm", 1998, 3.0, 0.3, "LEGENDARY", "iPAQ"), + "xscale": HardwareEntry("xscale", "Intel XScale", "arm", 2000, 2.5, 0.25, "ANCIENT", "PDAs"), + + # ============ ANCIENT TIER (2.0-2.5x) - ARM9/ARM11 ============ + "arm9": HardwareEntry("arm9", "ARM9", "arm", 1998, 2.5, 0.2, "ANCIENT"), + "arm926ej": HardwareEntry("arm926ej", "ARM926EJ-S", "arm", 2001, 2.5, 0.2, "ANCIENT"), + "arm11": HardwareEntry("arm11", "ARM11", "arm", 2003, 2.0, 0.15, "ANCIENT", "iPhone 1"), + "arm1176": HardwareEntry("arm1176", "ARM1176JZF-S", "arm", 2003, 2.0, 0.15, "ANCIENT", "RPi 1"), + + # 
============ VINTAGE TIER (1.5x) - Cortex-A ============ + "cortex_a8": HardwareEntry("cortex_a8", "ARM Cortex-A8", "arm", 2005, 1.5, 0.1, "VINTAGE", "iPhone 3GS"), + "cortex_a9": HardwareEntry("cortex_a9", "ARM Cortex-A9", "arm", 2007, 1.5, 0.05, "VINTAGE"), + "cortex_a15": HardwareEntry("cortex_a15", "ARM Cortex-A15", "arm", 2010, 1.5, 0.05, "VINTAGE"), + + # ============ PENALTY TIER (0.8x) - Modern ARM ============ + "cortex_a53": HardwareEntry("cortex_a53", "ARM Cortex-A53", "arm", 2012, 1.0, 0.0, "STANDARD"), + "cortex_a72": HardwareEntry("cortex_a72", "ARM Cortex-A72", "arm", 2015, 0.8, 0.0, "PENALTY"), + "cortex_a76": HardwareEntry("cortex_a76", "ARM Cortex-A76", "arm", 2018, 0.8, 0.0, "PENALTY"), + "cortex_x1": HardwareEntry("cortex_x1", "ARM Cortex-X1", "arm", 2020, 0.8, 0.0, "PENALTY"), + + # Apple Silicon (PENALTY) + "m1": HardwareEntry("m1", "Apple M1", "arm", 2020, 0.8, 0.0, "PENALTY", "Modern ARM"), + "m1_pro": HardwareEntry("m1_pro", "Apple M1 Pro", "arm", 2021, 0.8, 0.0, "PENALTY"), + "m1_max": HardwareEntry("m1_max", "Apple M1 Max", "arm", 2021, 0.8, 0.0, "PENALTY"), + "m1_ultra": HardwareEntry("m1_ultra", "Apple M1 Ultra", "arm", 2022, 0.8, 0.0, "PENALTY"), + "m2": HardwareEntry("m2", "Apple M2", "arm", 2022, 0.8, 0.0, "PENALTY"), + "m2_pro": HardwareEntry("m2_pro", "Apple M2 Pro", "arm", 2023, 0.8, 0.0, "PENALTY"), + "m2_max": HardwareEntry("m2_max", "Apple M2 Max", "arm", 2023, 0.8, 0.0, "PENALTY"), + "m3": HardwareEntry("m3", "Apple M3", "arm", 2023, 0.8, 0.0, "PENALTY"), + "m3_pro": HardwareEntry("m3_pro", "Apple M3 Pro", "arm", 2023, 0.8, 0.0, "PENALTY"), + "m3_max": HardwareEntry("m3_max", "Apple M3 Max", "arm", 2023, 0.8, 0.0, "PENALTY"), + "apple_silicon": HardwareEntry("apple_silicon", "Apple Silicon", "arm", 2020, 0.8, 0.0, "PENALTY"), +} + +# ============================================================================= +# VINTAGE GRAPHICS CARDS (BONUS MULTIPLIERS!) 
+# ============================================================================= + +GRAPHICS_DATABASE: Dict[str, HardwareEntry] = { + # ============ MYTHIC/LEGENDARY GRAPHICS ============ + # 3dfx Voodoo (MYTHIC!) + "voodoo1": HardwareEntry("voodoo1", "3dfx Voodoo Graphics", "gpu", 1996, 0.0, 0.5, "MYTHIC", "First 3D accelerator"), + "voodoo2": HardwareEntry("voodoo2", "3dfx Voodoo2", "gpu", 1998, 0.0, 0.4, "MYTHIC", "SLI!"), + "voodoo_banshee": HardwareEntry("voodoo_banshee", "3dfx Voodoo Banshee", "gpu", 1998, 0.0, 0.35, "LEGENDARY"), + "voodoo3": HardwareEntry("voodoo3", "3dfx Voodoo3", "gpu", 1999, 0.0, 0.3, "LEGENDARY"), + "voodoo4": HardwareEntry("voodoo4", "3dfx Voodoo4", "gpu", 2000, 0.0, 0.4, "LEGENDARY", "Rare"), + "voodoo5": HardwareEntry("voodoo5", "3dfx Voodoo5", "gpu", 2000, 0.0, 0.5, "LEGENDARY", "Very rare"), + "voodoo5_6000": HardwareEntry("voodoo5_6000", "3dfx Voodoo5 6000", "gpu", 2000, 0.0, 0.9, "LEGENDARY", "Extremely rare"), + + # S3 (MYTHIC/LEGENDARY) + "virge": HardwareEntry("virge", "S3 ViRGE", "gpu", 1995, 0.0, 0.35, "MYTHIC", "First consumer 3D"), + "virge_dx": HardwareEntry("virge_dx", "S3 ViRGE/DX", "gpu", 1996, 0.0, 0.3, "MYTHIC"), + "savage3d": HardwareEntry("savage3d", "S3 Savage3D", "gpu", 1998, 0.0, 0.3, "LEGENDARY"), + "savage4": HardwareEntry("savage4", "S3 Savage4", "gpu", 1999, 0.0, 0.25, "LEGENDARY"), + "savage2000": HardwareEntry("savage2000", "S3 Savage2000", "gpu", 1999, 0.0, 0.35, "LEGENDARY", "Rare"), + + # ATI Rage (LEGENDARY) + "rage_pro": HardwareEntry("rage_pro", "ATI Rage Pro", "gpu", 1997, 0.0, 0.25, "LEGENDARY"), + "rage_128": HardwareEntry("rage_128", "ATI Rage 128", "gpu", 1999, 0.0, 0.2, "LEGENDARY"), + "rage_fury": HardwareEntry("rage_fury", "ATI Rage Fury MAXX", "gpu", 1999, 0.0, 0.4, "LEGENDARY", "Dual GPU"), + "radeon_ddr": HardwareEntry("radeon_ddr", "ATI Radeon DDR", "gpu", 2000, 0.0, 0.2, "LEGENDARY"), + "radeon_7200": HardwareEntry("radeon_7200", "ATI Radeon 7200", "gpu", 2001, 0.0, 0.15, "LEGENDARY"), + 
+ # NVIDIA (LEGENDARY/ANCIENT) + "riva_128": HardwareEntry("riva_128", "NVIDIA RIVA 128", "gpu", 1997, 0.0, 0.35, "LEGENDARY"), + "riva_tnt": HardwareEntry("riva_tnt", "NVIDIA RIVA TNT", "gpu", 1998, 0.0, 0.3, "LEGENDARY"), + "tnt2": HardwareEntry("tnt2", "NVIDIA TNT2", "gpu", 1999, 0.0, 0.25, "LEGENDARY"), + "geforce_256": HardwareEntry("geforce_256", "NVIDIA GeForce 256", "gpu", 1999, 0.0, 0.25, "LEGENDARY", "First GeForce"), + "geforce2": HardwareEntry("geforce2", "NVIDIA GeForce2", "gpu", 2000, 0.0, 0.2, "LEGENDARY"), + "geforce3": HardwareEntry("geforce3", "NVIDIA GeForce3", "gpu", 2001, 0.0, 0.15, "ANCIENT"), + "geforce4": HardwareEntry("geforce4", "NVIDIA GeForce4", "gpu", 2002, 0.0, 0.15, "ANCIENT"), + + # Matrox (RARE!) + "millennium": HardwareEntry("millennium", "Matrox Millennium", "gpu", 1995, 0.0, 0.5, "LEGENDARY", "Professional"), + "mystique": HardwareEntry("mystique", "Matrox Mystique", "gpu", 1996, 0.0, 0.4, "LEGENDARY"), + "g200": HardwareEntry("g200", "Matrox G200", "gpu", 1998, 0.0, 0.35, "LEGENDARY"), + "g400": HardwareEntry("g400", "Matrox G400", "gpu", 1999, 0.0, 0.35, "LEGENDARY", "Best 2D"), + "parhelia": HardwareEntry("parhelia", "Matrox Parhelia", "gpu", 2002, 0.0, 0.5, "LEGENDARY", "Triple-head"), + + # Number Nine (VERY RARE!) + "imagine_128": HardwareEntry("imagine_128", "Number Nine Imagine 128", "gpu", 1995, 0.0, 0.6, "LEGENDARY", "Very rare"), + "revolution_3d": HardwareEntry("revolution_3d", "Number Nine Revolution 3D", "gpu", 1997, 0.0, 0.7, "LEGENDARY", "Extremely rare"), + "revolution_iv": HardwareEntry("revolution_iv", "Number Nine Revolution IV", "gpu", 1998, 0.0, 0.7, "LEGENDARY"), + + # Rendition (MYTHIC - VERY RARE!) 
+ "verite_v1000": HardwareEntry("verite_v1000", "Rendition Verite V1000", "gpu", 1995, 0.0, 0.7, "MYTHIC", "Extremely rare"), + "verite_v2100": HardwareEntry("verite_v2100", "Rendition Verite V2100", "gpu", 1997, 0.0, 0.6, "MYTHIC", "Very rare"), + "verite_v2200": HardwareEntry("verite_v2200", "Rendition Verite V2200", "gpu", 1998, 0.0, 0.6, "MYTHIC", "Very rare"), + + # PowerVR (RARE!) + "pcx1": HardwareEntry("pcx1", "NEC PowerVR PCX1", "gpu", 1996, 0.0, 0.6, "LEGENDARY", "Tile-based"), + "pcx2": HardwareEntry("pcx2", "NEC PowerVR PCX2", "gpu", 1997, 0.0, 0.5, "LEGENDARY"), + "kyro": HardwareEntry("kyro", "PowerVR Kyro", "gpu", 2000, 0.0, 0.4, "LEGENDARY"), + "kyro_ii": HardwareEntry("kyro_ii", "PowerVR Kyro II", "gpu", 2001, 0.0, 0.35, "LEGENDARY"), +} + + +# ============================================================================= +# HARDWARE LOOKUP FUNCTIONS +# ============================================================================= + +def normalize_id(hw_id: str) -> str: + """Normalize hardware ID for lookup""" + return hw_id.lower().strip().replace(" ", "_").replace("-", "_") + +def lookup_hardware(hw_id: str, family: Optional[str] = None) -> Optional[HardwareEntry]: + """ + Look up hardware by ID with optional family hint. + Returns the HardwareEntry if found, None otherwise. 
+ """ + norm_id = normalize_id(hw_id) + + # Try specific databases based on family hint + databases = [] + if family: + family_lower = family.lower() + if "x86" in family_lower or "intel" in family_lower or "amd" in family_lower: + databases.append(X86_CPUID_DATABASE) + elif "powerpc" in family_lower or "ppc" in family_lower: + databases.append(POWERPC_PVR_DATABASE) + elif "m68k" in family_lower or "68" in family_lower or "motorola" in family_lower: + databases.append(M68K_DATABASE) + elif "arm" in family_lower or "apple" in family_lower: + databases.append(ARM_DATABASE) + elif any(x in family_lower for x in ["sparc", "alpha", "mips", "parisc", "ibm"]): + databases.append(WORKSTATION_DATABASE) + elif any(x in family_lower for x in ["amiga", "atari", "c64", "commodore", "apple2", "spectrum", "msx"]): + databases.append(CLASSIC_CHIPSET_DATABASE) + elif any(x in family_lower for x in ["gpu", "voodoo", "geforce", "radeon", "matrox"]): + databases.append(GRAPHICS_DATABASE) + + # Add all databases as fallback + databases.extend([ + X86_CPUID_DATABASE, + POWERPC_PVR_DATABASE, + M68K_DATABASE, + ARM_DATABASE, + WORKSTATION_DATABASE, + CLASSIC_CHIPSET_DATABASE, + GRAPHICS_DATABASE, + ]) + + # Search through databases + for db in databases: + if norm_id in db: + return db[norm_id] + + # Try partial matching for common variants + for key, entry in db.items(): + if norm_id in key or key in norm_id: + return entry + + return None + +def calculate_poa_multiplier( + device_family: str, + device_arch: str, + device_model: Optional[str] = None, + chipset_ids: Optional[List[str]] = None, + gpu_id: Optional[str] = None, +) -> Tuple[float, str, float, str]: + """ + Calculate PoA multiplier based on hardware detection. 
+ + Returns: + Tuple of (base_multiplier, tier_name, rarity_bonus, hardware_name) + """ + family_lower = device_family.lower() if device_family else "" + arch_lower = device_arch.lower() if device_arch else "" + model_lower = device_model.lower() if device_model else "" + + # Default values + base_mult = 1.0 + tier = "STANDARD" + rarity = 0.0 + hw_name = "Unknown Hardware" + + # Try to look up the exact hardware + entry = None + + # Try arch first + if device_arch: + entry = lookup_hardware(device_arch, device_family) + + # Try model if no match + if not entry and device_model: + entry = lookup_hardware(device_model, device_family) + + # Try chipset IDs + if not entry and chipset_ids: + for chip_id in chipset_ids: + entry = lookup_hardware(chip_id, device_family) + if entry: + break + + # If found in database, use those values + if entry: + base_mult = entry.base_multiplier + tier = entry.tier + rarity = entry.rarity_bonus + hw_name = entry.name + else: + # Fallback to family-based detection + if "m68k" in family_lower or "68" in arch_lower or "motorola" in family_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.2, "Motorola 68K" + elif "amiga" in family_lower or "amiga" in arch_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.3, "Amiga" + elif "atari" in family_lower or "atari" in arch_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.35, "Atari ST" + elif "c64" in family_lower or "commodore" in family_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.25, "Commodore 64" + elif "386" in arch_lower or "i386" in arch_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.3, "Intel 386" + elif "286" in arch_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.4, "Intel 286" + elif "486" in arch_lower or "i486" in arch_lower: + base_mult, tier, rarity, hw_name = 3.8, "LEGENDARY", 0.2, "Intel 486" + elif "pentium" in arch_lower and any(x in arch_lower for x in ["mmx", "p5", "p54", "p55", " 1", "_1"]): + 
base_mult, tier, rarity, hw_name = 3.5, "LEGENDARY", 0.15, "Pentium 1" + elif "pentium" in arch_lower and any(x in arch_lower for x in [" 2", "_2", "ii", "klamath", "deschutes"]): + base_mult, tier, rarity, hw_name = 3.2, "LEGENDARY", 0.1, "Pentium II" + elif "pentium" in arch_lower and any(x in arch_lower for x in [" 3", "_3", "iii", "katmai", "coppermine"]): + base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.1, "Pentium III" + elif "pentium" in arch_lower and any(x in arch_lower for x in [" 4", "_4", "iv", "willamette", "northwood"]): + base_mult, tier, rarity, hw_name = 2.5, "ANCIENT", 0.05, "Pentium 4" + elif "powerpc" in family_lower or "ppc" in family_lower: + if "601" in arch_lower: + base_mult, tier, rarity, hw_name = 4.0, "MYTHIC", 0.4, "PowerPC 601" + elif "603" in arch_lower or "604" in arch_lower: + base_mult, tier, rarity, hw_name = 3.5, "LEGENDARY", 0.15, "PowerPC 603/604" + elif "g3" in arch_lower or "750" in arch_lower: + base_mult, tier, rarity, hw_name = 3.2, "LEGENDARY", 0.1, "PowerPC G3" + elif "g4" in arch_lower or "74" in arch_lower: + base_mult, tier, rarity, hw_name = 2.5, "ANCIENT", 0.1, "PowerPC G4" + elif "g5" in arch_lower or "970" in arch_lower: + base_mult, tier, rarity, hw_name = 2.0, "ANCIENT", 0.1, "PowerPC G5" + else: + base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.2, "PowerPC" + elif "alpha" in family_lower: + base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "DEC Alpha" + elif "sparc" in family_lower: + base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "SPARC" + elif "mips" in family_lower: + base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "MIPS" + elif "parisc" in family_lower or "pa-risc" in family_lower: + base_mult, tier, rarity, hw_name = 3.0, "LEGENDARY", 0.4, "PA-RISC" + elif "core2" in arch_lower or "core 2" in arch_lower: + base_mult, tier, rarity, hw_name = 1.5, "VINTAGE", 0.05, "Core 2" + elif "core" in arch_lower and "duo" in arch_lower: + base_mult, tier, rarity, hw_name = 
2.0, "ANCIENT", 0.1, "Core Duo" + elif any(x in arch_lower for x in ["m1", "m2", "m3", "apple_silicon", "apple silicon"]): + base_mult, tier, rarity, hw_name = 0.8, "PENALTY", 0.0, "Apple Silicon" + elif "arm" in family_lower and any(x in arch_lower for x in ["aarch64", "armv8", "cortex-a7"]): + base_mult, tier, rarity, hw_name = 0.8, "PENALTY", 0.0, "Modern ARM" + elif any(x in arch_lower for x in ["ryzen", "zen", "skylake", "alder", "raptor"]): + base_mult, tier, rarity, hw_name = 0.8, "PENALTY", 0.0, "Modern x86-64" + + # Check for GPU bonus + if gpu_id: + gpu_entry = lookup_hardware(gpu_id, "gpu") + if gpu_entry and gpu_entry.rarity_bonus > 0: + rarity += gpu_entry.rarity_bonus * 0.5 # 50% of GPU rarity bonus added + + return (base_mult, tier, rarity, hw_name) + +def get_total_multiplier(base_mult: float, rarity_bonus: float) -> float: + """Calculate total multiplier including rarity bonus""" + return base_mult + (base_mult * rarity_bonus) + + +# ============================================================================= +# CONVENIENCE FUNCTIONS FOR RIP SERVICE +# ============================================================================= + +def get_poa_info_for_miner(signals: dict) -> dict: + """ + Process miner attestation signals and return PoA info. 
+ + Args: + signals: Dict containing device info from attestation + + Returns: + Dict with multiplier info for database storage + """ + device = signals.get("device", {}) + device_family = device.get("family", signals.get("device_family", "")) + device_arch = device.get("arch", signals.get("device_arch", "")) + device_model = device.get("model", signals.get("device_model", "")) + + # Get chipset IDs if available + chipset_ids = [] + if "chipset" in signals: + chipset_ids.append(signals["chipset"]) + if "pci_ids" in signals: + chipset_ids.extend(signals["pci_ids"]) + if "cpu_id" in signals: + chipset_ids.append(signals["cpu_id"]) + + # Get GPU ID if available + gpu_id = signals.get("gpu", signals.get("gpu_id")) + + base_mult, tier, rarity, hw_name = calculate_poa_multiplier( + device_family, device_arch, device_model, chipset_ids, gpu_id + ) + + total_mult = get_total_multiplier(base_mult, rarity) + + return { + "antiquity_multiplier": round(total_mult, 2), + "base_multiplier": base_mult, + "rarity_bonus": round(rarity, 3), + "tier": tier, + "hardware_type": hw_name, + "device_family": device_family, + "device_arch": device_arch, + } + + +# ============================================================================= +# STATISTICS AND REPORTING +# ============================================================================= + +def get_database_stats() -> dict: + """Get statistics about the hardware database""" + all_dbs = { + "x86": X86_CPUID_DATABASE, + "powerpc": POWERPC_PVR_DATABASE, + "m68k": M68K_DATABASE, + "classic": CLASSIC_CHIPSET_DATABASE, + "workstation": WORKSTATION_DATABASE, + "arm": ARM_DATABASE, + "graphics": GRAPHICS_DATABASE, + } + + stats = { + "total_entries": 0, + "by_family": {}, + "by_tier": { + "MYTHIC": 0, + "LEGENDARY": 0, + "ANCIENT": 0, + "VINTAGE": 0, + "STANDARD": 0, + "PENALTY": 0, + }, + "rarest_hardware": [], + } + + all_entries = [] + for db_name, db in all_dbs.items(): + stats["by_family"][db_name] = len(db) + stats["total_entries"] 
+= len(db) + + for entry in db.values(): + stats["by_tier"][entry.tier] += 1 + all_entries.append(entry) + + # Find rarest hardware (highest rarity bonus) + all_entries.sort(key=lambda x: x.rarity_bonus, reverse=True) + stats["rarest_hardware"] = [ + {"name": e.name, "rarity": e.rarity_bonus, "tier": e.tier} + for e in all_entries[:20] + ] + + return stats + + +if __name__ == "__main__": + # Print database statistics + stats = get_database_stats() + print("=" * 60) + print("RustChain PoA Hardware Database Statistics") + print("=" * 60) + print(f"\nTotal hardware entries: {stats['total_entries']}") + print("\nBy family:") + for family, count in stats['by_family'].items(): + print(f" {family:15} {count:4} entries") + print("\nBy tier:") + for tier, count in stats['by_tier'].items(): + print(f" {tier:12} {count:4} entries") + print("\nTop 10 rarest hardware (highest bonus):") + for i, hw in enumerate(stats['rarest_hardware'][:10], 1): + print(f" {i:2}. {hw['name']:35} +{hw['rarity']*100:.0f}% ({hw['tier']})") + + # Test some lookups + print("\n" + "=" * 60) + print("Test Lookups") + print("=" * 60) + + test_cases = [ + ("PowerPC", "G4"), + ("x86", "486"), + ("x86", "Pentium"), + ("m68k", "68030"), + ("powerpc", "601"), + ("arm", "m1"), + ("x86", "ryzen"), + ] + + for family, arch in test_cases: + base, tier, rarity, name = calculate_poa_multiplier(family, arch) + total = get_total_multiplier(base, rarity) + print(f"\n{family}/{arch}:") + print(f" Hardware: {name}") + print(f" Tier: {tier}") + print(f" Base: {base}x, Rarity: +{rarity*100:.0f}%, Total: {total:.2f}x") diff --git a/node/rustchain_migration.py b/node/rustchain_migration.py index b6930499..a1bced9f 100644 --- a/node/rustchain_migration.py +++ b/node/rustchain_migration.py @@ -1,647 +1,647 @@ -#!/usr/bin/env python3 -""" -RustChain Testnet to Mainnet Migration Script -============================================== - -Phase 6 Implementation: -- Testnet state snapshot -- Database schema migration -- Premine 
initialization -- Genesis block creation -- Validation and verification - -Run this script ONCE to migrate from testnet to mainnet. -""" - -import os -import sys -import json -import sqlite3 -import shutil -import time -import logging -import hashlib -from datetime import datetime -from typing import Dict, List, Optional - -# Import mainnet modules -from rustchain_crypto import blake2b256_hex, canonical_json, generate_wallet_keypair -from rustchain_genesis_premine import PremineManager, TOTAL_PREMINE_RTC, FOUNDER_ALLOCATIONS -from rustchain_tx_handler import TransactionPool - -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s [MIGRATE] %(levelname)s: %(message)s' -) -logger = logging.getLogger(__name__) - - -# ============================================================================= -# MIGRATION CONFIGURATION -# ============================================================================= - -MIGRATION_VERSION = "2.3.0-mainnet" -GENESIS_TIMESTAMP = 1728000000 # Oct 4, 2024 00:00:00 UTC (same as testnet) - -# Paths -TESTNET_DB_PATH = os.environ.get("TESTNET_DB", "/root/rustchain/rustchain_v2.db") -MAINNET_DB_PATH = os.environ.get("MAINNET_DB", "/root/rustchain/rustchain_mainnet.db") -BACKUP_DIR = os.environ.get("BACKUP_DIR", "/root/rustchain/backups") - -# Migration flags -PRESERVE_ATTESTATION_HISTORY = True -PRESERVE_MINER_STATS = True -RESET_BALANCES = True # Reset to premine only - - -# ============================================================================= -# MIGRATION STEPS -# ============================================================================= - -class RustChainMigration: - """ - Handles testnet -> mainnet migration. 
- """ - - def __init__( - self, - testnet_db: str = TESTNET_DB_PATH, - mainnet_db: str = MAINNET_DB_PATH, - backup_dir: str = BACKUP_DIR - ): - self.testnet_db = testnet_db - self.mainnet_db = mainnet_db - self.backup_dir = backup_dir - self.migration_log = [] - self.errors = [] - - def log(self, message: str, level: str = "INFO"): - """Log migration step""" - entry = { - "timestamp": datetime.now().isoformat(), - "level": level, - "message": message - } - self.migration_log.append(entry) - - if level == "ERROR": - logger.error(message) - self.errors.append(message) - elif level == "WARNING": - logger.warning(message) - else: - logger.info(message) - - def pre_flight_checks(self) -> bool: - """Run pre-migration validation""" - self.log("=" * 60) - self.log("PRE-FLIGHT CHECKS") - self.log("=" * 60) - - # Check testnet DB exists - if not os.path.exists(self.testnet_db): - self.log(f"Testnet DB not found: {self.testnet_db}", "ERROR") - return False - self.log(f"Testnet DB found: {self.testnet_db}") - - # Check mainnet DB doesn't exist (prevent accidental overwrite) - if os.path.exists(self.mainnet_db): - self.log(f"Mainnet DB already exists: {self.mainnet_db}", "WARNING") - self.log("Will create backup before overwriting") - - # Check backup directory - os.makedirs(self.backup_dir, exist_ok=True) - self.log(f"Backup directory: {self.backup_dir}") - - # Verify testnet DB integrity - try: - with sqlite3.connect(self.testnet_db) as conn: - cursor = conn.cursor() - - # Check tables exist - cursor.execute("SELECT name FROM sqlite_master WHERE type='table'") - tables = [row[0] for row in cursor.fetchall()] - self.log(f"Testnet tables: {tables}") - - # Check miner attestations - if "miner_attest_recent" in tables: - cursor.execute("SELECT COUNT(*) FROM miner_attest_recent") - count = cursor.fetchone()[0] - self.log(f"Active attestations: {count}") - - # Check balances - if "balances" in tables: - cursor.execute("SELECT COUNT(*), SUM(balance_urtc) FROM balances") - row = 
cursor.fetchone() - self.log(f"Testnet wallets: {row[0]}, Total balance: {(row[1] or 0) / 100_000_000:.2f} RTC") - - except Exception as e: - self.log(f"Failed to verify testnet DB: {e}", "ERROR") - return False - - self.log("Pre-flight checks PASSED") - return True - - def create_backup(self) -> str: - """Create timestamped backup of testnet DB""" - self.log("Creating backup...") - - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - backup_path = os.path.join(self.backup_dir, f"testnet_backup_{timestamp}.db") - - shutil.copy2(self.testnet_db, backup_path) - self.log(f"Backup created: {backup_path}") - - # Also backup mainnet if it exists - if os.path.exists(self.mainnet_db): - mainnet_backup = os.path.join(self.backup_dir, f"mainnet_backup_{timestamp}.db") - shutil.copy2(self.mainnet_db, mainnet_backup) - self.log(f"Mainnet backup created: {mainnet_backup}") - - return backup_path - - def create_mainnet_schema(self): - """Create mainnet database with upgraded schema""" - self.log("Creating mainnet database schema...") - - # Remove existing if present - if os.path.exists(self.mainnet_db): - os.remove(self.mainnet_db) - - with sqlite3.connect(self.mainnet_db) as conn: - cursor = conn.cursor() - - # Core tables - cursor.execute(""" - CREATE TABLE balances ( - wallet TEXT PRIMARY KEY, - balance_urtc INTEGER DEFAULT 0, - wallet_nonce INTEGER DEFAULT 0, - created_at INTEGER, - updated_at INTEGER - ) - """) - - cursor.execute(""" - CREATE TABLE blocks ( - height INTEGER PRIMARY KEY, - block_hash TEXT UNIQUE NOT NULL, - prev_hash TEXT NOT NULL, - timestamp INTEGER NOT NULL, - merkle_root TEXT NOT NULL, - state_root TEXT NOT NULL, - attestations_hash TEXT NOT NULL, - producer TEXT NOT NULL, - producer_sig TEXT NOT NULL, - tx_count INTEGER NOT NULL, - attestation_count INTEGER NOT NULL, - body_json TEXT NOT NULL, - created_at INTEGER NOT NULL - ) - """) - - cursor.execute(""" - CREATE TABLE miner_attest_recent ( - miner TEXT PRIMARY KEY, - device_arch TEXT, - 
device_family TEXT, - device_model TEXT, - device_year INTEGER, - ts_ok INTEGER, - last_block_produced INTEGER, - total_blocks_produced INTEGER DEFAULT 0 - ) - """) - - cursor.execute(""" - CREATE TABLE miner_attest_history ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - miner TEXT NOT NULL, - device_arch TEXT, - device_family TEXT, - ts_ok INTEGER NOT NULL, - block_height INTEGER - ) - """) - - cursor.execute(""" - CREATE TABLE pending_transactions ( - tx_hash TEXT PRIMARY KEY, - from_addr TEXT NOT NULL, - to_addr TEXT NOT NULL, - amount_urtc INTEGER NOT NULL, - nonce INTEGER NOT NULL, - timestamp INTEGER NOT NULL, - memo TEXT DEFAULT '', - signature TEXT NOT NULL, - public_key TEXT NOT NULL, - created_at INTEGER NOT NULL, - status TEXT DEFAULT 'pending' - ) - """) - - cursor.execute(""" - CREATE TABLE transaction_history ( - tx_hash TEXT PRIMARY KEY, - from_addr TEXT NOT NULL, - to_addr TEXT NOT NULL, - amount_urtc INTEGER NOT NULL, - nonce INTEGER NOT NULL, - timestamp INTEGER NOT NULL, - memo TEXT DEFAULT '', - signature TEXT NOT NULL, - public_key TEXT NOT NULL, - block_height INTEGER, - block_hash TEXT, - confirmed_at INTEGER, - status TEXT DEFAULT 'confirmed' - ) - """) - - cursor.execute(""" - CREATE TABLE wallet_pubkeys ( - address TEXT PRIMARY KEY, - public_key TEXT NOT NULL, - registered_at INTEGER NOT NULL - ) - """) - - cursor.execute(""" - CREATE TABLE premine_allocations ( - allocation_id TEXT PRIMARY KEY, - name TEXT NOT NULL, - wallet_address TEXT NOT NULL, - public_key TEXT, - total_urtc INTEGER NOT NULL, - vesting_months INTEGER NOT NULL, - cliff_months INTEGER NOT NULL, - claimed_urtc INTEGER DEFAULT 0, - role TEXT NOT NULL, - created_at INTEGER NOT NULL - ) - """) - - cursor.execute(""" - CREATE TABLE vesting_claims ( - claim_id INTEGER PRIMARY KEY AUTOINCREMENT, - allocation_id TEXT NOT NULL, - amount_urtc INTEGER NOT NULL, - claimed_at INTEGER NOT NULL, - tx_hash TEXT - ) - """) - - cursor.execute(""" - CREATE TABLE ergo_anchors ( - id INTEGER 
PRIMARY KEY AUTOINCREMENT, - rustchain_height INTEGER NOT NULL, - rustchain_hash TEXT NOT NULL, - commitment_hash TEXT NOT NULL, - ergo_tx_id TEXT NOT NULL, - ergo_height INTEGER, - confirmations INTEGER DEFAULT 0, - status TEXT DEFAULT 'pending', - created_at INTEGER NOT NULL - ) - """) - - cursor.execute(""" - CREATE TABLE chain_metadata ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL, - updated_at INTEGER NOT NULL - ) - """) - - # Indexes - cursor.execute("CREATE INDEX idx_tx_pending_from ON pending_transactions(from_addr)") - cursor.execute("CREATE INDEX idx_tx_history_from ON transaction_history(from_addr)") - cursor.execute("CREATE INDEX idx_tx_history_to ON transaction_history(to_addr)") - cursor.execute("CREATE INDEX idx_tx_history_block ON transaction_history(block_height)") - cursor.execute("CREATE INDEX idx_attest_history_miner ON miner_attest_history(miner)") - cursor.execute("CREATE INDEX idx_blocks_hash ON blocks(block_hash)") - - # Insert metadata - cursor.execute(""" - INSERT INTO chain_metadata (key, value, updated_at) VALUES - ('version', ?, ?), - ('genesis_timestamp', ?, ?), - ('network', 'mainnet', ?), - ('migration_date', ?, ?) 
- """, ( - MIGRATION_VERSION, int(time.time()), - str(GENESIS_TIMESTAMP), int(time.time()), - int(time.time()), - datetime.now().isoformat(), int(time.time()) - )) - - conn.commit() - - self.log("Mainnet schema created successfully") - - def migrate_attestation_history(self): - """Migrate attestation history from testnet""" - if not PRESERVE_ATTESTATION_HISTORY: - self.log("Skipping attestation history migration (disabled)") - return - - self.log("Migrating attestation history...") - - try: - with sqlite3.connect(self.testnet_db) as testnet_conn: - testnet_conn.row_factory = sqlite3.Row - cursor = testnet_conn.cursor() - - # Get attestation history - cursor.execute(""" - SELECT miner, device_arch, device_family, ts_ok - FROM miner_attest_recent - """) - attestations = cursor.fetchall() - - with sqlite3.connect(self.mainnet_db) as mainnet_conn: - cursor = mainnet_conn.cursor() - - for att in attestations: - cursor.execute(""" - INSERT INTO miner_attest_recent - (miner, device_arch, device_family, ts_ok) - VALUES (?, ?, ?, ?) 
- """, (att["miner"], att["device_arch"], att["device_family"], att["ts_ok"])) - - mainnet_conn.commit() - - self.log(f"Migrated {len(attestations)} attestation records") - - except Exception as e: - self.log(f"Attestation migration failed: {e}", "ERROR") - - def initialize_premine(self, wallet_addresses: Dict[str, str] = None) -> Dict: - """Initialize premine allocations""" - self.log("Initializing premine allocations...") - - manager = PremineManager(self.mainnet_db, GENESIS_TIMESTAMP) - result = manager.initialize_premine(wallet_addresses) - - self.log(f"Total premine: {TOTAL_PREMINE_RTC:,} RTC") - self.log(f"Allocations created: {len(result['allocations'])}") - - for alloc in result['allocations']: - self.log(f" {alloc['name']}: {alloc['amount_rtc']:,} RTC -> {alloc['wallet'][:20]}...") - - return result - - def create_genesis_block(self) -> Dict: - """Create genesis block""" - self.log("Creating genesis block...") - - # Genesis block data - genesis = { - "height": 0, - "block_hash": "0" * 64, # Will be computed - "prev_hash": "0" * 64, - "timestamp": GENESIS_TIMESTAMP * 1000, - "merkle_root": "0" * 64, - "state_root": "0" * 64, - "attestations_hash": "0" * 64, - "producer": "genesis", - "producer_sig": "0" * 128, - "tx_count": 0, - "attestation_count": 0, - "body_json": json.dumps({ - "transactions": [], - "attestations": [], - "premine": { - "total_rtc": TOTAL_PREMINE_RTC, - "allocations": list(FOUNDER_ALLOCATIONS.keys()) - } - }) - } - - # Compute genesis hash - genesis_data = canonical_json({ - "height": genesis["height"], - "prev_hash": genesis["prev_hash"], - "timestamp": genesis["timestamp"], - "merkle_root": genesis["merkle_root"], - "producer": genesis["producer"] - }) - genesis["block_hash"] = blake2b256_hex(genesis_data) - - with sqlite3.connect(self.mainnet_db) as conn: - cursor = conn.cursor() - cursor.execute(""" - INSERT INTO blocks - (height, block_hash, prev_hash, timestamp, merkle_root, state_root, - attestations_hash, producer, producer_sig, 
tx_count, attestation_count, - body_json, created_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, ( - genesis["height"], - genesis["block_hash"], - genesis["prev_hash"], - genesis["timestamp"], - genesis["merkle_root"], - genesis["state_root"], - genesis["attestations_hash"], - genesis["producer"], - genesis["producer_sig"], - genesis["tx_count"], - genesis["attestation_count"], - genesis["body_json"], - int(time.time()) - )) - conn.commit() - - self.log(f"Genesis block created: {genesis['block_hash'][:16]}...") - return genesis - - def verify_migration(self) -> bool: - """Verify migration was successful""" - self.log("=" * 60) - self.log("VERIFICATION") - self.log("=" * 60) - - try: - with sqlite3.connect(self.mainnet_db) as conn: - cursor = conn.cursor() - - # Check genesis block - cursor.execute("SELECT block_hash FROM blocks WHERE height = 0") - genesis = cursor.fetchone() - if not genesis: - self.log("Genesis block not found", "ERROR") - return False - self.log(f"Genesis block: {genesis[0][:16]}...") - - # Check premine - cursor.execute("SELECT COUNT(*), SUM(total_urtc) FROM premine_allocations") - premine = cursor.fetchone() - expected_premine = TOTAL_PREMINE_RTC * 100_000_000 - if premine[1] != expected_premine: - self.log(f"Premine mismatch: {premine[1]} != {expected_premine}", "ERROR") - return False - self.log(f"Premine allocations: {premine[0]}, Total: {premine[1] / 100_000_000:,.0f} RTC") - - # Check balances - cursor.execute("SELECT COUNT(*), SUM(balance_urtc) FROM balances") - balances = cursor.fetchone() - self.log(f"Wallet count: {balances[0]}, Total balance: {(balances[1] or 0) / 100_000_000:,.2f} RTC") - - # Check chain metadata - cursor.execute("SELECT key, value FROM chain_metadata") - metadata = dict(cursor.fetchall()) - self.log(f"Chain version: {metadata.get('version', 'unknown')}") - self.log(f"Network: {metadata.get('network', 'unknown')}") - - except Exception as e: - self.log(f"Verification failed: {e}", "ERROR") - return 
False - - if self.errors: - self.log(f"Migration completed with {len(self.errors)} errors", "WARNING") - return False - - self.log("Verification PASSED") - return True - - def run(self, wallet_addresses: Dict[str, str] = None) -> Dict: - """ - Run full migration process. - - Args: - wallet_addresses: Optional dict mapping allocation_id to existing wallet addresses. - If not provided, new wallets will be generated. - - Returns: - Migration result including any generated wallets - """ - self.log("=" * 60) - self.log("RUSTCHAIN TESTNET -> MAINNET MIGRATION") - self.log(f"Version: {MIGRATION_VERSION}") - self.log(f"Started: {datetime.now().isoformat()}") - self.log("=" * 60) - - result = { - "success": False, - "version": MIGRATION_VERSION, - "started_at": datetime.now().isoformat(), - "completed_at": None, - "backup_path": None, - "genesis_hash": None, - "premine": None, - "errors": [] - } - - try: - # Step 1: Pre-flight checks - if not self.pre_flight_checks(): - result["errors"] = self.errors - return result - - # Step 2: Backup - result["backup_path"] = self.create_backup() - - # Step 3: Create mainnet schema - self.create_mainnet_schema() - - # Step 4: Migrate attestation history - self.migrate_attestation_history() - - # Step 5: Initialize premine - premine_result = self.initialize_premine(wallet_addresses) - result["premine"] = premine_result - - # Step 6: Create genesis block - genesis = self.create_genesis_block() - result["genesis_hash"] = genesis["block_hash"] - - # Step 7: Verify - if self.verify_migration(): - result["success"] = True - self.log("=" * 60) - self.log("MIGRATION COMPLETED SUCCESSFULLY") - self.log("=" * 60) - else: - result["errors"] = self.errors - - except Exception as e: - self.log(f"Migration failed: {e}", "ERROR") - result["errors"] = self.errors + [str(e)] - - result["completed_at"] = datetime.now().isoformat() - result["log"] = self.migration_log - - # Save migration log - log_path = os.path.join(self.backup_dir, 
f"migration_log_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json") - with open(log_path, 'w') as f: - json.dump(result, f, indent=2) - self.log(f"Migration log saved: {log_path}") - - return result - - -# ============================================================================= -# CLI -# ============================================================================= - -def main(): - """CLI entry point""" - import argparse - - parser = argparse.ArgumentParser(description="RustChain Testnet -> Mainnet Migration") - parser.add_argument("--testnet-db", default=TESTNET_DB_PATH, help="Testnet database path") - parser.add_argument("--mainnet-db", default=MAINNET_DB_PATH, help="Mainnet database path") - parser.add_argument("--backup-dir", default=BACKUP_DIR, help="Backup directory") - parser.add_argument("--wallets-file", help="JSON file with existing wallet addresses") - parser.add_argument("--dry-run", action="store_true", help="Run validation only") - - args = parser.parse_args() - - # Load wallet addresses if provided - wallet_addresses = None - if args.wallets_file and os.path.exists(args.wallets_file): - with open(args.wallets_file) as f: - wallet_addresses = json.load(f) - print(f"Loaded {len(wallet_addresses)} wallet addresses") - - # Create migration instance - migration = RustChainMigration( - testnet_db=args.testnet_db, - mainnet_db=args.mainnet_db, - backup_dir=args.backup_dir - ) - - if args.dry_run: - print("DRY RUN - Validation only") - success = migration.pre_flight_checks() - sys.exit(0 if success else 1) - - # Run migration - result = migration.run(wallet_addresses) - - # Print summary - print("\n" + "=" * 60) - print("MIGRATION SUMMARY") - print("=" * 60) - print(f"Success: {result['success']}") - print(f"Genesis Hash: {result.get('genesis_hash', 'N/A')}") - print(f"Backup: {result.get('backup_path', 'N/A')}") - - if result.get('premine', {}).get('generated_wallets'): - print("\nGENERATED WALLETS (SAVE THESE SECURELY!):") - for alloc_id, wallet in 
result['premine']['generated_wallets'].items(): - print(f"\n{alloc_id}:") - print(f" Address: {wallet['address']}") - print(f" Private Key: {wallet['private_key']}") - - if result.get('errors'): - print(f"\nErrors: {len(result['errors'])}") - for err in result['errors']: - print(f" - {err}") - - sys.exit(0 if result['success'] else 1) - - -if __name__ == "__main__": - main() +#!/usr/bin/env python3 +""" +RustChain Testnet to Mainnet Migration Script +============================================== + +Phase 6 Implementation: +- Testnet state snapshot +- Database schema migration +- Premine initialization +- Genesis block creation +- Validation and verification + +Run this script ONCE to migrate from testnet to mainnet. +""" + +import os +import sys +import json +import sqlite3 +import shutil +import time +import logging +import hashlib +from datetime import datetime +from typing import Dict, List, Optional + +# Import mainnet modules +from rustchain_crypto import blake2b256_hex, canonical_json, generate_wallet_keypair +from rustchain_genesis_premine import PremineManager, TOTAL_PREMINE_RTC, FOUNDER_ALLOCATIONS +from rustchain_tx_handler import TransactionPool + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [MIGRATE] %(levelname)s: %(message)s' +) +logger = logging.getLogger(__name__) + + +# ============================================================================= +# MIGRATION CONFIGURATION +# ============================================================================= + +MIGRATION_VERSION = "2.3.0-mainnet" +GENESIS_TIMESTAMP = 1728000000 # Oct 4, 2024 00:00:00 UTC (same as testnet) + +# Paths +TESTNET_DB_PATH = os.environ.get("TESTNET_DB", "/root/rustchain/rustchain_v2.db") +MAINNET_DB_PATH = os.environ.get("MAINNET_DB", "/root/rustchain/rustchain_mainnet.db") +BACKUP_DIR = os.environ.get("BACKUP_DIR", "/root/rustchain/backups") + +# Migration flags +PRESERVE_ATTESTATION_HISTORY = True +PRESERVE_MINER_STATS = True +RESET_BALANCES = True # 
Reset to premine only + + +# ============================================================================= +# MIGRATION STEPS +# ============================================================================= + +class RustChainMigration: + """ + Handles testnet -> mainnet migration. + """ + + def __init__( + self, + testnet_db: str = TESTNET_DB_PATH, + mainnet_db: str = MAINNET_DB_PATH, + backup_dir: str = BACKUP_DIR + ): + self.testnet_db = testnet_db + self.mainnet_db = mainnet_db + self.backup_dir = backup_dir + self.migration_log = [] + self.errors = [] + + def log(self, message: str, level: str = "INFO"): + """Log migration step""" + entry = { + "timestamp": datetime.now().isoformat(), + "level": level, + "message": message + } + self.migration_log.append(entry) + + if level == "ERROR": + logger.error(message) + self.errors.append(message) + elif level == "WARNING": + logger.warning(message) + else: + logger.info(message) + + def pre_flight_checks(self) -> bool: + """Run pre-migration validation""" + self.log("=" * 60) + self.log("PRE-FLIGHT CHECKS") + self.log("=" * 60) + + # Check testnet DB exists + if not os.path.exists(self.testnet_db): + self.log(f"Testnet DB not found: {self.testnet_db}", "ERROR") + return False + self.log(f"Testnet DB found: {self.testnet_db}") + + # Check mainnet DB doesn't exist (prevent accidental overwrite) + if os.path.exists(self.mainnet_db): + self.log(f"Mainnet DB already exists: {self.mainnet_db}", "WARNING") + self.log("Will create backup before overwriting") + + # Check backup directory + os.makedirs(self.backup_dir, exist_ok=True) + self.log(f"Backup directory: {self.backup_dir}") + + # Verify testnet DB integrity + try: + with sqlite3.connect(self.testnet_db) as conn: + cursor = conn.cursor() + + # Check tables exist + cursor.execute("SELECT name FROM sqlite_master WHERE type='table'") + tables = [row[0] for row in cursor.fetchall()] + self.log(f"Testnet tables: {tables}") + + # Check miner attestations + if 
"miner_attest_recent" in tables: + cursor.execute("SELECT COUNT(*) FROM miner_attest_recent") + count = cursor.fetchone()[0] + self.log(f"Active attestations: {count}") + + # Check balances + if "balances" in tables: + cursor.execute("SELECT COUNT(*), SUM(balance_urtc) FROM balances") + row = cursor.fetchone() + self.log(f"Testnet wallets: {row[0]}, Total balance: {(row[1] or 0) / 100_000_000:.2f} RTC") + + except Exception as e: + self.log(f"Failed to verify testnet DB: {e}", "ERROR") + return False + + self.log("Pre-flight checks PASSED") + return True + + def create_backup(self) -> str: + """Create timestamped backup of testnet DB""" + self.log("Creating backup...") + + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + backup_path = os.path.join(self.backup_dir, f"testnet_backup_{timestamp}.db") + + shutil.copy2(self.testnet_db, backup_path) + self.log(f"Backup created: {backup_path}") + + # Also backup mainnet if it exists + if os.path.exists(self.mainnet_db): + mainnet_backup = os.path.join(self.backup_dir, f"mainnet_backup_{timestamp}.db") + shutil.copy2(self.mainnet_db, mainnet_backup) + self.log(f"Mainnet backup created: {mainnet_backup}") + + return backup_path + + def create_mainnet_schema(self): + """Create mainnet database with upgraded schema""" + self.log("Creating mainnet database schema...") + + # Remove existing if present + if os.path.exists(self.mainnet_db): + os.remove(self.mainnet_db) + + with sqlite3.connect(self.mainnet_db) as conn: + cursor = conn.cursor() + + # Core tables + cursor.execute(""" + CREATE TABLE balances ( + wallet TEXT PRIMARY KEY, + balance_urtc INTEGER DEFAULT 0, + wallet_nonce INTEGER DEFAULT 0, + created_at INTEGER, + updated_at INTEGER + ) + """) + + cursor.execute(""" + CREATE TABLE blocks ( + height INTEGER PRIMARY KEY, + block_hash TEXT UNIQUE NOT NULL, + prev_hash TEXT NOT NULL, + timestamp INTEGER NOT NULL, + merkle_root TEXT NOT NULL, + state_root TEXT NOT NULL, + attestations_hash TEXT NOT NULL, + producer 
TEXT NOT NULL, + producer_sig TEXT NOT NULL, + tx_count INTEGER NOT NULL, + attestation_count INTEGER NOT NULL, + body_json TEXT NOT NULL, + created_at INTEGER NOT NULL + ) + """) + + cursor.execute(""" + CREATE TABLE miner_attest_recent ( + miner TEXT PRIMARY KEY, + device_arch TEXT, + device_family TEXT, + device_model TEXT, + device_year INTEGER, + ts_ok INTEGER, + last_block_produced INTEGER, + total_blocks_produced INTEGER DEFAULT 0 + ) + """) + + cursor.execute(""" + CREATE TABLE miner_attest_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + miner TEXT NOT NULL, + device_arch TEXT, + device_family TEXT, + ts_ok INTEGER NOT NULL, + block_height INTEGER + ) + """) + + cursor.execute(""" + CREATE TABLE pending_transactions ( + tx_hash TEXT PRIMARY KEY, + from_addr TEXT NOT NULL, + to_addr TEXT NOT NULL, + amount_urtc INTEGER NOT NULL, + nonce INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + memo TEXT DEFAULT '', + signature TEXT NOT NULL, + public_key TEXT NOT NULL, + created_at INTEGER NOT NULL, + status TEXT DEFAULT 'pending' + ) + """) + + cursor.execute(""" + CREATE TABLE transaction_history ( + tx_hash TEXT PRIMARY KEY, + from_addr TEXT NOT NULL, + to_addr TEXT NOT NULL, + amount_urtc INTEGER NOT NULL, + nonce INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + memo TEXT DEFAULT '', + signature TEXT NOT NULL, + public_key TEXT NOT NULL, + block_height INTEGER, + block_hash TEXT, + confirmed_at INTEGER, + status TEXT DEFAULT 'confirmed' + ) + """) + + cursor.execute(""" + CREATE TABLE wallet_pubkeys ( + address TEXT PRIMARY KEY, + public_key TEXT NOT NULL, + registered_at INTEGER NOT NULL + ) + """) + + cursor.execute(""" + CREATE TABLE premine_allocations ( + allocation_id TEXT PRIMARY KEY, + name TEXT NOT NULL, + wallet_address TEXT NOT NULL, + public_key TEXT, + total_urtc INTEGER NOT NULL, + vesting_months INTEGER NOT NULL, + cliff_months INTEGER NOT NULL, + claimed_urtc INTEGER DEFAULT 0, + role TEXT NOT NULL, + created_at INTEGER NOT NULL + ) + """) + 
+ cursor.execute(""" + CREATE TABLE vesting_claims ( + claim_id INTEGER PRIMARY KEY AUTOINCREMENT, + allocation_id TEXT NOT NULL, + amount_urtc INTEGER NOT NULL, + claimed_at INTEGER NOT NULL, + tx_hash TEXT + ) + """) + + cursor.execute(""" + CREATE TABLE ergo_anchors ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + rustchain_height INTEGER NOT NULL, + rustchain_hash TEXT NOT NULL, + commitment_hash TEXT NOT NULL, + ergo_tx_id TEXT NOT NULL, + ergo_height INTEGER, + confirmations INTEGER DEFAULT 0, + status TEXT DEFAULT 'pending', + created_at INTEGER NOT NULL + ) + """) + + cursor.execute(""" + CREATE TABLE chain_metadata ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL, + updated_at INTEGER NOT NULL + ) + """) + + # Indexes + cursor.execute("CREATE INDEX idx_tx_pending_from ON pending_transactions(from_addr)") + cursor.execute("CREATE INDEX idx_tx_history_from ON transaction_history(from_addr)") + cursor.execute("CREATE INDEX idx_tx_history_to ON transaction_history(to_addr)") + cursor.execute("CREATE INDEX idx_tx_history_block ON transaction_history(block_height)") + cursor.execute("CREATE INDEX idx_attest_history_miner ON miner_attest_history(miner)") + cursor.execute("CREATE INDEX idx_blocks_hash ON blocks(block_hash)") + + # Insert metadata + cursor.execute(""" + INSERT INTO chain_metadata (key, value, updated_at) VALUES + ('version', ?, ?), + ('genesis_timestamp', ?, ?), + ('network', 'mainnet', ?), + ('migration_date', ?, ?) 
+ """, ( + MIGRATION_VERSION, int(time.time()), + str(GENESIS_TIMESTAMP), int(time.time()), + int(time.time()), + datetime.now().isoformat(), int(time.time()) + )) + + conn.commit() + + self.log("Mainnet schema created successfully") + + def migrate_attestation_history(self): + """Migrate attestation history from testnet""" + if not PRESERVE_ATTESTATION_HISTORY: + self.log("Skipping attestation history migration (disabled)") + return + + self.log("Migrating attestation history...") + + try: + with sqlite3.connect(self.testnet_db) as testnet_conn: + testnet_conn.row_factory = sqlite3.Row + cursor = testnet_conn.cursor() + + # Get attestation history + cursor.execute(""" + SELECT miner, device_arch, device_family, ts_ok + FROM miner_attest_recent + """) + attestations = cursor.fetchall() + + with sqlite3.connect(self.mainnet_db) as mainnet_conn: + cursor = mainnet_conn.cursor() + + for att in attestations: + cursor.execute(""" + INSERT INTO miner_attest_recent + (miner, device_arch, device_family, ts_ok) + VALUES (?, ?, ?, ?) 
+ """, (att["miner"], att["device_arch"], att["device_family"], att["ts_ok"])) + + mainnet_conn.commit() + + self.log(f"Migrated {len(attestations)} attestation records") + + except Exception as e: + self.log(f"Attestation migration failed: {e}", "ERROR") + + def initialize_premine(self, wallet_addresses: Dict[str, str] = None) -> Dict: + """Initialize premine allocations""" + self.log("Initializing premine allocations...") + + manager = PremineManager(self.mainnet_db, GENESIS_TIMESTAMP) + result = manager.initialize_premine(wallet_addresses) + + self.log(f"Total premine: {TOTAL_PREMINE_RTC:,} RTC") + self.log(f"Allocations created: {len(result['allocations'])}") + + for alloc in result['allocations']: + self.log(f" {alloc['name']}: {alloc['amount_rtc']:,} RTC -> {alloc['wallet'][:20]}...") + + return result + + def create_genesis_block(self) -> Dict: + """Create genesis block""" + self.log("Creating genesis block...") + + # Genesis block data + genesis = { + "height": 0, + "block_hash": "0" * 64, # Will be computed + "prev_hash": "0" * 64, + "timestamp": GENESIS_TIMESTAMP * 1000, + "merkle_root": "0" * 64, + "state_root": "0" * 64, + "attestations_hash": "0" * 64, + "producer": "genesis", + "producer_sig": "0" * 128, + "tx_count": 0, + "attestation_count": 0, + "body_json": json.dumps({ + "transactions": [], + "attestations": [], + "premine": { + "total_rtc": TOTAL_PREMINE_RTC, + "allocations": list(FOUNDER_ALLOCATIONS.keys()) + } + }) + } + + # Compute genesis hash + genesis_data = canonical_json({ + "height": genesis["height"], + "prev_hash": genesis["prev_hash"], + "timestamp": genesis["timestamp"], + "merkle_root": genesis["merkle_root"], + "producer": genesis["producer"] + }) + genesis["block_hash"] = blake2b256_hex(genesis_data) + + with sqlite3.connect(self.mainnet_db) as conn: + cursor = conn.cursor() + cursor.execute(""" + INSERT INTO blocks + (height, block_hash, prev_hash, timestamp, merkle_root, state_root, + attestations_hash, producer, producer_sig, 
tx_count, attestation_count, + body_json, created_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, ( + genesis["height"], + genesis["block_hash"], + genesis["prev_hash"], + genesis["timestamp"], + genesis["merkle_root"], + genesis["state_root"], + genesis["attestations_hash"], + genesis["producer"], + genesis["producer_sig"], + genesis["tx_count"], + genesis["attestation_count"], + genesis["body_json"], + int(time.time()) + )) + conn.commit() + + self.log(f"Genesis block created: {genesis['block_hash'][:16]}...") + return genesis + + def verify_migration(self) -> bool: + """Verify migration was successful""" + self.log("=" * 60) + self.log("VERIFICATION") + self.log("=" * 60) + + try: + with sqlite3.connect(self.mainnet_db) as conn: + cursor = conn.cursor() + + # Check genesis block + cursor.execute("SELECT block_hash FROM blocks WHERE height = 0") + genesis = cursor.fetchone() + if not genesis: + self.log("Genesis block not found", "ERROR") + return False + self.log(f"Genesis block: {genesis[0][:16]}...") + + # Check premine + cursor.execute("SELECT COUNT(*), SUM(total_urtc) FROM premine_allocations") + premine = cursor.fetchone() + expected_premine = TOTAL_PREMINE_RTC * 100_000_000 + if premine[1] != expected_premine: + self.log(f"Premine mismatch: {premine[1]} != {expected_premine}", "ERROR") + return False + self.log(f"Premine allocations: {premine[0]}, Total: {premine[1] / 100_000_000:,.0f} RTC") + + # Check balances + cursor.execute("SELECT COUNT(*), SUM(balance_urtc) FROM balances") + balances = cursor.fetchone() + self.log(f"Wallet count: {balances[0]}, Total balance: {(balances[1] or 0) / 100_000_000:,.2f} RTC") + + # Check chain metadata + cursor.execute("SELECT key, value FROM chain_metadata") + metadata = dict(cursor.fetchall()) + self.log(f"Chain version: {metadata.get('version', 'unknown')}") + self.log(f"Network: {metadata.get('network', 'unknown')}") + + except Exception as e: + self.log(f"Verification failed: {e}", "ERROR") + return 
False + + if self.errors: + self.log(f"Migration completed with {len(self.errors)} errors", "WARNING") + return False + + self.log("Verification PASSED") + return True + + def run(self, wallet_addresses: Dict[str, str] = None) -> Dict: + """ + Run full migration process. + + Args: + wallet_addresses: Optional dict mapping allocation_id to existing wallet addresses. + If not provided, new wallets will be generated. + + Returns: + Migration result including any generated wallets + """ + self.log("=" * 60) + self.log("RUSTCHAIN TESTNET -> MAINNET MIGRATION") + self.log(f"Version: {MIGRATION_VERSION}") + self.log(f"Started: {datetime.now().isoformat()}") + self.log("=" * 60) + + result = { + "success": False, + "version": MIGRATION_VERSION, + "started_at": datetime.now().isoformat(), + "completed_at": None, + "backup_path": None, + "genesis_hash": None, + "premine": None, + "errors": [] + } + + try: + # Step 1: Pre-flight checks + if not self.pre_flight_checks(): + result["errors"] = self.errors + return result + + # Step 2: Backup + result["backup_path"] = self.create_backup() + + # Step 3: Create mainnet schema + self.create_mainnet_schema() + + # Step 4: Migrate attestation history + self.migrate_attestation_history() + + # Step 5: Initialize premine + premine_result = self.initialize_premine(wallet_addresses) + result["premine"] = premine_result + + # Step 6: Create genesis block + genesis = self.create_genesis_block() + result["genesis_hash"] = genesis["block_hash"] + + # Step 7: Verify + if self.verify_migration(): + result["success"] = True + self.log("=" * 60) + self.log("MIGRATION COMPLETED SUCCESSFULLY") + self.log("=" * 60) + else: + result["errors"] = self.errors + + except Exception as e: + self.log(f"Migration failed: {e}", "ERROR") + result["errors"] = self.errors + [str(e)] + + result["completed_at"] = datetime.now().isoformat() + result["log"] = self.migration_log + + # Save migration log + log_path = os.path.join(self.backup_dir, 
f"migration_log_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json") + with open(log_path, 'w') as f: + json.dump(result, f, indent=2) + self.log(f"Migration log saved: {log_path}") + + return result + + +# ============================================================================= +# CLI +# ============================================================================= + +def main(): + """CLI entry point""" + import argparse + + parser = argparse.ArgumentParser(description="RustChain Testnet -> Mainnet Migration") + parser.add_argument("--testnet-db", default=TESTNET_DB_PATH, help="Testnet database path") + parser.add_argument("--mainnet-db", default=MAINNET_DB_PATH, help="Mainnet database path") + parser.add_argument("--backup-dir", default=BACKUP_DIR, help="Backup directory") + parser.add_argument("--wallets-file", help="JSON file with existing wallet addresses") + parser.add_argument("--dry-run", action="store_true", help="Run validation only") + + args = parser.parse_args() + + # Load wallet addresses if provided + wallet_addresses = None + if args.wallets_file and os.path.exists(args.wallets_file): + with open(args.wallets_file) as f: + wallet_addresses = json.load(f) + print(f"Loaded {len(wallet_addresses)} wallet addresses") + + # Create migration instance + migration = RustChainMigration( + testnet_db=args.testnet_db, + mainnet_db=args.mainnet_db, + backup_dir=args.backup_dir + ) + + if args.dry_run: + print("DRY RUN - Validation only") + success = migration.pre_flight_checks() + sys.exit(0 if success else 1) + + # Run migration + result = migration.run(wallet_addresses) + + # Print summary + print("\n" + "=" * 60) + print("MIGRATION SUMMARY") + print("=" * 60) + print(f"Success: {result['success']}") + print(f"Genesis Hash: {result.get('genesis_hash', 'N/A')}") + print(f"Backup: {result.get('backup_path', 'N/A')}") + + if result.get('premine', {}).get('generated_wallets'): + print("\nGENERATED WALLETS (SAVE THESE SECURELY!):") + for alloc_id, wallet in 
result['premine']['generated_wallets'].items(): + print(f"\n{alloc_id}:") + print(f" Address: {wallet['address']}") + print(f" Private Key: {wallet['private_key']}") + + if result.get('errors'): + print(f"\nErrors: {len(result['errors'])}") + for err in result['errors']: + print(f" - {err}") + + sys.exit(0 if result['success'] else 1) + + +if __name__ == "__main__": + main() diff --git a/node/rustchain_p2p_gossip.py b/node/rustchain_p2p_gossip.py index 9d2a037f..7bd374d6 100644 --- a/node/rustchain_p2p_gossip.py +++ b/node/rustchain_p2p_gossip.py @@ -1,819 +1,819 @@ -#!/usr/bin/env python3 -""" -RustChain P2P Gossip & CRDT Synchronization Module -=================================================== - -Implements fully decentralized P2P sync with: -- Gossip protocol (Bitcoin-style INV/GETDATA) -- CRDT state merging (conflict-free eventual consistency) -- Epoch consensus (2-phase commit) - -Designed for 3+ nodes with no single point of failure. -""" - -import hashlib -import hmac -import json -import os -import secrets -import sqlite3 -import threading -import time -from dataclasses import dataclass, asdict, field -from enum import Enum -from typing import Dict, List, Optional, Set, Tuple, Any -from collections import defaultdict -import logging -import requests - -# Configuration -P2P_SECRET = os.environ.get("RC_P2P_SECRET", "rustchain_p2p_secret_2025_decentralized") -GOSSIP_TTL = 3 -SYNC_INTERVAL = 30 -MESSAGE_EXPIRY = 300 # 5 minutes -MAX_INV_BATCH = 1000 -DB_PATH = os.environ.get("RUSTCHAIN_DB", "/root/rustchain/rustchain_v2.db") - -logging.basicConfig(level=logging.INFO, format='%(asctime)s [P2P] %(message)s') -logger = logging.getLogger(__name__) - - -# ============================================================================= -# MESSAGE TYPES -# ============================================================================= - -class MessageType(Enum): - # Discovery & Health - PING = "ping" - PONG = "pong" - PEER_ANNOUNCE = "peer_announce" - PEER_LIST_REQ = 
"peer_list_req" - PEER_LIST = "peer_list" - - # Inventory Announcements (INV-style, hash only) - INV_ATTESTATION = "inv_attest" - INV_EPOCH = "inv_epoch" - INV_BALANCE = "inv_balance" - - # Data Requests (GETDATA-style) - GET_ATTESTATION = "get_attest" - GET_EPOCH = "get_epoch" - GET_BALANCES = "get_balances" - GET_STATE = "get_state" - - # Data Responses - ATTESTATION = "attestation" - EPOCH_DATA = "epoch_data" - BALANCES = "balances" - STATE = "state" - - # Epoch Consensus - EPOCH_PROPOSE = "epoch_propose" - EPOCH_VOTE = "epoch_vote" - EPOCH_COMMIT = "epoch_commit" - - -@dataclass -class GossipMessage: - """Base gossip message structure""" - msg_type: str - msg_id: str - sender_id: str - timestamp: int - ttl: int - signature: str - payload: Dict - - def to_dict(self) -> Dict: - return asdict(self) - - @classmethod - def from_dict(cls, data: Dict) -> 'GossipMessage': - return cls(**data) - - def compute_hash(self) -> str: - """Compute hash of message content for deduplication""" - content = f"{self.msg_type}:{self.sender_id}:{json.dumps(self.payload, sort_keys=True)}" - return hashlib.sha256(content.encode()).hexdigest()[:32] - - -# ============================================================================= -# CRDT IMPLEMENTATIONS -# ============================================================================= - -class LWWRegister: - """ - Last-Write-Wins Register for attestations. - The value with the highest timestamp wins. 
- """ - - def __init__(self): - self.data: Dict[str, Tuple[int, Dict]] = {} # key -> (timestamp, value) - - def set(self, key: str, value: Dict, timestamp: int): - """Set value if timestamp is newer""" - if key not in self.data or timestamp > self.data[key][0]: - self.data[key] = (timestamp, value) - return True - return False - - def get(self, key: str) -> Optional[Dict]: - """Get current value""" - if key in self.data: - return self.data[key][1] - return None - - def merge(self, other: 'LWWRegister'): - """Merge another LWW register into this one""" - for key, (ts, value) in other.data.items(): - self.set(key, value, ts) - - def to_dict(self) -> Dict: - return {k: {"ts": ts, "value": v} for k, (ts, v) in self.data.items()} - - @classmethod - def from_dict(cls, data: Dict) -> 'LWWRegister': - reg = cls() - for k, v in data.items(): - reg.data[k] = (v["ts"], v["value"]) - return reg - - -class PNCounter: - """ - Positive-Negative Counter for balances. - Tracks increments and decrements per node for conflict-free merging. 
- """ - - def __init__(self): - # miner_id -> {node_id: total_amount} - self.increments: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int)) - self.decrements: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int)) - - def credit(self, miner_id: str, node_id: str, amount: int): - """Record a credit (reward)""" - self.increments[miner_id][node_id] += amount - - def debit(self, miner_id: str, node_id: str, amount: int): - """Record a debit (withdrawal)""" - self.decrements[miner_id][node_id] += amount - - def get_balance(self, miner_id: str) -> int: - """Compute current balance from CRDT state""" - incr = sum(self.increments.get(miner_id, {}).values()) - decr = sum(self.decrements.get(miner_id, {}).values()) - return incr - decr - - def get_all_balances(self) -> Dict[str, int]: - """Get all miner balances""" - all_miners = set(self.increments.keys()) | set(self.decrements.keys()) - return {m: self.get_balance(m) for m in all_miners} - - def merge(self, other: 'PNCounter'): - """Merge remote state - take max for each (node_id, miner_id) pair""" - for miner_id, node_amounts in other.increments.items(): - for node_id, amount in node_amounts.items(): - self.increments[miner_id][node_id] = max( - self.increments[miner_id][node_id], amount - ) - - for miner_id, node_amounts in other.decrements.items(): - for node_id, amount in node_amounts.items(): - self.decrements[miner_id][node_id] = max( - self.decrements[miner_id][node_id], amount - ) - - def to_dict(self) -> Dict: - return { - "increments": {k: dict(v) for k, v in self.increments.items()}, - "decrements": {k: dict(v) for k, v in self.decrements.items()} - } - - @classmethod - def from_dict(cls, data: Dict) -> 'PNCounter': - counter = cls() - for miner_id, nodes in data.get("increments", {}).items(): - for node_id, amount in nodes.items(): - counter.increments[miner_id][node_id] = amount - for miner_id, nodes in data.get("decrements", {}).items(): - for node_id, amount in nodes.items(): - 
counter.decrements[miner_id][node_id] = amount - return counter - - -class GSet: - """ - Grow-only Set for settled epochs. - Once an epoch is settled, it can never be unsettled. - """ - - def __init__(self): - self.items: Set[int] = set() - self.metadata: Dict[int, Dict] = {} # epoch -> {settled_ts, merkle_root, ...} - - def add(self, epoch: int, metadata: Dict = None): - """Add epoch to settled set""" - self.items.add(epoch) - if metadata: - self.metadata[epoch] = metadata - - def contains(self, epoch: int) -> bool: - return epoch in self.items - - def merge(self, other: 'GSet'): - """Merge another G-Set - union operation""" - self.items |= other.items - for epoch, meta in other.metadata.items(): - if epoch not in self.metadata: - self.metadata[epoch] = meta - - def to_dict(self) -> Dict: - return { - "epochs": list(self.items), - "metadata": self.metadata - } - - @classmethod - def from_dict(cls, data: Dict) -> 'GSet': - gset = cls() - gset.items = set(data.get("epochs", [])) - gset.metadata = data.get("metadata", {}) - return gset - - -# ============================================================================= -# GOSSIP LAYER -# ============================================================================= - -class GossipLayer: - """ - Gossip protocol implementation with INV/GETDATA model. 
- """ - - def __init__(self, node_id: str, peers: Dict[str, str], db_path: str = DB_PATH): - self.node_id = node_id - self.peers = peers # peer_id -> url - self.db_path = db_path - self.seen_messages: Set[str] = set() - self.message_queue: List[GossipMessage] = [] - self.lock = threading.Lock() - - # CRDT state - self.attestation_crdt = LWWRegister() - self.balance_crdt = PNCounter() - self.epoch_crdt = GSet() - - # Load initial state from DB - self._load_state_from_db() - - def _load_state_from_db(self): - """Load existing state into CRDTs""" - try: - with sqlite3.connect(self.db_path) as conn: - # Load attestations - rows = conn.execute(""" - SELECT miner, ts_ok, device_family, device_arch, entropy_score - FROM miner_attest_recent - """).fetchall() - for miner, ts_ok, family, arch, entropy in rows: - self.attestation_crdt.set(miner, { - "miner": miner, - "device_family": family, - "device_arch": arch, - "entropy_score": entropy or 0 - }, ts_ok) - - # Load settled epochs - rows = conn.execute(""" - SELECT epoch FROM epoch_state WHERE settled = 1 - """).fetchall() - for (epoch,) in rows: - self.epoch_crdt.add(epoch) - - logger.info(f"Loaded {len(self.attestation_crdt.data)} attestations, " - f"{len(self.epoch_crdt.items)} settled epochs") - except Exception as e: - logger.error(f"Failed to load state from DB: {e}") - - def _sign_message(self, content: str) -> Tuple[str, int]: - """Generate HMAC signature for message""" - timestamp = int(time.time()) - message = f"{content}:{timestamp}" - sig = hmac.new(P2P_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest() - return sig, timestamp - - def _verify_signature(self, content: str, signature: str, timestamp: int) -> bool: - """Verify HMAC signature""" - # Check timestamp freshness - if abs(time.time() - timestamp) > MESSAGE_EXPIRY: - return False - message = f"{content}:{timestamp}" - expected = hmac.new(P2P_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest() - return hmac.compare_digest(signature, 
expected) - - def create_message(self, msg_type: MessageType, payload: Dict, ttl: int = GOSSIP_TTL) -> GossipMessage: - """Create a new gossip message""" - content = f"{msg_type.value}:{json.dumps(payload, sort_keys=True)}" - sig, ts = self._sign_message(content) - - msg = GossipMessage( - msg_type=msg_type.value, - msg_id=hashlib.sha256(f"{content}:{ts}".encode()).hexdigest()[:24], - sender_id=self.node_id, - timestamp=ts, - ttl=ttl, - signature=sig, - payload=payload - ) - return msg - - def verify_message(self, msg: GossipMessage) -> bool: - """Verify message signature and freshness""" - content = f"{msg.msg_type}:{json.dumps(msg.payload, sort_keys=True)}" - return self._verify_signature(content, msg.signature, msg.timestamp) - - def broadcast(self, msg: GossipMessage, exclude_peer: str = None): - """Broadcast message to all peers""" - for peer_id, peer_url in self.peers.items(): - if peer_id == exclude_peer: - continue - try: - self._send_to_peer(peer_url, msg) - except Exception as e: - logger.warning(f"Failed to send to {peer_id}: {e}") - - def _send_to_peer(self, peer_url: str, msg: GossipMessage): - """Send message to a specific peer""" - try: - resp = requests.post( - f"{peer_url}/p2p/gossip", - json=msg.to_dict(), - timeout=10, - verify=False - ) - if resp.status_code != 200: - logger.warning(f"Peer {peer_url} returned {resp.status_code}") - except Exception as e: - logger.debug(f"Send to {peer_url} failed: {e}") - - def handle_message(self, msg: GossipMessage) -> Optional[Dict]: - """Handle received gossip message""" - # Deduplication - if msg.msg_id in self.seen_messages: - return {"status": "duplicate"} - - # Verify signature - if not self.verify_message(msg): - logger.warning(f"Invalid signature from {msg.sender_id}") - return {"status": "invalid_signature"} - - self.seen_messages.add(msg.msg_id) - - # Limit seen_messages size - if len(self.seen_messages) > 10000: - self.seen_messages = set(list(self.seen_messages)[-5000:]) - - # Handle by type - 
msg_type = MessageType(msg.msg_type) - - if msg_type == MessageType.PING: - return self._handle_ping(msg) - elif msg_type == MessageType.INV_ATTESTATION: - return self._handle_inv_attestation(msg) - elif msg_type == MessageType.INV_EPOCH: - return self._handle_inv_epoch(msg) - elif msg_type == MessageType.ATTESTATION: - return self._handle_attestation(msg) - elif msg_type == MessageType.EPOCH_PROPOSE: - return self._handle_epoch_propose(msg) - elif msg_type == MessageType.EPOCH_VOTE: - return self._handle_epoch_vote(msg) - elif msg_type == MessageType.GET_STATE: - return self._handle_get_state(msg) - elif msg_type == MessageType.STATE: - return self._handle_state(msg) - - # Forward if TTL > 0 - if msg.ttl > 0: - msg.ttl -= 1 - self.broadcast(msg, exclude_peer=msg.sender_id) - - return {"status": "ok"} - - def _handle_ping(self, msg: GossipMessage) -> Dict: - """Respond to ping with pong""" - pong = self.create_message(MessageType.PONG, { - "node_id": self.node_id, - "attestation_count": len(self.attestation_crdt.data), - "settled_epochs": len(self.epoch_crdt.items) - }) - return {"status": "ok", "pong": pong.to_dict()} - - def _handle_inv_attestation(self, msg: GossipMessage) -> Dict: - """Handle attestation inventory announcement""" - miner_id = msg.payload.get("miner_id") - remote_ts = msg.payload.get("ts_ok", 0) - - # Check if we need this attestation - local = self.attestation_crdt.get(miner_id) - if local is None or remote_ts > self.attestation_crdt.data.get(miner_id, (0, {}))[0]: - # Request full data - return {"status": "need_data", "miner_id": miner_id} - - return {"status": "have_data"} - - def _handle_attestation(self, msg: GossipMessage) -> Dict: - """Handle full attestation data""" - attestation = msg.payload - miner_id = attestation.get("miner") - ts_ok = attestation.get("ts_ok", int(time.time())) - - # Update CRDT - if self.attestation_crdt.set(miner_id, attestation, ts_ok): - # Also update database - self._save_attestation_to_db(attestation, ts_ok) - 
logger.info(f"Merged attestation for {miner_id[:16]}...") - - return {"status": "ok"} - - def _save_attestation_to_db(self, attestation: Dict, ts_ok: int): - """Save attestation to SQLite database""" - try: - with sqlite3.connect(self.db_path) as conn: - conn.execute(""" - INSERT OR REPLACE INTO miner_attest_recent - (miner, ts_ok, device_family, device_arch, entropy_score) - VALUES (?, ?, ?, ?, ?) - """, ( - attestation.get("miner"), - ts_ok, - attestation.get("device_family", "unknown"), - attestation.get("device_arch", "unknown"), - attestation.get("entropy_score", 0) - )) - conn.commit() - except Exception as e: - logger.error(f"Failed to save attestation: {e}") - - def _handle_inv_epoch(self, msg: GossipMessage) -> Dict: - """Handle epoch settlement inventory""" - epoch = msg.payload.get("epoch") - if not self.epoch_crdt.contains(epoch): - return {"status": "need_data", "epoch": epoch} - return {"status": "have_data"} - - def _handle_epoch_propose(self, msg: GossipMessage) -> Dict: - """Handle epoch settlement proposal""" - proposal = msg.payload - epoch = proposal.get("epoch") - proposer = proposal.get("proposer") - - # Verify proposer is legitimate leader - nodes = sorted(list(self.peers.keys()) + [self.node_id]) - expected_leader = nodes[epoch % len(nodes)] - - if proposer != expected_leader: - logger.warning(f"Invalid proposer {proposer} for epoch {epoch}, expected {expected_leader}") - return {"status": "reject", "reason": "invalid_leader"} - - # Validate distribution - # TODO: Verify merkle root matches our local calculation - - # Vote to accept - vote = self.create_message(MessageType.EPOCH_VOTE, { - "epoch": epoch, - "proposal_hash": proposal.get("proposal_hash"), - "vote": "accept", - "voter": self.node_id - }) - - self.broadcast(vote) - - return {"status": "voted", "vote": "accept"} - - def _handle_epoch_vote(self, msg: GossipMessage) -> Dict: - """Handle epoch vote""" - # TODO: Collect votes and commit when majority reached - return {"status": "ok"} 
- - def _handle_get_state(self, msg: GossipMessage) -> Dict: - """Handle state request - return full CRDT state""" - return { - "status": "ok", - "state": { - "attestations": self.attestation_crdt.to_dict(), - "epochs": self.epoch_crdt.to_dict(), - "balances": self.balance_crdt.to_dict() - } - } - - def _handle_state(self, msg: GossipMessage) -> Dict: - """Handle incoming state - merge with local""" - state = msg.payload.get("state", {}) - - # Merge attestations - if "attestations" in state: - remote_attest = LWWRegister.from_dict(state["attestations"]) - self.attestation_crdt.merge(remote_attest) - - # Merge epochs - if "epochs" in state: - remote_epochs = GSet.from_dict(state["epochs"]) - self.epoch_crdt.merge(remote_epochs) - - # Merge balances - if "balances" in state: - remote_balances = PNCounter.from_dict(state["balances"]) - self.balance_crdt.merge(remote_balances) - - logger.info(f"Merged state from {msg.sender_id}") - return {"status": "ok"} - - def announce_attestation(self, miner_id: str, ts_ok: int, device_arch: str): - """Announce new attestation to peers""" - msg = self.create_message(MessageType.INV_ATTESTATION, { - "miner_id": miner_id, - "ts_ok": ts_ok, - "device_arch": device_arch, - "attestation_hash": hashlib.sha256(f"{miner_id}:{ts_ok}".encode()).hexdigest()[:16] - }) - self.broadcast(msg) - - def request_full_sync(self, peer_url: str): - """Request full state sync from a peer""" - msg = self.create_message(MessageType.GET_STATE, { - "requester": self.node_id - }) - try: - resp = requests.post( - f"{peer_url}/p2p/gossip", - json=msg.to_dict(), - timeout=30, - verify=False - ) - if resp.status_code == 200: - data = resp.json() - if "state" in data: - state_msg = GossipMessage( - msg_type=MessageType.STATE.value, - msg_id="sync", - sender_id="peer", - timestamp=int(time.time()), - ttl=0, - signature="", - payload=data - ) - self._handle_state(state_msg) - except Exception as e: - logger.error(f"Full sync failed: {e}") - - -# 
============================================================================= -# EPOCH CONSENSUS -# ============================================================================= - -class EpochConsensus: - """ - Epoch settlement consensus using 2-phase commit. - Round-robin leader selection based on epoch number. - """ - - def __init__(self, node_id: str, nodes: List[str], gossip: GossipLayer): - self.node_id = node_id - self.nodes = sorted(nodes) - self.gossip = gossip - self.votes: Dict[int, Dict[str, str]] = defaultdict(dict) # epoch -> {voter: vote} - self.proposals: Dict[int, Dict] = {} # epoch -> proposal - - def get_leader(self, epoch: int) -> str: - """Deterministic leader selection""" - return self.nodes[epoch % len(self.nodes)] - - def is_leader(self, epoch: int) -> bool: - return self.get_leader(epoch) == self.node_id - - def propose_settlement(self, epoch: int, distribution: Dict[str, int]) -> Optional[Dict]: - """Leader proposes epoch settlement""" - if not self.is_leader(epoch): - logger.warning(f"Not leader for epoch {epoch}") - return None - - # Compute merkle root of distribution - sorted_dist = sorted(distribution.items()) - merkle_data = json.dumps(sorted_dist, sort_keys=True) - merkle_root = hashlib.sha256(merkle_data.encode()).hexdigest() - - proposal = { - "epoch": epoch, - "proposer": self.node_id, - "distribution": distribution, - "merkle_root": merkle_root, - "proposal_hash": hashlib.sha256(f"{epoch}:{merkle_root}".encode()).hexdigest()[:24], - "timestamp": int(time.time()) - } - - self.proposals[epoch] = proposal - - # Broadcast proposal - msg = self.gossip.create_message(MessageType.EPOCH_PROPOSE, proposal) - self.gossip.broadcast(msg) - - logger.info(f"Proposed settlement for epoch {epoch} with {len(distribution)} miners") - return proposal - - def vote(self, epoch: int, proposal_hash: str, accept: bool): - """Vote on epoch proposal""" - vote = "accept" if accept else "reject" - self.votes[epoch][self.node_id] = vote - - msg = 
self.gossip.create_message(MessageType.EPOCH_VOTE, { - "epoch": epoch, - "proposal_hash": proposal_hash, - "vote": vote, - "voter": self.node_id - }) - self.gossip.broadcast(msg) - - def check_consensus(self, epoch: int) -> bool: - """Check if consensus reached for epoch""" - votes = self.votes.get(epoch, {}) - accept_count = sum(1 for v in votes.values() if v == "accept") - required = (len(self.nodes) // 2) + 1 - return accept_count >= required - - def receive_vote(self, epoch: int, voter: str, vote: str): - """Record received vote""" - self.votes[epoch][voter] = vote - - if self.check_consensus(epoch): - logger.info(f"Consensus reached for epoch {epoch}!") - self.gossip.epoch_crdt.add(epoch, self.proposals.get(epoch, {})) - - -# ============================================================================= -# P2P NODE COORDINATOR -# ============================================================================= - -class RustChainP2PNode: - """ - Main P2P node coordinator. - Manages gossip, CRDT state, and epoch consensus. 
- """ - - def __init__(self, node_id: str, db_path: str, peers: Dict[str, str]): - self.node_id = node_id - self.db_path = db_path - self.peers = peers - - # Initialize components - self.gossip = GossipLayer(node_id, peers, db_path) - self.consensus = EpochConsensus( - node_id, - list(peers.keys()) + [node_id], - self.gossip - ) - - self.running = False - self.sync_thread = None - - def start(self): - """Start P2P services""" - self.running = True - self.sync_thread = threading.Thread(target=self._sync_loop, daemon=True) - self.sync_thread.start() - logger.info(f"P2P Node {self.node_id} started with {len(self.peers)} peers") - - def stop(self): - """Stop P2P services""" - self.running = False - - def _sync_loop(self): - """Periodic sync with peers""" - while self.running: - for peer_id, peer_url in self.peers.items(): - try: - self.gossip.request_full_sync(peer_url) - except Exception as e: - logger.debug(f"Sync with {peer_id} failed: {e}") - time.sleep(SYNC_INTERVAL) - - def handle_gossip(self, data: Dict) -> Dict: - """Handle incoming gossip message""" - try: - msg = GossipMessage.from_dict(data) - return self.gossip.handle_message(msg) - except Exception as e: - logger.error(f"Failed to handle gossip: {e}") - return {"status": "error", "message": str(e)} - - def get_attestation_state(self) -> Dict: - """Get attestation state for sync""" - return { - "node_id": self.node_id, - "attestations": { - k: v[0] for k, v in self.gossip.attestation_crdt.data.items() - } - } - - def get_full_state(self) -> Dict: - """Get full CRDT state""" - return { - "node_id": self.node_id, - "attestations": self.gossip.attestation_crdt.to_dict(), - "epochs": self.gossip.epoch_crdt.to_dict(), - "balances": self.gossip.balance_crdt.to_dict() - } - - def announce_new_attestation(self, miner_id: str, attestation: Dict): - """Announce new attestation received by this node""" - ts_ok = attestation.get("ts_ok", int(time.time())) - - # Update local CRDT - 
self.gossip.attestation_crdt.set(miner_id, attestation, ts_ok) - - # Broadcast to peers - self.gossip.announce_attestation( - miner_id, - ts_ok, - attestation.get("device_arch", "unknown") - ) - - -# ============================================================================= -# FLASK ENDPOINTS REGISTRATION -# ============================================================================= - -def register_p2p_endpoints(app, p2p_node: RustChainP2PNode): - """Register P2P synchronization endpoints on Flask app""" - - from flask import request, jsonify - - @app.route('/p2p/gossip', methods=['POST']) - def receive_gossip(): - """Receive and process gossip message""" - data = request.get_json() - result = p2p_node.handle_gossip(data) - return jsonify(result) - - @app.route('/p2p/state', methods=['GET']) - def get_state(): - """Get full CRDT state for sync""" - return jsonify(p2p_node.get_full_state()) - - @app.route('/p2p/attestation_state', methods=['GET']) - def get_attestation_state(): - """Get attestation timestamps for efficient sync""" - return jsonify(p2p_node.get_attestation_state()) - - @app.route('/p2p/peers', methods=['GET']) - def get_peers(): - """Get list of known peers""" - return jsonify({ - "node_id": p2p_node.node_id, - "peers": list(p2p_node.peers.keys()) - }) - - @app.route('/p2p/health', methods=['GET']) - def p2p_health(): - """P2P subsystem health check""" - return jsonify({ - "node_id": p2p_node.node_id, - "running": p2p_node.running, - "peer_count": len(p2p_node.peers), - "attestation_count": len(p2p_node.gossip.attestation_crdt.data), - "settled_epochs": len(p2p_node.gossip.epoch_crdt.items) - }) - - logger.info("P2P endpoints registered") - - -# ============================================================================= -# MAIN (for testing) -# ============================================================================= - -if __name__ == "__main__": - # Test configuration - NODE_ID = os.environ.get("RC_NODE_ID", "node1") - - PEERS = { - 
"node1": "https://rustchain.org", - "node2": "http://50.28.86.153:8099", - "node3": "http://76.8.228.245:8099" - } - - # Remove self from peers - if NODE_ID in PEERS: - del PEERS[NODE_ID] - - # Create and start node - node = RustChainP2PNode(NODE_ID, DB_PATH, PEERS) - node.start() - - print(f"P2P Node {NODE_ID} running. Press Ctrl+C to stop.") - - try: - while True: - time.sleep(1) - except KeyboardInterrupt: - node.stop() - print("Stopped.") +#!/usr/bin/env python3 +""" +RustChain P2P Gossip & CRDT Synchronization Module +=================================================== + +Implements fully decentralized P2P sync with: +- Gossip protocol (Bitcoin-style INV/GETDATA) +- CRDT state merging (conflict-free eventual consistency) +- Epoch consensus (2-phase commit) + +Designed for 3+ nodes with no single point of failure. +""" + +import hashlib +import hmac +import json +import os +import secrets +import sqlite3 +import threading +import time +from dataclasses import dataclass, asdict, field +from enum import Enum +from typing import Dict, List, Optional, Set, Tuple, Any +from collections import defaultdict +import logging +import requests + +# Configuration +P2P_SECRET = os.environ.get("RC_P2P_SECRET", "rustchain_p2p_secret_2025_decentralized") +GOSSIP_TTL = 3 +SYNC_INTERVAL = 30 +MESSAGE_EXPIRY = 300 # 5 minutes +MAX_INV_BATCH = 1000 +DB_PATH = os.environ.get("RUSTCHAIN_DB", "/root/rustchain/rustchain_v2.db") + +logging.basicConfig(level=logging.INFO, format='%(asctime)s [P2P] %(message)s') +logger = logging.getLogger(__name__) + + +# ============================================================================= +# MESSAGE TYPES +# ============================================================================= + +class MessageType(Enum): + # Discovery & Health + PING = "ping" + PONG = "pong" + PEER_ANNOUNCE = "peer_announce" + PEER_LIST_REQ = "peer_list_req" + PEER_LIST = "peer_list" + + # Inventory Announcements (INV-style, hash only) + INV_ATTESTATION = "inv_attest" + 
INV_EPOCH = "inv_epoch" + INV_BALANCE = "inv_balance" + + # Data Requests (GETDATA-style) + GET_ATTESTATION = "get_attest" + GET_EPOCH = "get_epoch" + GET_BALANCES = "get_balances" + GET_STATE = "get_state" + + # Data Responses + ATTESTATION = "attestation" + EPOCH_DATA = "epoch_data" + BALANCES = "balances" + STATE = "state" + + # Epoch Consensus + EPOCH_PROPOSE = "epoch_propose" + EPOCH_VOTE = "epoch_vote" + EPOCH_COMMIT = "epoch_commit" + + +@dataclass +class GossipMessage: + """Base gossip message structure""" + msg_type: str + msg_id: str + sender_id: str + timestamp: int + ttl: int + signature: str + payload: Dict + + def to_dict(self) -> Dict: + return asdict(self) + + @classmethod + def from_dict(cls, data: Dict) -> 'GossipMessage': + return cls(**data) + + def compute_hash(self) -> str: + """Compute hash of message content for deduplication""" + content = f"{self.msg_type}:{self.sender_id}:{json.dumps(self.payload, sort_keys=True)}" + return hashlib.sha256(content.encode()).hexdigest()[:32] + + +# ============================================================================= +# CRDT IMPLEMENTATIONS +# ============================================================================= + +class LWWRegister: + """ + Last-Write-Wins Register for attestations. + The value with the highest timestamp wins. 
+ """ + + def __init__(self): + self.data: Dict[str, Tuple[int, Dict]] = {} # key -> (timestamp, value) + + def set(self, key: str, value: Dict, timestamp: int): + """Set value if timestamp is newer""" + if key not in self.data or timestamp > self.data[key][0]: + self.data[key] = (timestamp, value) + return True + return False + + def get(self, key: str) -> Optional[Dict]: + """Get current value""" + if key in self.data: + return self.data[key][1] + return None + + def merge(self, other: 'LWWRegister'): + """Merge another LWW register into this one""" + for key, (ts, value) in other.data.items(): + self.set(key, value, ts) + + def to_dict(self) -> Dict: + return {k: {"ts": ts, "value": v} for k, (ts, v) in self.data.items()} + + @classmethod + def from_dict(cls, data: Dict) -> 'LWWRegister': + reg = cls() + for k, v in data.items(): + reg.data[k] = (v["ts"], v["value"]) + return reg + + +class PNCounter: + """ + Positive-Negative Counter for balances. + Tracks increments and decrements per node for conflict-free merging. 
+ """ + + def __init__(self): + # miner_id -> {node_id: total_amount} + self.increments: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int)) + self.decrements: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int)) + + def credit(self, miner_id: str, node_id: str, amount: int): + """Record a credit (reward)""" + self.increments[miner_id][node_id] += amount + + def debit(self, miner_id: str, node_id: str, amount: int): + """Record a debit (withdrawal)""" + self.decrements[miner_id][node_id] += amount + + def get_balance(self, miner_id: str) -> int: + """Compute current balance from CRDT state""" + incr = sum(self.increments.get(miner_id, {}).values()) + decr = sum(self.decrements.get(miner_id, {}).values()) + return incr - decr + + def get_all_balances(self) -> Dict[str, int]: + """Get all miner balances""" + all_miners = set(self.increments.keys()) | set(self.decrements.keys()) + return {m: self.get_balance(m) for m in all_miners} + + def merge(self, other: 'PNCounter'): + """Merge remote state - take max for each (node_id, miner_id) pair""" + for miner_id, node_amounts in other.increments.items(): + for node_id, amount in node_amounts.items(): + self.increments[miner_id][node_id] = max( + self.increments[miner_id][node_id], amount + ) + + for miner_id, node_amounts in other.decrements.items(): + for node_id, amount in node_amounts.items(): + self.decrements[miner_id][node_id] = max( + self.decrements[miner_id][node_id], amount + ) + + def to_dict(self) -> Dict: + return { + "increments": {k: dict(v) for k, v in self.increments.items()}, + "decrements": {k: dict(v) for k, v in self.decrements.items()} + } + + @classmethod + def from_dict(cls, data: Dict) -> 'PNCounter': + counter = cls() + for miner_id, nodes in data.get("increments", {}).items(): + for node_id, amount in nodes.items(): + counter.increments[miner_id][node_id] = amount + for miner_id, nodes in data.get("decrements", {}).items(): + for node_id, amount in nodes.items(): + 
counter.decrements[miner_id][node_id] = amount + return counter + + +class GSet: + """ + Grow-only Set for settled epochs. + Once an epoch is settled, it can never be unsettled. + """ + + def __init__(self): + self.items: Set[int] = set() + self.metadata: Dict[int, Dict] = {} # epoch -> {settled_ts, merkle_root, ...} + + def add(self, epoch: int, metadata: Dict = None): + """Add epoch to settled set""" + self.items.add(epoch) + if metadata: + self.metadata[epoch] = metadata + + def contains(self, epoch: int) -> bool: + return epoch in self.items + + def merge(self, other: 'GSet'): + """Merge another G-Set - union operation""" + self.items |= other.items + for epoch, meta in other.metadata.items(): + if epoch not in self.metadata: + self.metadata[epoch] = meta + + def to_dict(self) -> Dict: + return { + "epochs": list(self.items), + "metadata": self.metadata + } + + @classmethod + def from_dict(cls, data: Dict) -> 'GSet': + gset = cls() + gset.items = set(data.get("epochs", [])) + gset.metadata = data.get("metadata", {}) + return gset + + +# ============================================================================= +# GOSSIP LAYER +# ============================================================================= + +class GossipLayer: + """ + Gossip protocol implementation with INV/GETDATA model. 
+ """ + + def __init__(self, node_id: str, peers: Dict[str, str], db_path: str = DB_PATH): + self.node_id = node_id + self.peers = peers # peer_id -> url + self.db_path = db_path + self.seen_messages: Set[str] = set() + self.message_queue: List[GossipMessage] = [] + self.lock = threading.Lock() + + # CRDT state + self.attestation_crdt = LWWRegister() + self.balance_crdt = PNCounter() + self.epoch_crdt = GSet() + + # Load initial state from DB + self._load_state_from_db() + + def _load_state_from_db(self): + """Load existing state into CRDTs""" + try: + with sqlite3.connect(self.db_path) as conn: + # Load attestations + rows = conn.execute(""" + SELECT miner, ts_ok, device_family, device_arch, entropy_score + FROM miner_attest_recent + """).fetchall() + for miner, ts_ok, family, arch, entropy in rows: + self.attestation_crdt.set(miner, { + "miner": miner, + "device_family": family, + "device_arch": arch, + "entropy_score": entropy or 0 + }, ts_ok) + + # Load settled epochs + rows = conn.execute(""" + SELECT epoch FROM epoch_state WHERE settled = 1 + """).fetchall() + for (epoch,) in rows: + self.epoch_crdt.add(epoch) + + logger.info(f"Loaded {len(self.attestation_crdt.data)} attestations, " + f"{len(self.epoch_crdt.items)} settled epochs") + except Exception as e: + logger.error(f"Failed to load state from DB: {e}") + + def _sign_message(self, content: str) -> Tuple[str, int]: + """Generate HMAC signature for message""" + timestamp = int(time.time()) + message = f"{content}:{timestamp}" + sig = hmac.new(P2P_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest() + return sig, timestamp + + def _verify_signature(self, content: str, signature: str, timestamp: int) -> bool: + """Verify HMAC signature""" + # Check timestamp freshness + if abs(time.time() - timestamp) > MESSAGE_EXPIRY: + return False + message = f"{content}:{timestamp}" + expected = hmac.new(P2P_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest() + return hmac.compare_digest(signature, 
expected) + + def create_message(self, msg_type: MessageType, payload: Dict, ttl: int = GOSSIP_TTL) -> GossipMessage: + """Create a new gossip message""" + content = f"{msg_type.value}:{json.dumps(payload, sort_keys=True)}" + sig, ts = self._sign_message(content) + + msg = GossipMessage( + msg_type=msg_type.value, + msg_id=hashlib.sha256(f"{content}:{ts}".encode()).hexdigest()[:24], + sender_id=self.node_id, + timestamp=ts, + ttl=ttl, + signature=sig, + payload=payload + ) + return msg + + def verify_message(self, msg: GossipMessage) -> bool: + """Verify message signature and freshness""" + content = f"{msg.msg_type}:{json.dumps(msg.payload, sort_keys=True)}" + return self._verify_signature(content, msg.signature, msg.timestamp) + + def broadcast(self, msg: GossipMessage, exclude_peer: str = None): + """Broadcast message to all peers""" + for peer_id, peer_url in self.peers.items(): + if peer_id == exclude_peer: + continue + try: + self._send_to_peer(peer_url, msg) + except Exception as e: + logger.warning(f"Failed to send to {peer_id}: {e}") + + def _send_to_peer(self, peer_url: str, msg: GossipMessage): + """Send message to a specific peer""" + try: + resp = requests.post( + f"{peer_url}/p2p/gossip", + json=msg.to_dict(), + timeout=10, + verify=False + ) + if resp.status_code != 200: + logger.warning(f"Peer {peer_url} returned {resp.status_code}") + except Exception as e: + logger.debug(f"Send to {peer_url} failed: {e}") + + def handle_message(self, msg: GossipMessage) -> Optional[Dict]: + """Handle received gossip message""" + # Deduplication + if msg.msg_id in self.seen_messages: + return {"status": "duplicate"} + + # Verify signature + if not self.verify_message(msg): + logger.warning(f"Invalid signature from {msg.sender_id}") + return {"status": "invalid_signature"} + + self.seen_messages.add(msg.msg_id) + + # Limit seen_messages size + if len(self.seen_messages) > 10000: + self.seen_messages = set(list(self.seen_messages)[-5000:]) + + # Handle by type + 
msg_type = MessageType(msg.msg_type) + + if msg_type == MessageType.PING: + return self._handle_ping(msg) + elif msg_type == MessageType.INV_ATTESTATION: + return self._handle_inv_attestation(msg) + elif msg_type == MessageType.INV_EPOCH: + return self._handle_inv_epoch(msg) + elif msg_type == MessageType.ATTESTATION: + return self._handle_attestation(msg) + elif msg_type == MessageType.EPOCH_PROPOSE: + return self._handle_epoch_propose(msg) + elif msg_type == MessageType.EPOCH_VOTE: + return self._handle_epoch_vote(msg) + elif msg_type == MessageType.GET_STATE: + return self._handle_get_state(msg) + elif msg_type == MessageType.STATE: + return self._handle_state(msg) + + # Forward if TTL > 0 + if msg.ttl > 0: + msg.ttl -= 1 + self.broadcast(msg, exclude_peer=msg.sender_id) + + return {"status": "ok"} + + def _handle_ping(self, msg: GossipMessage) -> Dict: + """Respond to ping with pong""" + pong = self.create_message(MessageType.PONG, { + "node_id": self.node_id, + "attestation_count": len(self.attestation_crdt.data), + "settled_epochs": len(self.epoch_crdt.items) + }) + return {"status": "ok", "pong": pong.to_dict()} + + def _handle_inv_attestation(self, msg: GossipMessage) -> Dict: + """Handle attestation inventory announcement""" + miner_id = msg.payload.get("miner_id") + remote_ts = msg.payload.get("ts_ok", 0) + + # Check if we need this attestation + local = self.attestation_crdt.get(miner_id) + if local is None or remote_ts > self.attestation_crdt.data.get(miner_id, (0, {}))[0]: + # Request full data + return {"status": "need_data", "miner_id": miner_id} + + return {"status": "have_data"} + + def _handle_attestation(self, msg: GossipMessage) -> Dict: + """Handle full attestation data""" + attestation = msg.payload + miner_id = attestation.get("miner") + ts_ok = attestation.get("ts_ok", int(time.time())) + + # Update CRDT + if self.attestation_crdt.set(miner_id, attestation, ts_ok): + # Also update database + self._save_attestation_to_db(attestation, ts_ok) + 
logger.info(f"Merged attestation for {miner_id[:16]}...") + + return {"status": "ok"} + + def _save_attestation_to_db(self, attestation: Dict, ts_ok: int): + """Save attestation to SQLite database""" + try: + with sqlite3.connect(self.db_path) as conn: + conn.execute(""" + INSERT OR REPLACE INTO miner_attest_recent + (miner, ts_ok, device_family, device_arch, entropy_score) + VALUES (?, ?, ?, ?, ?) + """, ( + attestation.get("miner"), + ts_ok, + attestation.get("device_family", "unknown"), + attestation.get("device_arch", "unknown"), + attestation.get("entropy_score", 0) + )) + conn.commit() + except Exception as e: + logger.error(f"Failed to save attestation: {e}") + + def _handle_inv_epoch(self, msg: GossipMessage) -> Dict: + """Handle epoch settlement inventory""" + epoch = msg.payload.get("epoch") + if not self.epoch_crdt.contains(epoch): + return {"status": "need_data", "epoch": epoch} + return {"status": "have_data"} + + def _handle_epoch_propose(self, msg: GossipMessage) -> Dict: + """Handle epoch settlement proposal""" + proposal = msg.payload + epoch = proposal.get("epoch") + proposer = proposal.get("proposer") + + # Verify proposer is legitimate leader + nodes = sorted(list(self.peers.keys()) + [self.node_id]) + expected_leader = nodes[epoch % len(nodes)] + + if proposer != expected_leader: + logger.warning(f"Invalid proposer {proposer} for epoch {epoch}, expected {expected_leader}") + return {"status": "reject", "reason": "invalid_leader"} + + # Validate distribution + # TODO: Verify merkle root matches our local calculation + + # Vote to accept + vote = self.create_message(MessageType.EPOCH_VOTE, { + "epoch": epoch, + "proposal_hash": proposal.get("proposal_hash"), + "vote": "accept", + "voter": self.node_id + }) + + self.broadcast(vote) + + return {"status": "voted", "vote": "accept"} + + def _handle_epoch_vote(self, msg: GossipMessage) -> Dict: + """Handle epoch vote""" + # TODO: Collect votes and commit when majority reached + return {"status": "ok"} 
+ + def _handle_get_state(self, msg: GossipMessage) -> Dict: + """Handle state request - return full CRDT state""" + return { + "status": "ok", + "state": { + "attestations": self.attestation_crdt.to_dict(), + "epochs": self.epoch_crdt.to_dict(), + "balances": self.balance_crdt.to_dict() + } + } + + def _handle_state(self, msg: GossipMessage) -> Dict: + """Handle incoming state - merge with local""" + state = msg.payload.get("state", {}) + + # Merge attestations + if "attestations" in state: + remote_attest = LWWRegister.from_dict(state["attestations"]) + self.attestation_crdt.merge(remote_attest) + + # Merge epochs + if "epochs" in state: + remote_epochs = GSet.from_dict(state["epochs"]) + self.epoch_crdt.merge(remote_epochs) + + # Merge balances + if "balances" in state: + remote_balances = PNCounter.from_dict(state["balances"]) + self.balance_crdt.merge(remote_balances) + + logger.info(f"Merged state from {msg.sender_id}") + return {"status": "ok"} + + def announce_attestation(self, miner_id: str, ts_ok: int, device_arch: str): + """Announce new attestation to peers""" + msg = self.create_message(MessageType.INV_ATTESTATION, { + "miner_id": miner_id, + "ts_ok": ts_ok, + "device_arch": device_arch, + "attestation_hash": hashlib.sha256(f"{miner_id}:{ts_ok}".encode()).hexdigest()[:16] + }) + self.broadcast(msg) + + def request_full_sync(self, peer_url: str): + """Request full state sync from a peer""" + msg = self.create_message(MessageType.GET_STATE, { + "requester": self.node_id + }) + try: + resp = requests.post( + f"{peer_url}/p2p/gossip", + json=msg.to_dict(), + timeout=30, + verify=False + ) + if resp.status_code == 200: + data = resp.json() + if "state" in data: + state_msg = GossipMessage( + msg_type=MessageType.STATE.value, + msg_id="sync", + sender_id="peer", + timestamp=int(time.time()), + ttl=0, + signature="", + payload=data + ) + self._handle_state(state_msg) + except Exception as e: + logger.error(f"Full sync failed: {e}") + + +# 
============================================================================= +# EPOCH CONSENSUS +# ============================================================================= + +class EpochConsensus: + """ + Epoch settlement consensus using 2-phase commit. + Round-robin leader selection based on epoch number. + """ + + def __init__(self, node_id: str, nodes: List[str], gossip: GossipLayer): + self.node_id = node_id + self.nodes = sorted(nodes) + self.gossip = gossip + self.votes: Dict[int, Dict[str, str]] = defaultdict(dict) # epoch -> {voter: vote} + self.proposals: Dict[int, Dict] = {} # epoch -> proposal + + def get_leader(self, epoch: int) -> str: + """Deterministic leader selection""" + return self.nodes[epoch % len(self.nodes)] + + def is_leader(self, epoch: int) -> bool: + return self.get_leader(epoch) == self.node_id + + def propose_settlement(self, epoch: int, distribution: Dict[str, int]) -> Optional[Dict]: + """Leader proposes epoch settlement""" + if not self.is_leader(epoch): + logger.warning(f"Not leader for epoch {epoch}") + return None + + # Compute merkle root of distribution + sorted_dist = sorted(distribution.items()) + merkle_data = json.dumps(sorted_dist, sort_keys=True) + merkle_root = hashlib.sha256(merkle_data.encode()).hexdigest() + + proposal = { + "epoch": epoch, + "proposer": self.node_id, + "distribution": distribution, + "merkle_root": merkle_root, + "proposal_hash": hashlib.sha256(f"{epoch}:{merkle_root}".encode()).hexdigest()[:24], + "timestamp": int(time.time()) + } + + self.proposals[epoch] = proposal + + # Broadcast proposal + msg = self.gossip.create_message(MessageType.EPOCH_PROPOSE, proposal) + self.gossip.broadcast(msg) + + logger.info(f"Proposed settlement for epoch {epoch} with {len(distribution)} miners") + return proposal + + def vote(self, epoch: int, proposal_hash: str, accept: bool): + """Vote on epoch proposal""" + vote = "accept" if accept else "reject" + self.votes[epoch][self.node_id] = vote + + msg = 
self.gossip.create_message(MessageType.EPOCH_VOTE, { + "epoch": epoch, + "proposal_hash": proposal_hash, + "vote": vote, + "voter": self.node_id + }) + self.gossip.broadcast(msg) + + def check_consensus(self, epoch: int) -> bool: + """Check if consensus reached for epoch""" + votes = self.votes.get(epoch, {}) + accept_count = sum(1 for v in votes.values() if v == "accept") + required = (len(self.nodes) // 2) + 1 + return accept_count >= required + + def receive_vote(self, epoch: int, voter: str, vote: str): + """Record received vote""" + self.votes[epoch][voter] = vote + + if self.check_consensus(epoch): + logger.info(f"Consensus reached for epoch {epoch}!") + self.gossip.epoch_crdt.add(epoch, self.proposals.get(epoch, {})) + + +# ============================================================================= +# P2P NODE COORDINATOR +# ============================================================================= + +class RustChainP2PNode: + """ + Main P2P node coordinator. + Manages gossip, CRDT state, and epoch consensus. 
+ """ + + def __init__(self, node_id: str, db_path: str, peers: Dict[str, str]): + self.node_id = node_id + self.db_path = db_path + self.peers = peers + + # Initialize components + self.gossip = GossipLayer(node_id, peers, db_path) + self.consensus = EpochConsensus( + node_id, + list(peers.keys()) + [node_id], + self.gossip + ) + + self.running = False + self.sync_thread = None + + def start(self): + """Start P2P services""" + self.running = True + self.sync_thread = threading.Thread(target=self._sync_loop, daemon=True) + self.sync_thread.start() + logger.info(f"P2P Node {self.node_id} started with {len(self.peers)} peers") + + def stop(self): + """Stop P2P services""" + self.running = False + + def _sync_loop(self): + """Periodic sync with peers""" + while self.running: + for peer_id, peer_url in self.peers.items(): + try: + self.gossip.request_full_sync(peer_url) + except Exception as e: + logger.debug(f"Sync with {peer_id} failed: {e}") + time.sleep(SYNC_INTERVAL) + + def handle_gossip(self, data: Dict) -> Dict: + """Handle incoming gossip message""" + try: + msg = GossipMessage.from_dict(data) + return self.gossip.handle_message(msg) + except Exception as e: + logger.error(f"Failed to handle gossip: {e}") + return {"status": "error", "message": str(e)} + + def get_attestation_state(self) -> Dict: + """Get attestation state for sync""" + return { + "node_id": self.node_id, + "attestations": { + k: v[0] for k, v in self.gossip.attestation_crdt.data.items() + } + } + + def get_full_state(self) -> Dict: + """Get full CRDT state""" + return { + "node_id": self.node_id, + "attestations": self.gossip.attestation_crdt.to_dict(), + "epochs": self.gossip.epoch_crdt.to_dict(), + "balances": self.gossip.balance_crdt.to_dict() + } + + def announce_new_attestation(self, miner_id: str, attestation: Dict): + """Announce new attestation received by this node""" + ts_ok = attestation.get("ts_ok", int(time.time())) + + # Update local CRDT + 
self.gossip.attestation_crdt.set(miner_id, attestation, ts_ok) + + # Broadcast to peers + self.gossip.announce_attestation( + miner_id, + ts_ok, + attestation.get("device_arch", "unknown") + ) + + +# ============================================================================= +# FLASK ENDPOINTS REGISTRATION +# ============================================================================= + +def register_p2p_endpoints(app, p2p_node: RustChainP2PNode): + """Register P2P synchronization endpoints on Flask app""" + + from flask import request, jsonify + + @app.route('/p2p/gossip', methods=['POST']) + def receive_gossip(): + """Receive and process gossip message""" + data = request.get_json() + result = p2p_node.handle_gossip(data) + return jsonify(result) + + @app.route('/p2p/state', methods=['GET']) + def get_state(): + """Get full CRDT state for sync""" + return jsonify(p2p_node.get_full_state()) + + @app.route('/p2p/attestation_state', methods=['GET']) + def get_attestation_state(): + """Get attestation timestamps for efficient sync""" + return jsonify(p2p_node.get_attestation_state()) + + @app.route('/p2p/peers', methods=['GET']) + def get_peers(): + """Get list of known peers""" + return jsonify({ + "node_id": p2p_node.node_id, + "peers": list(p2p_node.peers.keys()) + }) + + @app.route('/p2p/health', methods=['GET']) + def p2p_health(): + """P2P subsystem health check""" + return jsonify({ + "node_id": p2p_node.node_id, + "running": p2p_node.running, + "peer_count": len(p2p_node.peers), + "attestation_count": len(p2p_node.gossip.attestation_crdt.data), + "settled_epochs": len(p2p_node.gossip.epoch_crdt.items) + }) + + logger.info("P2P endpoints registered") + + +# ============================================================================= +# MAIN (for testing) +# ============================================================================= + +if __name__ == "__main__": + # Test configuration + NODE_ID = os.environ.get("RC_NODE_ID", "node1") + + PEERS = { + 
"node1": "https://rustchain.org", + "node2": "http://50.28.86.153:8099", + "node3": "http://76.8.228.245:8099" + } + + # Remove self from peers + if NODE_ID in PEERS: + del PEERS[NODE_ID] + + # Create and start node + node = RustChainP2PNode(NODE_ID, DB_PATH, PEERS) + node.start() + + print(f"P2P Node {NODE_ID} running. Press Ctrl+C to stop.") + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + node.stop() + print("Stopped.") diff --git a/node/rustchain_tx_handler.py b/node/rustchain_tx_handler.py index 09be9e0a..57cef2a7 100644 --- a/node/rustchain_tx_handler.py +++ b/node/rustchain_tx_handler.py @@ -1,775 +1,775 @@ -#!/usr/bin/env python3 -""" -RustChain Transaction Handler - Mainnet Security -================================================= - -Phase 1 Implementation: -- Signed transaction validation -- Replay protection via nonces -- Balance checking with proper locking -- Transaction pool management - -All transactions MUST be signed with Ed25519. -""" - -import sqlite3 -import time -import threading -import logging -from typing import Dict, Optional, Tuple, List -from dataclasses import dataclass -from contextlib import contextmanager - -from rustchain_crypto import ( - SignedTransaction, - Ed25519Signer, - blake2b256_hex, - address_from_public_key -) - -logging.basicConfig( - level=logging.INFO, - format='%(asctime)s [TX] %(levelname)s: %(message)s' -) -logger = logging.getLogger(__name__) - - -# ============================================================================= -# DATABASE SCHEMA UPGRADES -# ============================================================================= - -SCHEMA_UPGRADE_SQL = """ --- Upgrade balances table to include nonce -ALTER TABLE balances ADD COLUMN wallet_nonce INTEGER DEFAULT 0; - --- Create pending transactions table -CREATE TABLE IF NOT EXISTS pending_transactions ( - tx_hash TEXT PRIMARY KEY, - from_addr TEXT NOT NULL, - to_addr TEXT NOT NULL, - amount_urtc INTEGER NOT NULL, - nonce INTEGER NOT NULL, - 
timestamp INTEGER NOT NULL, - memo TEXT DEFAULT '', - signature TEXT NOT NULL, - public_key TEXT NOT NULL, - created_at INTEGER NOT NULL, - status TEXT DEFAULT 'pending' -); - --- Create transaction history table -CREATE TABLE IF NOT EXISTS transaction_history ( - tx_hash TEXT PRIMARY KEY, - from_addr TEXT NOT NULL, - to_addr TEXT NOT NULL, - amount_urtc INTEGER NOT NULL, - nonce INTEGER NOT NULL, - timestamp INTEGER NOT NULL, - memo TEXT DEFAULT '', - signature TEXT NOT NULL, - public_key TEXT NOT NULL, - block_height INTEGER, - block_hash TEXT, - confirmed_at INTEGER, - status TEXT DEFAULT 'confirmed' -); - --- Create wallet public key registry -CREATE TABLE IF NOT EXISTS wallet_pubkeys ( - address TEXT PRIMARY KEY, - public_key TEXT NOT NULL, - registered_at INTEGER NOT NULL -); - --- Index for faster queries -CREATE INDEX IF NOT EXISTS idx_pending_from ON pending_transactions(from_addr); -CREATE INDEX IF NOT EXISTS idx_pending_nonce ON pending_transactions(from_addr, nonce); -CREATE INDEX IF NOT EXISTS idx_history_from ON transaction_history(from_addr); -CREATE INDEX IF NOT EXISTS idx_history_to ON transaction_history(to_addr); -CREATE INDEX IF NOT EXISTS idx_history_block ON transaction_history(block_height); -""" - - -# ============================================================================= -# TRANSACTION POOL -# ============================================================================= - -class TransactionPool: - """ - Manages pending transactions with proper validation. 
- """ - - def __init__(self, db_path: str): - self.db_path = db_path - self._lock = threading.Lock() - self._ensure_schema() - - def _ensure_schema(self): - """Ensure database schema is up to date""" - with sqlite3.connect(self.db_path) as conn: - cursor = conn.cursor() - - # Check if wallet_nonce column exists - cursor.execute("PRAGMA table_info(balances)") - columns = [col[1] for col in cursor.fetchall()] - - if "wallet_nonce" not in columns: - try: - cursor.execute("ALTER TABLE balances ADD COLUMN wallet_nonce INTEGER DEFAULT 0") - logger.info("Added wallet_nonce column to balances table") - except sqlite3.OperationalError: - pass # Column might already exist - - # Create other tables - for statement in SCHEMA_UPGRADE_SQL.split(';'): - statement = statement.strip() - if statement and not statement.startswith('ALTER'): - try: - cursor.execute(statement) - except sqlite3.OperationalError as e: - if "already exists" not in str(e): - logger.warning(f"Schema statement failed: {e}") - - conn.commit() - - @contextmanager - def _get_connection(self): - """Get database connection with proper locking""" - with self._lock: - conn = sqlite3.connect(self.db_path) - conn.row_factory = sqlite3.Row - try: - yield conn - conn.commit() - except Exception: - conn.rollback() - raise - finally: - conn.close() - - def get_wallet_nonce(self, address: str) -> int: - """Get current nonce for a wallet""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - "SELECT wallet_nonce FROM balances WHERE wallet = ?", - (address,) - ) - result = cursor.fetchone() - return result["wallet_nonce"] if result else 0 - - def get_balance(self, address: str) -> int: - """Get current balance for a wallet (in uRTC)""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - "SELECT balance_urtc FROM balances WHERE wallet = ?", - (address,) - ) - result = cursor.fetchone() - return result["balance_urtc"] if result else 0 - - def 
get_pending_amount(self, address: str) -> int: - """Get total pending outgoing amount for address""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - """SELECT COALESCE(SUM(amount_urtc), 0) as pending - FROM pending_transactions - WHERE from_addr = ? AND status = 'pending'""", - (address,) - ) - result = cursor.fetchone() - return result["pending"] if result else 0 - - def get_available_balance(self, address: str) -> int: - """Get available balance (total - pending)""" - balance = self.get_balance(address) - pending = self.get_pending_amount(address) - return max(0, balance - pending) - - def register_public_key(self, address: str, public_key: str) -> bool: - """Register a wallet's public key""" - with self._get_connection() as conn: - cursor = conn.cursor() - - # Verify address derives from public key - derived_addr = address_from_public_key(bytes.fromhex(public_key)) - if derived_addr != address: - logger.warning(f"Address mismatch: {address} != {derived_addr}") - return False - - try: - cursor.execute( - """INSERT OR REPLACE INTO wallet_pubkeys - (address, public_key, registered_at) - VALUES (?, ?, ?)""", - (address, public_key, int(time.time())) - ) - return True - except Exception as e: - logger.error(f"Failed to register public key: {e}") - return False - - def get_public_key(self, address: str) -> Optional[str]: - """Get registered public key for address""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - "SELECT public_key FROM wallet_pubkeys WHERE address = ?", - (address,) - ) - result = cursor.fetchone() - return result["public_key"] if result else None - - def validate_transaction(self, tx: SignedTransaction) -> Tuple[bool, str]: - """ - Validate a signed transaction. - - Checks: - 1. Signature validity - 2. Public key matches from_addr - 3. Nonce is correct (replay protection) - 4. Sufficient balance - 5. No duplicate in pool - """ - # 1. 
Verify signature - if not tx.verify(): - return False, "Invalid signature" - - # 2. Verify public key matches address - derived_addr = address_from_public_key(bytes.fromhex(tx.public_key)) - if derived_addr != tx.from_addr: - return False, f"Public key does not match from_addr" - - # 3. Check nonce - expected_nonce = self.get_wallet_nonce(tx.from_addr) + 1 - pending_nonces = self._get_pending_nonces(tx.from_addr) - - # Account for pending transactions - while expected_nonce in pending_nonces: - expected_nonce += 1 - - if tx.nonce != expected_nonce: - return False, f"Invalid nonce: expected {expected_nonce}, got {tx.nonce}" - - # 4. Validate amount and check balance - if tx.amount_urtc <= 0: - return False, "Invalid amount: must be > 0" - - available = self.get_available_balance(tx.from_addr) - if tx.amount_urtc > available: - return False, f"Insufficient balance: have {available}, need {tx.amount_urtc}" - - # 5. Check for duplicate - if self._tx_exists(tx.tx_hash): - return False, "Transaction already exists" - - return True, "" - - def _get_pending_nonces(self, address: str) -> set: - """Get set of pending nonces for address""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - "SELECT nonce FROM pending_transactions WHERE from_addr = ? AND status = 'pending'", - (address,) - ) - return {row["nonce"] for row in cursor.fetchall()} - - def _tx_exists(self, tx_hash: str) -> bool: - """Check if transaction already exists""" - with self._get_connection() as conn: - cursor = conn.cursor() - - # Check pending - cursor.execute( - "SELECT 1 FROM pending_transactions WHERE tx_hash = ?", - (tx_hash,) - ) - if cursor.fetchone(): - return True - - # Check history - cursor.execute( - "SELECT 1 FROM transaction_history WHERE tx_hash = ?", - (tx_hash,) - ) - return cursor.fetchone() is not None - - def submit_transaction(self, tx: SignedTransaction) -> Tuple[bool, str]: - """ - Submit a signed transaction to the pool. 
- - Returns (success, error_or_tx_hash) - """ - # Validate - is_valid, error = self.validate_transaction(tx) - if not is_valid: - return False, error - - # Register public key if not already registered - self.register_public_key(tx.from_addr, tx.public_key) - - # Add to pending pool - with self._get_connection() as conn: - cursor = conn.cursor() - - try: - cursor.execute( - """INSERT INTO pending_transactions - (tx_hash, from_addr, to_addr, amount_urtc, nonce, - timestamp, memo, signature, public_key, created_at, status) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'pending')""", - ( - tx.tx_hash, - tx.from_addr, - tx.to_addr, - tx.amount_urtc, - tx.nonce, - tx.timestamp, - tx.memo, - tx.signature, - tx.public_key, - int(time.time()) - ) - ) - - logger.info(f"TX accepted: {tx.tx_hash[:16]}... " - f"{tx.from_addr[:16]}... -> {tx.to_addr[:16]}... " - f"amount={tx.amount_urtc}") - - return True, tx.tx_hash - - except sqlite3.IntegrityError as e: - return False, f"Transaction already exists: {e}" - - def get_pending_transactions(self, limit: int = 100) -> List[SignedTransaction]: - """Get pending transactions ordered by nonce""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - """SELECT * FROM pending_transactions - WHERE status = 'pending' - ORDER BY nonce ASC - LIMIT ?""", - (limit,) - ) - - return [ - SignedTransaction( - from_addr=row["from_addr"], - to_addr=row["to_addr"], - amount_urtc=row["amount_urtc"], - nonce=row["nonce"], - timestamp=row["timestamp"], - memo=row["memo"], - signature=row["signature"], - public_key=row["public_key"], - tx_hash=row["tx_hash"] - ) - for row in cursor.fetchall() - ] - - def confirm_transaction( - self, - tx_hash: str, - block_height: int, - block_hash: str - ) -> bool: - """ - Confirm a transaction (move from pending to history). - Also updates balances and nonces. 
- """ - with self._get_connection() as conn: - cursor = conn.cursor() - - # Get pending transaction - cursor.execute( - "SELECT * FROM pending_transactions WHERE tx_hash = ?", - (tx_hash,) - ) - row = cursor.fetchone() - - if not row: - logger.warning(f"Transaction not found in pending: {tx_hash}") - return False - - try: - # Move to history - cursor.execute( - """INSERT INTO transaction_history - (tx_hash, from_addr, to_addr, amount_urtc, nonce, - timestamp, memo, signature, public_key, - block_height, block_hash, confirmed_at, status) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'confirmed')""", - ( - row["tx_hash"], - row["from_addr"], - row["to_addr"], - row["amount_urtc"], - row["nonce"], - row["timestamp"], - row["memo"], - row["signature"], - row["public_key"], - block_height, - block_hash, - int(time.time()) - ) - ) - - # Update sender balance and nonce - cursor.execute( - """UPDATE balances - SET balance_urtc = balance_urtc - ?, - wallet_nonce = ? - WHERE wallet = ?""", - (row["amount_urtc"], row["nonce"], row["from_addr"]) - ) - - # Update receiver balance (create if not exists) - cursor.execute( - """INSERT INTO balances (wallet, balance_urtc, wallet_nonce) - VALUES (?, ?, 0) - ON CONFLICT(wallet) DO UPDATE SET - balance_urtc = balance_urtc + ?""", - (row["to_addr"], row["amount_urtc"], row["amount_urtc"]) - ) - - # Remove from pending - cursor.execute( - "DELETE FROM pending_transactions WHERE tx_hash = ?", - (tx_hash,) - ) - - logger.info(f"TX confirmed: {tx_hash[:16]}... 
in block {block_height}") - return True - - except Exception as e: - logger.error(f"Failed to confirm transaction: {e}") - return False - - def reject_transaction(self, tx_hash: str, reason: str = "") -> bool: - """Reject a pending transaction""" - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - """UPDATE pending_transactions - SET status = 'rejected' - WHERE tx_hash = ?""", - (tx_hash,) - ) - - if cursor.rowcount > 0: - logger.info(f"TX rejected: {tx_hash[:16]}... reason: {reason}") - return True - return False - - def cleanup_expired(self, max_age_seconds: int = 3600) -> int: - """Remove transactions older than max_age""" - cutoff = int(time.time()) - max_age_seconds - - with self._get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - """DELETE FROM pending_transactions - WHERE status = 'pending' AND created_at < ?""", - (cutoff,) - ) - count = cursor.rowcount - - if count > 0: - logger.info(f"Cleaned up {count} expired pending transactions") - - return count - - def get_transaction_status(self, tx_hash: str) -> Dict: - """Get transaction status""" - with self._get_connection() as conn: - cursor = conn.cursor() - - # Check pending - cursor.execute( - "SELECT *, 'pending' as location FROM pending_transactions WHERE tx_hash = ?", - (tx_hash,) - ) - row = cursor.fetchone() - if row: - return dict(row) - - # Check history - cursor.execute( - "SELECT *, 'history' as location FROM transaction_history WHERE tx_hash = ?", - (tx_hash,) - ) - row = cursor.fetchone() - if row: - return dict(row) - - return {"status": "not_found"} - - -# ============================================================================= -# TRANSACTION API ENDPOINTS -# ============================================================================= - -def create_tx_api_routes(app, tx_pool: TransactionPool): - """ - Create Flask routes for transaction API. 
- - Endpoints: - - POST /tx/submit - Submit signed transaction - - GET /tx/status/ - Get transaction status - - GET /tx/pending - List pending transactions - - GET /wallet//balance - Get wallet balance - - GET /wallet//nonce - Get wallet nonce - - GET /wallet//history - Get transaction history - """ - from flask import request, jsonify - - @app.route('/tx/submit', methods=['POST']) - def submit_transaction(): - """Submit a signed transaction""" - try: - data = request.get_json() - - if not data: - return jsonify({"error": "No JSON data provided"}), 400 - - # Create transaction object - tx = SignedTransaction.from_dict(data) - - # Compute hash if not provided - if not tx.tx_hash: - tx.tx_hash = tx.compute_hash() - - # Submit to pool - success, result = tx_pool.submit_transaction(tx) - - if success: - return jsonify({ - "success": True, - "tx_hash": result, - "status": "pending" - }) - else: - return jsonify({ - "success": False, - "error": result - }), 400 - - except Exception as e: - logger.error(f"Error submitting transaction: {e}") - return jsonify({"error": str(e)}), 500 - - @app.route('/tx/status/', methods=['GET']) - def get_tx_status(tx_hash: str): - """Get transaction status""" - try: - status = tx_pool.get_transaction_status(tx_hash) - return jsonify(status) - except Exception as e: - return jsonify({"error": str(e)}), 500 - - @app.route('/tx/pending', methods=['GET']) - def list_pending(): - """List pending transactions""" - try: - limit = request.args.get('limit', 100, type=int) - pending = tx_pool.get_pending_transactions(limit) - return jsonify({ - "count": len(pending), - "transactions": [tx.to_dict() for tx in pending] - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - - @app.route('/wallet/
/balance', methods=['GET']) - def get_wallet_balance(address: str): - """Get wallet balance""" - try: - balance = tx_pool.get_balance(address) - available = tx_pool.get_available_balance(address) - pending = tx_pool.get_pending_amount(address) - - return jsonify({ - "address": address, - "balance_urtc": balance, - "available_urtc": available, - "pending_urtc": pending, - "balance_rtc": balance / 100_000_000, - "available_rtc": available / 100_000_000 - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - - @app.route('/wallet/
/nonce', methods=['GET']) - def get_wallet_nonce(address: str): - """Get wallet nonce (for transaction construction)""" - try: - nonce = tx_pool.get_wallet_nonce(address) - pending_nonces = tx_pool._get_pending_nonces(address) - - # Next nonce to use - next_nonce = nonce + 1 - while next_nonce in pending_nonces: - next_nonce += 1 - - return jsonify({ - "address": address, - "confirmed_nonce": nonce, - "next_nonce": next_nonce, - "pending_nonces": sorted(pending_nonces) - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - - @app.route('/wallet/
/history', methods=['GET']) - def get_wallet_history(address: str): - """Get transaction history for wallet""" - try: - limit = request.args.get('limit', 50, type=int) - offset = request.args.get('offset', 0, type=int) - - with sqlite3.connect(tx_pool.db_path) as conn: - conn.row_factory = sqlite3.Row - cursor = conn.cursor() - - cursor.execute( - """SELECT * FROM transaction_history - WHERE from_addr = ? OR to_addr = ? - ORDER BY confirmed_at DESC - LIMIT ? OFFSET ?""", - (address, address, limit, offset) - ) - - transactions = [dict(row) for row in cursor.fetchall()] - - return jsonify({ - "address": address, - "count": len(transactions), - "transactions": transactions - }) - except Exception as e: - return jsonify({"error": str(e)}), 500 - - -# ============================================================================= -# TESTING -# ============================================================================= - -if __name__ == "__main__": - import tempfile - import os - - print("=" * 70) - print("RustChain Transaction Handler - Test Suite") - print("=" * 70) - - # Create temporary database - with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as f: - db_path = f.name - - try: - # Initialize pool - pool = TransactionPool(db_path) - - # Create test wallet - print("\n=== Creating Test Wallets ===") - from rustchain_crypto import generate_wallet_keypair - - addr1, pub1, priv1 = generate_wallet_keypair() - addr2, pub2, priv2 = generate_wallet_keypair() - - print(f"Wallet 1: {addr1}") - print(f"Wallet 2: {addr2}") - - # Seed balance for wallet 1 - with sqlite3.connect(db_path) as conn: - conn.execute( - "INSERT INTO balances (wallet, balance_urtc, wallet_nonce) VALUES (?, ?, ?)", - (addr1, 1000_000_000, 0) # 10 RTC - ) - - print(f"\nSeeded Wallet 1 with 10 RTC") - - # Check balance - print(f"\n=== Balance Check ===") - balance = pool.get_balance(addr1) - nonce = pool.get_wallet_nonce(addr1) - print(f"Wallet 1 balance: {balance / 100_000_000} RTC, nonce: 
{nonce}") - - # Create and sign transaction - print("\n=== Creating Transaction ===") - signer = Ed25519Signer(bytes.fromhex(priv1)) - - tx = SignedTransaction( - from_addr=addr1, - to_addr=addr2, - amount_urtc=100_000_000, # 1 RTC - nonce=1, - timestamp=int(time.time() * 1000), - memo="Test transfer" - ) - tx.sign(signer) - - print(f"TX Hash: {tx.tx_hash}") - print(f"Signature: {tx.signature[:32]}...") - - # Submit transaction - print("\n=== Submitting Transaction ===") - success, result = pool.submit_transaction(tx) - print(f"Success: {success}") - print(f"Result: {result}") - - # Check pending - print("\n=== Pending Transactions ===") - pending = pool.get_pending_transactions() - print(f"Count: {len(pending)}") - for p in pending: - print(f" {p.tx_hash[:16]}... {p.amount_urtc} uRTC") - - # Check available balance - print("\n=== Available Balance ===") - available = pool.get_available_balance(addr1) - print(f"Available: {available / 100_000_000} RTC") - - # Try duplicate (should fail) - print("\n=== Duplicate Test ===") - success, result = pool.submit_transaction(tx) - print(f"Duplicate result: {success}, {result}") - - # Try invalid nonce - print("\n=== Invalid Nonce Test ===") - tx2 = SignedTransaction( - from_addr=addr1, - to_addr=addr2, - amount_urtc=50_000_000, - nonce=5, # Wrong nonce - timestamp=int(time.time() * 1000) - ) - tx2.sign(signer) - success, result = pool.validate_transaction(tx2) - print(f"Invalid nonce result: {success}, {result}") - - # Confirm transaction - print("\n=== Confirming Transaction ===") - pool.confirm_transaction(tx.tx_hash, 100, "blockhash123") - - # Check balances after confirmation - print("\n=== Post-Confirmation Balances ===") - bal1 = pool.get_balance(addr1) - bal2 = pool.get_balance(addr2) - nonce1 = pool.get_wallet_nonce(addr1) - - print(f"Wallet 1: {bal1 / 100_000_000} RTC, nonce: {nonce1}") - print(f"Wallet 2: {bal2 / 100_000_000} RTC") - - print("\n" + "=" * 70) - print("All tests passed!") - print("=" * 70) - - 
finally: - # Cleanup - os.unlink(db_path) +#!/usr/bin/env python3 +""" +RustChain Transaction Handler - Mainnet Security +================================================= + +Phase 1 Implementation: +- Signed transaction validation +- Replay protection via nonces +- Balance checking with proper locking +- Transaction pool management + +All transactions MUST be signed with Ed25519. +""" + +import sqlite3 +import time +import threading +import logging +from typing import Dict, Optional, Tuple, List +from dataclasses import dataclass +from contextlib import contextmanager + +from rustchain_crypto import ( + SignedTransaction, + Ed25519Signer, + blake2b256_hex, + address_from_public_key +) + +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [TX] %(levelname)s: %(message)s' +) +logger = logging.getLogger(__name__) + + +# ============================================================================= +# DATABASE SCHEMA UPGRADES +# ============================================================================= + +SCHEMA_UPGRADE_SQL = """ +-- Upgrade balances table to include nonce +ALTER TABLE balances ADD COLUMN wallet_nonce INTEGER DEFAULT 0; + +-- Create pending transactions table +CREATE TABLE IF NOT EXISTS pending_transactions ( + tx_hash TEXT PRIMARY KEY, + from_addr TEXT NOT NULL, + to_addr TEXT NOT NULL, + amount_urtc INTEGER NOT NULL, + nonce INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + memo TEXT DEFAULT '', + signature TEXT NOT NULL, + public_key TEXT NOT NULL, + created_at INTEGER NOT NULL, + status TEXT DEFAULT 'pending' +); + +-- Create transaction history table +CREATE TABLE IF NOT EXISTS transaction_history ( + tx_hash TEXT PRIMARY KEY, + from_addr TEXT NOT NULL, + to_addr TEXT NOT NULL, + amount_urtc INTEGER NOT NULL, + nonce INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + memo TEXT DEFAULT '', + signature TEXT NOT NULL, + public_key TEXT NOT NULL, + block_height INTEGER, + block_hash TEXT, + confirmed_at INTEGER, + status TEXT DEFAULT 
'confirmed' +); + +-- Create wallet public key registry +CREATE TABLE IF NOT EXISTS wallet_pubkeys ( + address TEXT PRIMARY KEY, + public_key TEXT NOT NULL, + registered_at INTEGER NOT NULL +); + +-- Index for faster queries +CREATE INDEX IF NOT EXISTS idx_pending_from ON pending_transactions(from_addr); +CREATE INDEX IF NOT EXISTS idx_pending_nonce ON pending_transactions(from_addr, nonce); +CREATE INDEX IF NOT EXISTS idx_history_from ON transaction_history(from_addr); +CREATE INDEX IF NOT EXISTS idx_history_to ON transaction_history(to_addr); +CREATE INDEX IF NOT EXISTS idx_history_block ON transaction_history(block_height); +""" + + +# ============================================================================= +# TRANSACTION POOL +# ============================================================================= + +class TransactionPool: + """ + Manages pending transactions with proper validation. + """ + + def __init__(self, db_path: str): + self.db_path = db_path + self._lock = threading.Lock() + self._ensure_schema() + + def _ensure_schema(self): + """Ensure database schema is up to date""" + with sqlite3.connect(self.db_path) as conn: + cursor = conn.cursor() + + # Check if wallet_nonce column exists + cursor.execute("PRAGMA table_info(balances)") + columns = [col[1] for col in cursor.fetchall()] + + if "wallet_nonce" not in columns: + try: + cursor.execute("ALTER TABLE balances ADD COLUMN wallet_nonce INTEGER DEFAULT 0") + logger.info("Added wallet_nonce column to balances table") + except sqlite3.OperationalError: + pass # Column might already exist + + # Create other tables + for statement in SCHEMA_UPGRADE_SQL.split(';'): + statement = statement.strip() + if statement and not statement.startswith('ALTER'): + try: + cursor.execute(statement) + except sqlite3.OperationalError as e: + if "already exists" not in str(e): + logger.warning(f"Schema statement failed: {e}") + + conn.commit() + + @contextmanager + def _get_connection(self): + """Get database 
connection with proper locking""" + with self._lock: + conn = sqlite3.connect(self.db_path) + conn.row_factory = sqlite3.Row + try: + yield conn + conn.commit() + except Exception: + conn.rollback() + raise + finally: + conn.close() + + def get_wallet_nonce(self, address: str) -> int: + """Get current nonce for a wallet""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + "SELECT wallet_nonce FROM balances WHERE wallet = ?", + (address,) + ) + result = cursor.fetchone() + return result["wallet_nonce"] if result else 0 + + def get_balance(self, address: str) -> int: + """Get current balance for a wallet (in uRTC)""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + "SELECT balance_urtc FROM balances WHERE wallet = ?", + (address,) + ) + result = cursor.fetchone() + return result["balance_urtc"] if result else 0 + + def get_pending_amount(self, address: str) -> int: + """Get total pending outgoing amount for address""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + """SELECT COALESCE(SUM(amount_urtc), 0) as pending + FROM pending_transactions + WHERE from_addr = ? 
AND status = 'pending'""", + (address,) + ) + result = cursor.fetchone() + return result["pending"] if result else 0 + + def get_available_balance(self, address: str) -> int: + """Get available balance (total - pending)""" + balance = self.get_balance(address) + pending = self.get_pending_amount(address) + return max(0, balance - pending) + + def register_public_key(self, address: str, public_key: str) -> bool: + """Register a wallet's public key""" + with self._get_connection() as conn: + cursor = conn.cursor() + + # Verify address derives from public key + derived_addr = address_from_public_key(bytes.fromhex(public_key)) + if derived_addr != address: + logger.warning(f"Address mismatch: {address} != {derived_addr}") + return False + + try: + cursor.execute( + """INSERT OR REPLACE INTO wallet_pubkeys + (address, public_key, registered_at) + VALUES (?, ?, ?)""", + (address, public_key, int(time.time())) + ) + return True + except Exception as e: + logger.error(f"Failed to register public key: {e}") + return False + + def get_public_key(self, address: str) -> Optional[str]: + """Get registered public key for address""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + "SELECT public_key FROM wallet_pubkeys WHERE address = ?", + (address,) + ) + result = cursor.fetchone() + return result["public_key"] if result else None + + def validate_transaction(self, tx: SignedTransaction) -> Tuple[bool, str]: + """ + Validate a signed transaction. + + Checks: + 1. Signature validity + 2. Public key matches from_addr + 3. Nonce is correct (replay protection) + 4. Sufficient balance + 5. No duplicate in pool + """ + # 1. Verify signature + if not tx.verify(): + return False, "Invalid signature" + + # 2. Verify public key matches address + derived_addr = address_from_public_key(bytes.fromhex(tx.public_key)) + if derived_addr != tx.from_addr: + return False, f"Public key does not match from_addr" + + # 3. 
Check nonce + expected_nonce = self.get_wallet_nonce(tx.from_addr) + 1 + pending_nonces = self._get_pending_nonces(tx.from_addr) + + # Account for pending transactions + while expected_nonce in pending_nonces: + expected_nonce += 1 + + if tx.nonce != expected_nonce: + return False, f"Invalid nonce: expected {expected_nonce}, got {tx.nonce}" + + # 4. Validate amount and check balance + if tx.amount_urtc <= 0: + return False, "Invalid amount: must be > 0" + + available = self.get_available_balance(tx.from_addr) + if tx.amount_urtc > available: + return False, f"Insufficient balance: have {available}, need {tx.amount_urtc}" + + # 5. Check for duplicate + if self._tx_exists(tx.tx_hash): + return False, "Transaction already exists" + + return True, "" + + def _get_pending_nonces(self, address: str) -> set: + """Get set of pending nonces for address""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + "SELECT nonce FROM pending_transactions WHERE from_addr = ? AND status = 'pending'", + (address,) + ) + return {row["nonce"] for row in cursor.fetchall()} + + def _tx_exists(self, tx_hash: str) -> bool: + """Check if transaction already exists""" + with self._get_connection() as conn: + cursor = conn.cursor() + + # Check pending + cursor.execute( + "SELECT 1 FROM pending_transactions WHERE tx_hash = ?", + (tx_hash,) + ) + if cursor.fetchone(): + return True + + # Check history + cursor.execute( + "SELECT 1 FROM transaction_history WHERE tx_hash = ?", + (tx_hash,) + ) + return cursor.fetchone() is not None + + def submit_transaction(self, tx: SignedTransaction) -> Tuple[bool, str]: + """ + Submit a signed transaction to the pool. 
+ + Returns (success, error_or_tx_hash) + """ + # Validate + is_valid, error = self.validate_transaction(tx) + if not is_valid: + return False, error + + # Register public key if not already registered + self.register_public_key(tx.from_addr, tx.public_key) + + # Add to pending pool + with self._get_connection() as conn: + cursor = conn.cursor() + + try: + cursor.execute( + """INSERT INTO pending_transactions + (tx_hash, from_addr, to_addr, amount_urtc, nonce, + timestamp, memo, signature, public_key, created_at, status) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'pending')""", + ( + tx.tx_hash, + tx.from_addr, + tx.to_addr, + tx.amount_urtc, + tx.nonce, + tx.timestamp, + tx.memo, + tx.signature, + tx.public_key, + int(time.time()) + ) + ) + + logger.info(f"TX accepted: {tx.tx_hash[:16]}... " + f"{tx.from_addr[:16]}... -> {tx.to_addr[:16]}... " + f"amount={tx.amount_urtc}") + + return True, tx.tx_hash + + except sqlite3.IntegrityError as e: + return False, f"Transaction already exists: {e}" + + def get_pending_transactions(self, limit: int = 100) -> List[SignedTransaction]: + """Get pending transactions ordered by nonce""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + """SELECT * FROM pending_transactions + WHERE status = 'pending' + ORDER BY nonce ASC + LIMIT ?""", + (limit,) + ) + + return [ + SignedTransaction( + from_addr=row["from_addr"], + to_addr=row["to_addr"], + amount_urtc=row["amount_urtc"], + nonce=row["nonce"], + timestamp=row["timestamp"], + memo=row["memo"], + signature=row["signature"], + public_key=row["public_key"], + tx_hash=row["tx_hash"] + ) + for row in cursor.fetchall() + ] + + def confirm_transaction( + self, + tx_hash: str, + block_height: int, + block_hash: str + ) -> bool: + """ + Confirm a transaction (move from pending to history). + Also updates balances and nonces. 
+ """ + with self._get_connection() as conn: + cursor = conn.cursor() + + # Get pending transaction + cursor.execute( + "SELECT * FROM pending_transactions WHERE tx_hash = ?", + (tx_hash,) + ) + row = cursor.fetchone() + + if not row: + logger.warning(f"Transaction not found in pending: {tx_hash}") + return False + + try: + # Move to history + cursor.execute( + """INSERT INTO transaction_history + (tx_hash, from_addr, to_addr, amount_urtc, nonce, + timestamp, memo, signature, public_key, + block_height, block_hash, confirmed_at, status) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'confirmed')""", + ( + row["tx_hash"], + row["from_addr"], + row["to_addr"], + row["amount_urtc"], + row["nonce"], + row["timestamp"], + row["memo"], + row["signature"], + row["public_key"], + block_height, + block_hash, + int(time.time()) + ) + ) + + # Update sender balance and nonce + cursor.execute( + """UPDATE balances + SET balance_urtc = balance_urtc - ?, + wallet_nonce = ? + WHERE wallet = ?""", + (row["amount_urtc"], row["nonce"], row["from_addr"]) + ) + + # Update receiver balance (create if not exists) + cursor.execute( + """INSERT INTO balances (wallet, balance_urtc, wallet_nonce) + VALUES (?, ?, 0) + ON CONFLICT(wallet) DO UPDATE SET + balance_urtc = balance_urtc + ?""", + (row["to_addr"], row["amount_urtc"], row["amount_urtc"]) + ) + + # Remove from pending + cursor.execute( + "DELETE FROM pending_transactions WHERE tx_hash = ?", + (tx_hash,) + ) + + logger.info(f"TX confirmed: {tx_hash[:16]}... 
in block {block_height}") + return True + + except Exception as e: + logger.error(f"Failed to confirm transaction: {e}") + return False + + def reject_transaction(self, tx_hash: str, reason: str = "") -> bool: + """Reject a pending transaction""" + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + """UPDATE pending_transactions + SET status = 'rejected' + WHERE tx_hash = ?""", + (tx_hash,) + ) + + if cursor.rowcount > 0: + logger.info(f"TX rejected: {tx_hash[:16]}... reason: {reason}") + return True + return False + + def cleanup_expired(self, max_age_seconds: int = 3600) -> int: + """Remove transactions older than max_age""" + cutoff = int(time.time()) - max_age_seconds + + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + """DELETE FROM pending_transactions + WHERE status = 'pending' AND created_at < ?""", + (cutoff,) + ) + count = cursor.rowcount + + if count > 0: + logger.info(f"Cleaned up {count} expired pending transactions") + + return count + + def get_transaction_status(self, tx_hash: str) -> Dict: + """Get transaction status""" + with self._get_connection() as conn: + cursor = conn.cursor() + + # Check pending + cursor.execute( + "SELECT *, 'pending' as location FROM pending_transactions WHERE tx_hash = ?", + (tx_hash,) + ) + row = cursor.fetchone() + if row: + return dict(row) + + # Check history + cursor.execute( + "SELECT *, 'history' as location FROM transaction_history WHERE tx_hash = ?", + (tx_hash,) + ) + row = cursor.fetchone() + if row: + return dict(row) + + return {"status": "not_found"} + + +# ============================================================================= +# TRANSACTION API ENDPOINTS +# ============================================================================= + +def create_tx_api_routes(app, tx_pool: TransactionPool): + """ + Create Flask routes for transaction API. 
+ + Endpoints: + - POST /tx/submit - Submit signed transaction + - GET /tx/status/ - Get transaction status + - GET /tx/pending - List pending transactions + - GET /wallet//balance - Get wallet balance + - GET /wallet//nonce - Get wallet nonce + - GET /wallet//history - Get transaction history + """ + from flask import request, jsonify + + @app.route('/tx/submit', methods=['POST']) + def submit_transaction(): + """Submit a signed transaction""" + try: + data = request.get_json() + + if not data: + return jsonify({"error": "No JSON data provided"}), 400 + + # Create transaction object + tx = SignedTransaction.from_dict(data) + + # Compute hash if not provided + if not tx.tx_hash: + tx.tx_hash = tx.compute_hash() + + # Submit to pool + success, result = tx_pool.submit_transaction(tx) + + if success: + return jsonify({ + "success": True, + "tx_hash": result, + "status": "pending" + }) + else: + return jsonify({ + "success": False, + "error": result + }), 400 + + except Exception as e: + logger.error(f"Error submitting transaction: {e}") + return jsonify({"error": str(e)}), 500 + + @app.route('/tx/status/', methods=['GET']) + def get_tx_status(tx_hash: str): + """Get transaction status""" + try: + status = tx_pool.get_transaction_status(tx_hash) + return jsonify(status) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + @app.route('/tx/pending', methods=['GET']) + def list_pending(): + """List pending transactions""" + try: + limit = request.args.get('limit', 100, type=int) + pending = tx_pool.get_pending_transactions(limit) + return jsonify({ + "count": len(pending), + "transactions": [tx.to_dict() for tx in pending] + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + @app.route('/wallet/
/balance', methods=['GET']) + def get_wallet_balance(address: str): + """Get wallet balance""" + try: + balance = tx_pool.get_balance(address) + available = tx_pool.get_available_balance(address) + pending = tx_pool.get_pending_amount(address) + + return jsonify({ + "address": address, + "balance_urtc": balance, + "available_urtc": available, + "pending_urtc": pending, + "balance_rtc": balance / 100_000_000, + "available_rtc": available / 100_000_000 + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + @app.route('/wallet/
/nonce', methods=['GET']) + def get_wallet_nonce(address: str): + """Get wallet nonce (for transaction construction)""" + try: + nonce = tx_pool.get_wallet_nonce(address) + pending_nonces = tx_pool._get_pending_nonces(address) + + # Next nonce to use + next_nonce = nonce + 1 + while next_nonce in pending_nonces: + next_nonce += 1 + + return jsonify({ + "address": address, + "confirmed_nonce": nonce, + "next_nonce": next_nonce, + "pending_nonces": sorted(pending_nonces) + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + @app.route('/wallet/
/history', methods=['GET']) + def get_wallet_history(address: str): + """Get transaction history for wallet""" + try: + limit = request.args.get('limit', 50, type=int) + offset = request.args.get('offset', 0, type=int) + + with sqlite3.connect(tx_pool.db_path) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.cursor() + + cursor.execute( + """SELECT * FROM transaction_history + WHERE from_addr = ? OR to_addr = ? + ORDER BY confirmed_at DESC + LIMIT ? OFFSET ?""", + (address, address, limit, offset) + ) + + transactions = [dict(row) for row in cursor.fetchall()] + + return jsonify({ + "address": address, + "count": len(transactions), + "transactions": transactions + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + + +# ============================================================================= +# TESTING +# ============================================================================= + +if __name__ == "__main__": + import tempfile + import os + + print("=" * 70) + print("RustChain Transaction Handler - Test Suite") + print("=" * 70) + + # Create temporary database + with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as f: + db_path = f.name + + try: + # Initialize pool + pool = TransactionPool(db_path) + + # Create test wallet + print("\n=== Creating Test Wallets ===") + from rustchain_crypto import generate_wallet_keypair + + addr1, pub1, priv1 = generate_wallet_keypair() + addr2, pub2, priv2 = generate_wallet_keypair() + + print(f"Wallet 1: {addr1}") + print(f"Wallet 2: {addr2}") + + # Seed balance for wallet 1 + with sqlite3.connect(db_path) as conn: + conn.execute( + "INSERT INTO balances (wallet, balance_urtc, wallet_nonce) VALUES (?, ?, ?)", + (addr1, 1000_000_000, 0) # 10 RTC + ) + + print(f"\nSeeded Wallet 1 with 10 RTC") + + # Check balance + print(f"\n=== Balance Check ===") + balance = pool.get_balance(addr1) + nonce = pool.get_wallet_nonce(addr1) + print(f"Wallet 1 balance: {balance / 100_000_000} RTC, nonce: 
{nonce}") + + # Create and sign transaction + print("\n=== Creating Transaction ===") + signer = Ed25519Signer(bytes.fromhex(priv1)) + + tx = SignedTransaction( + from_addr=addr1, + to_addr=addr2, + amount_urtc=100_000_000, # 1 RTC + nonce=1, + timestamp=int(time.time() * 1000), + memo="Test transfer" + ) + tx.sign(signer) + + print(f"TX Hash: {tx.tx_hash}") + print(f"Signature: {tx.signature[:32]}...") + + # Submit transaction + print("\n=== Submitting Transaction ===") + success, result = pool.submit_transaction(tx) + print(f"Success: {success}") + print(f"Result: {result}") + + # Check pending + print("\n=== Pending Transactions ===") + pending = pool.get_pending_transactions() + print(f"Count: {len(pending)}") + for p in pending: + print(f" {p.tx_hash[:16]}... {p.amount_urtc} uRTC") + + # Check available balance + print("\n=== Available Balance ===") + available = pool.get_available_balance(addr1) + print(f"Available: {available / 100_000_000} RTC") + + # Try duplicate (should fail) + print("\n=== Duplicate Test ===") + success, result = pool.submit_transaction(tx) + print(f"Duplicate result: {success}, {result}") + + # Try invalid nonce + print("\n=== Invalid Nonce Test ===") + tx2 = SignedTransaction( + from_addr=addr1, + to_addr=addr2, + amount_urtc=50_000_000, + nonce=5, # Wrong nonce + timestamp=int(time.time() * 1000) + ) + tx2.sign(signer) + success, result = pool.validate_transaction(tx2) + print(f"Invalid nonce result: {success}, {result}") + + # Confirm transaction + print("\n=== Confirming Transaction ===") + pool.confirm_transaction(tx.tx_hash, 100, "blockhash123") + + # Check balances after confirmation + print("\n=== Post-Confirmation Balances ===") + bal1 = pool.get_balance(addr1) + bal2 = pool.get_balance(addr2) + nonce1 = pool.get_wallet_nonce(addr1) + + print(f"Wallet 1: {bal1 / 100_000_000} RTC, nonce: {nonce1}") + print(f"Wallet 2: {bal2 / 100_000_000} RTC") + + print("\n" + "=" * 70) + print("All tests passed!") + print("=" * 70) + + 
finally: + # Cleanup + os.unlink(db_path) diff --git a/node/rustchain_v2_integrated_v2.2.1_rip200.py b/node/rustchain_v2_integrated_v2.2.1_rip200.py index e9551164..3d40d281 100644 --- a/node/rustchain_v2_integrated_v2.2.1_rip200.py +++ b/node/rustchain_v2_integrated_v2.2.1_rip200.py @@ -6341,3 +6341,100 @@ def check_hardware_wallet_consistency(hardware_id, miner_wallet, conn): return False, f'hardware_bound_to_different_wallet:{bound_wallet[:20]}' return True, 'ok' + + + +# === WALLET HISTORY ENDPOINT (Bounty #908) === +@app.route('/wallet/history', methods=['GET']) +def get_wallet_history(): + ''' + Get transaction history for a given wallet/miner ID. + + Query params: + - miner_id: Wallet/miner ID to query + - limit: Number of transactions to return (default: 50) + - offset: Offset for pagination (default: 0) + + Returns: + JSON with transaction history including rewards and transfers + ''' + miner_id = request.args.get('miner_id') + limit = int(request.args.get('limit', 50)) + offset = int(request.args.get('offset', 0)) + + if not miner_id: + return jsonify({'ok': False, 'error': 'miner_id is required'}), 400 + + try: + transactions = [] + + # Query epoch_rewards for mining rewards + c = conn.cursor() + c.execute(''' + SELECT 'reward' as type, amount, epoch, timestamp, tx_hash + FROM epoch_rewards + WHERE miner_id = ? + ORDER BY epoch DESC + LIMIT ? OFFSET ? + ''', (miner_id, limit, offset)) + + for row in c.fetchall(): + transactions.append({ + 'type': 'reward', + 'amount': row[0], + 'epoch': row[1], + 'timestamp': row[2], + 'tx_hash': row[3] + }) + + # Query ledger for transfers + c.execute(''' + SELECT 'transfer_in' as type, from_wallet, amount, timestamp, tx_hash + FROM ledger + WHERE to_wallet = ? + ORDER BY timestamp DESC + LIMIT ? OFFSET ? 
+ ''', (miner_id, limit, offset)) + + for row in c.fetchall(): + transactions.append({ + 'type': 'transfer_in', + 'from': row[1], + 'amount': row[2], + 'timestamp': row[3], + 'tx_hash': row[4] + }) + + c.execute(''' + SELECT 'transfer_out' as type, to_wallet, amount, timestamp, tx_hash + FROM ledger + WHERE from_wallet = ? + ORDER BY timestamp DESC + LIMIT ? OFFSET ? + ''', (miner_id, limit, offset)) + + for row in c.fetchall(): + transactions.append({ + 'type': 'transfer_out', + 'to': row[1], + 'amount': row[2], + 'timestamp': row[3], + 'tx_hash': row[4] + }) + + # Sort by timestamp descending + transactions.sort(key=lambda x: x.get('timestamp', ''), reverse=True) + + # Get total count + c.execute('SELECT COUNT(*) FROM epoch_rewards WHERE miner_id = ?', (miner_id,)) + total = c.fetchone()[0] + + return jsonify({ + 'ok': True, + 'miner_id': miner_id, + 'transactions': transactions[:limit], + 'total': total + }) + + except Exception as e: + return jsonify({'ok': False, 'error': str(e)}), 500 diff --git a/node/rustchain_x402.py b/node/rustchain_x402.py index ef31c3ee..3c415d10 100644 --- a/node/rustchain_x402.py +++ b/node/rustchain_x402.py @@ -1,114 +1,114 @@ -""" -RustChain x402 Integration — Swap Info + Coinbase Wallet Linking -Adds /wallet/swap-info and /wallet/link-coinbase endpoints. 
- -Usage in rustchain server: - import rustchain_x402 - rustchain_x402.init_app(app, DB_PATH) -""" - -import logging -import os -import sqlite3 -import time - -from flask import jsonify, request - -log = logging.getLogger("rustchain.x402") - -# Import shared config -try: - import sys - sys.path.insert(0, "/root/shared") - from x402_config import SWAP_INFO, WRTC_BASE, USDC_BASE, AERODROME_POOL - X402_CONFIG_OK = True -except ImportError: - log.warning("x402_config not found — using inline swap info") - X402_CONFIG_OK = False - SWAP_INFO = { - "wrtc_contract": "0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", - "usdc_contract": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913", - "aerodrome_pool": "0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F", - "swap_url": "https://aerodrome.finance/swap?from=0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913&to=0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", - "network": "Base (eip155:8453)", - "reference_price_usd": 0.10, - } - - -COINBASE_MIGRATION = "ALTER TABLE balances ADD COLUMN coinbase_address TEXT DEFAULT NULL" - - -def _run_migration(db_path): - """Add coinbase_address column to balances if missing.""" - conn = sqlite3.connect(db_path) - cursor = conn.execute("PRAGMA table_info(balances)") - existing = {row[1] for row in cursor.fetchall()} - if "coinbase_address" not in existing: - try: - conn.execute(COINBASE_MIGRATION) - conn.commit() - log.info("Added coinbase_address column to balances") - except sqlite3.OperationalError: - pass - conn.close() - - -def init_app(app, db_path): - """Register x402 routes on the RustChain Flask app.""" - - try: - _run_migration(db_path) - except Exception as e: - log.error(f"RustChain x402 migration failed: {e}") - - @app.route("/wallet/swap-info", methods=["GET"]) - def wallet_swap_info(): - """Returns Aerodrome pool info for USDC→wRTC swap guidance.""" - return jsonify(SWAP_INFO) - - @app.route("/wallet/link-coinbase", methods=["PATCH", "POST"]) - def wallet_link_coinbase(): - """Link a Coinbase Base 
address to a miner_id. Requires admin key.""" - admin_key = request.headers.get("X-Admin-Key", "") or request.headers.get("X-API-Key", "") - expected = os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64") - if admin_key != expected: - return jsonify({"error": "Unauthorized — admin key required"}), 401 - - data = request.get_json(silent=True) or {} - miner_id = data.get("miner_id", "").strip() - coinbase_address = data.get("coinbase_address", "").strip() - - if not miner_id: - return jsonify({"error": "miner_id is required"}), 400 - if not coinbase_address or not coinbase_address.startswith("0x") or len(coinbase_address) != 42: - return jsonify({"error": "Invalid Base address (must be 0x + 40 hex chars)"}), 400 - - conn = sqlite3.connect(db_path) - row = conn.execute( - "SELECT miner_id FROM balances WHERE miner_id = ?", (miner_id,) - ).fetchone() - if not row: - # Try miner_pk - row = conn.execute( - "SELECT miner_id FROM balances WHERE miner_pk = ?", (miner_id,) - ).fetchone() - if not row: - conn.close() - return jsonify({"error": f"Miner '{miner_id}' not found in balances"}), 404 - - actual_id = row[0] - conn.execute( - "UPDATE balances SET coinbase_address = ? WHERE miner_id = ?", - (coinbase_address, actual_id), - ) - conn.commit() - conn.close() - - return jsonify({ - "ok": True, - "miner_id": actual_id, - "coinbase_address": coinbase_address, - "network": "Base (eip155:8453)", - }) - - log.info("RustChain x402 module initialized") +""" +RustChain x402 Integration — Swap Info + Coinbase Wallet Linking +Adds /wallet/swap-info and /wallet/link-coinbase endpoints. 
+ +Usage in rustchain server: + import rustchain_x402 + rustchain_x402.init_app(app, DB_PATH) +""" + +import logging +import os +import sqlite3 +import time + +from flask import jsonify, request + +log = logging.getLogger("rustchain.x402") + +# Import shared config +try: + import sys + sys.path.insert(0, "/root/shared") + from x402_config import SWAP_INFO, WRTC_BASE, USDC_BASE, AERODROME_POOL + X402_CONFIG_OK = True +except ImportError: + log.warning("x402_config not found — using inline swap info") + X402_CONFIG_OK = False + SWAP_INFO = { + "wrtc_contract": "0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", + "usdc_contract": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913", + "aerodrome_pool": "0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F", + "swap_url": "https://aerodrome.finance/swap?from=0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913&to=0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", + "network": "Base (eip155:8453)", + "reference_price_usd": 0.10, + } + + +COINBASE_MIGRATION = "ALTER TABLE balances ADD COLUMN coinbase_address TEXT DEFAULT NULL" + + +def _run_migration(db_path): + """Add coinbase_address column to balances if missing.""" + conn = sqlite3.connect(db_path) + cursor = conn.execute("PRAGMA table_info(balances)") + existing = {row[1] for row in cursor.fetchall()} + if "coinbase_address" not in existing: + try: + conn.execute(COINBASE_MIGRATION) + conn.commit() + log.info("Added coinbase_address column to balances") + except sqlite3.OperationalError: + pass + conn.close() + + +def init_app(app, db_path): + """Register x402 routes on the RustChain Flask app.""" + + try: + _run_migration(db_path) + except Exception as e: + log.error(f"RustChain x402 migration failed: {e}") + + @app.route("/wallet/swap-info", methods=["GET"]) + def wallet_swap_info(): + """Returns Aerodrome pool info for USDC→wRTC swap guidance.""" + return jsonify(SWAP_INFO) + + @app.route("/wallet/link-coinbase", methods=["PATCH", "POST"]) + def wallet_link_coinbase(): + """Link a Coinbase Base 
address to a miner_id. Requires admin key.""" + admin_key = request.headers.get("X-Admin-Key", "") or request.headers.get("X-API-Key", "") + expected = os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64") + if admin_key != expected: + return jsonify({"error": "Unauthorized — admin key required"}), 401 + + data = request.get_json(silent=True) or {} + miner_id = data.get("miner_id", "").strip() + coinbase_address = data.get("coinbase_address", "").strip() + + if not miner_id: + return jsonify({"error": "miner_id is required"}), 400 + if not coinbase_address or not coinbase_address.startswith("0x") or len(coinbase_address) != 42: + return jsonify({"error": "Invalid Base address (must be 0x + 40 hex chars)"}), 400 + + conn = sqlite3.connect(db_path) + row = conn.execute( + "SELECT miner_id FROM balances WHERE miner_id = ?", (miner_id,) + ).fetchone() + if not row: + # Try miner_pk + row = conn.execute( + "SELECT miner_id FROM balances WHERE miner_pk = ?", (miner_id,) + ).fetchone() + if not row: + conn.close() + return jsonify({"error": f"Miner '{miner_id}' not found in balances"}), 404 + + actual_id = row[0] + conn.execute( + "UPDATE balances SET coinbase_address = ? WHERE miner_id = ?", + (coinbase_address, actual_id), + ) + conn.commit() + conn.close() + + return jsonify({ + "ok": True, + "miner_id": actual_id, + "coinbase_address": coinbase_address, + "network": "Base (eip155:8453)", + }) + + log.info("RustChain x402 module initialized") diff --git a/node/warthog_verification.py b/node/warthog_verification.py index f467902e..dca3c0b2 100644 --- a/node/warthog_verification.py +++ b/node/warthog_verification.py @@ -1,306 +1,306 @@ -#!/usr/bin/env python3 -""" -Warthog Dual-Mining Verification (Server-Side) -=============================================== - -Validates Warthog proof payloads submitted by dual-miners. -Determines bonus tier and records proofs for epoch reward calculation. - -Target audience: Modern/semi-modern machines WITH GPUs. 
-Vintage hardware (G4, G5, retro) already earns high antiquity multipliers -and can't run the modern GPUs required for Warthog's Janushash PoW. -This bonus gives GPU-equipped modern miners a slight edge — bumping -their base ~0.8-1.0x weight up toward ~1.1-1.15x. - -Bonus tiers: - 1.0x No Warthog (default — all existing miners unchanged) - 1.1x Pool mining confirmed (contributing GPU hashrate) - 1.15x Own Warthog node confirmed (running full node + balance) - -Replay prevention: one proof per miner per epoch. -""" - -import time -import sqlite3 -from typing import Tuple - -# Warthog bonus tier constants — intentionally modest. -# Modern machines sit at 0.8-1.0x base; this nudges them up slightly, -# NOT enough to overtake vintage antiquity bonuses (G4=2.5x, G5=2.0x). -WART_BONUS_NONE = 1.0 -WART_BONUS_POOL = 1.1 -WART_BONUS_NODE = 1.15 - -# Minimum node height to be considered plausible (Warthog mainnet launched 2023) -MIN_PLAUSIBLE_HEIGHT = 1000 - -# Maximum age of a proof timestamp (seconds) - reject stale proofs -MAX_PROOF_AGE = 900 # 15 minutes - - -def init_warthog_tables(conn): - """ - Create Warthog dual-mining tables if they don't exist. - - Args: - conn: sqlite3 connection (or cursor) - """ - conn.execute(""" - CREATE TABLE IF NOT EXISTS warthog_mining_proofs ( - miner TEXT NOT NULL, - epoch INTEGER NOT NULL, - proof_type TEXT NOT NULL, - wart_address TEXT, - wart_node_height INTEGER, - wart_balance TEXT, - pool_url TEXT, - pool_hashrate REAL, - bonus_tier REAL DEFAULT 1.0, - verified INTEGER DEFAULT 0, - verified_reason TEXT, - submitted_at INTEGER NOT NULL, - PRIMARY KEY (miner, epoch) - ) - """) - - # Safely add warthog_bonus column to miner_attest_recent - try: - conn.execute( - "ALTER TABLE miner_attest_recent ADD COLUMN warthog_bonus REAL DEFAULT 1.0" - ) - except Exception: - pass # Column already exists - - -def verify_warthog_proof(proof, miner_id) -> Tuple[bool, float, str]: - """ - Validate a Warthog dual-mining proof submitted with attestation. 
- - Server-side checks: - - Proof structure is valid - - Proof timestamp is recent (not replayed from old session) - - Node proof: synced==True, height plausible, balance non-zero - - Pool proof: known pool URL, hashrate > 0 - - Args: - proof: dict from attestation payload's "warthog" key - miner_id: RustChain miner identifier - - Returns: - (verified, bonus_tier, reason) - """ - if not proof or not isinstance(proof, dict): - return False, WART_BONUS_NONE, "no_proof_data" - - if not proof.get("enabled"): - return False, WART_BONUS_NONE, "warthog_not_enabled" - - # Check proof freshness - collected_at = proof.get("collected_at", 0) - if collected_at and abs(time.time() - collected_at) > MAX_PROOF_AGE: - return False, WART_BONUS_NONE, "proof_too_old" - - # Validate WART address present - wart_address = proof.get("wart_address", "") - if not wart_address or len(wart_address) < 10: - return False, WART_BONUS_NONE, "invalid_wart_address" - - proof_type = proof.get("proof_type", "none") - - # === Tier 1.5: Own Node Verification === - if proof_type == "own_node": - node = proof.get("node") - if not node or not isinstance(node, dict): - return False, WART_BONUS_NONE, "node_data_missing" - - # Must be synced - if not node.get("synced"): - return False, WART_BONUS_NONE, "node_not_synced" - - # Height must be plausible - height = node.get("height", 0) - if not height or height < MIN_PLAUSIBLE_HEIGHT: - return False, WART_BONUS_NONE, f"implausible_height_{height}" - - # Balance must be non-zero (proves actual mining activity) - balance_str = proof.get("balance", "0") - try: - balance = float(balance_str) - except (ValueError, TypeError): - balance = 0.0 - - if balance <= 0: - # Node running but no balance — downgrade to pool tier - # (they're contributing hashpower but haven't earned yet) - return True, WART_BONUS_POOL, "node_no_balance_downgraded" - - return True, WART_BONUS_NODE, "own_node_verified" - - # === Tier 1.3: Pool Mining Verification === - if proof_type == "pool": 
- pool = proof.get("pool") - if not pool or not isinstance(pool, dict): - return False, WART_BONUS_NONE, "pool_data_missing" - - hashrate = pool.get("hashrate", 0) - if not hashrate or hashrate <= 0: - return False, WART_BONUS_NONE, "pool_zero_hashrate" - - pool_url = pool.get("url", "") - if not pool_url: - return False, WART_BONUS_NONE, "pool_url_missing" - - return True, WART_BONUS_POOL, "pool_mining_verified" - - # Unknown proof type - return False, WART_BONUS_NONE, f"unknown_proof_type_{proof_type}" - - -def record_warthog_proof(conn, miner_id, epoch, proof, verified, bonus_tier, reason): - """ - Write Warthog proof record to database. - - Args: - conn: sqlite3 connection - miner_id: RustChain miner identifier - epoch: Current epoch number - proof: Raw proof dict - verified: Boolean result - bonus_tier: Float bonus multiplier - reason: Verification reason string - """ - node = proof.get("node") or {} - pool = proof.get("pool") or {} - - try: - conn.execute(""" - INSERT OR REPLACE INTO warthog_mining_proofs - (miner, epoch, proof_type, wart_address, wart_node_height, - wart_balance, pool_url, pool_hashrate, bonus_tier, - verified, verified_reason, submitted_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, ( - miner_id, - epoch, - proof.get("proof_type", "none"), - proof.get("wart_address", ""), - node.get("height"), - proof.get("balance"), - pool.get("url"), - pool.get("hashrate"), - bonus_tier, - 1 if verified else 0, - reason, - int(time.time()), - )) - conn.commit() - except Exception as e: - print(f"[WARTHOG] Error recording proof: {e}") - - -def get_warthog_bonus(conn, miner_id): - """ - Get current Warthog bonus for a miner from latest attestation. 
- - Args: - conn: sqlite3 connection - miner_id: RustChain miner identifier - - Returns: - Float bonus multiplier (1.0 if no Warthog) - """ - try: - row = conn.execute( - "SELECT warthog_bonus FROM miner_attest_recent WHERE miner = ?", - (miner_id,) - ).fetchone() - if row and row[0] and row[0] > 1.0: - return row[0] - except Exception: - pass # Column may not exist on older schemas - - return WART_BONUS_NONE - - -if __name__ == "__main__": - # Self-test with mock proofs - print("=" * 60) - print("Warthog Verification - Self Test") - print("=" * 60) - - # Test 1: No proof - ok, tier, reason = verify_warthog_proof(None, "test-miner") - print(f"[1] No proof: ok={ok}, tier={tier}, reason={reason}") - assert tier == 1.0 - - # Test 2: Valid own node (modern machine with GPU running Warthog full node) - ok, tier, reason = verify_warthog_proof({ - "enabled": True, - "wart_address": "wart1qtest123456789", - "proof_type": "own_node", - "node": {"height": 500000, "synced": True, "hash": "abc123"}, - "balance": "42.5", - "collected_at": int(time.time()), - }, "test-miner") - print(f"[2] Own node: ok={ok}, tier={tier}, reason={reason}") - assert tier == 1.15 - - # Test 3: Node but no balance (new miner, hasn't earned yet — downgrade to pool tier) - ok, tier, reason = verify_warthog_proof({ - "enabled": True, - "wart_address": "wart1qtest123456789", - "proof_type": "own_node", - "node": {"height": 500000, "synced": True}, - "balance": "0", - "collected_at": int(time.time()), - }, "test-miner") - print(f"[3] No balance: ok={ok}, tier={tier}, reason={reason}") - assert tier == 1.1 # Downgraded to pool - - # Test 4: Pool mining - ok, tier, reason = verify_warthog_proof({ - "enabled": True, - "wart_address": "wart1qtest123456789", - "proof_type": "pool", - "pool": {"url": "https://acc-pool.pw", "hashrate": 150.5, "shares": 42}, - "collected_at": int(time.time()), - }, "test-miner") - print(f"[4] Pool mining: ok={ok}, tier={tier}, reason={reason}") - assert tier == 1.1 - - # Test 5: 
Stale proof - ok, tier, reason = verify_warthog_proof({ - "enabled": True, - "wart_address": "wart1qtest123456789", - "proof_type": "own_node", - "node": {"height": 500000, "synced": True}, - "balance": "42.5", - "collected_at": int(time.time()) - 3600, # 1 hour old - }, "test-miner") - print(f"[5] Stale proof: ok={ok}, tier={tier}, reason={reason}") - assert tier == 1.0 # Rejected - - # Test 6: DB operations - import tempfile, os - db_path = os.path.join(tempfile.gettempdir(), "wart_test.db") - with sqlite3.connect(db_path) as conn: - conn.execute("""CREATE TABLE IF NOT EXISTS miner_attest_recent ( - miner TEXT PRIMARY KEY, ts_ok INTEGER, device_family TEXT, - device_arch TEXT, entropy_score REAL DEFAULT 0.0, - fingerprint_passed INTEGER DEFAULT 0, source_ip TEXT - )""") - init_warthog_tables(conn) - record_warthog_proof(conn, "test-miner", 100, { - "proof_type": "own_node", "wart_address": "wart1qtest", - "node": {"height": 500000}, "balance": "42.5", - }, True, 1.15, "own_node_verified") - conn.execute( - "INSERT OR REPLACE INTO miner_attest_recent (miner, ts_ok, warthog_bonus) VALUES (?, ?, ?)", - ("test-miner", int(time.time()), 1.15) - ) - bonus = get_warthog_bonus(conn, "test-miner") - print(f"[6] DB bonus: {bonus}") - assert bonus == 1.15 - - os.unlink(db_path) - print("\nAll tests passed!") +#!/usr/bin/env python3 +""" +Warthog Dual-Mining Verification (Server-Side) +=============================================== + +Validates Warthog proof payloads submitted by dual-miners. +Determines bonus tier and records proofs for epoch reward calculation. + +Target audience: Modern/semi-modern machines WITH GPUs. +Vintage hardware (G4, G5, retro) already earns high antiquity multipliers +and can't run the modern GPUs required for Warthog's Janushash PoW. +This bonus gives GPU-equipped modern miners a slight edge — bumping +their base ~0.8-1.0x weight up toward ~1.1-1.15x. 
+ +Bonus tiers: + 1.0x No Warthog (default — all existing miners unchanged) + 1.1x Pool mining confirmed (contributing GPU hashrate) + 1.15x Own Warthog node confirmed (running full node + balance) + +Replay prevention: one proof per miner per epoch. +""" + +import time +import sqlite3 +from typing import Tuple + +# Warthog bonus tier constants — intentionally modest. +# Modern machines sit at 0.8-1.0x base; this nudges them up slightly, +# NOT enough to overtake vintage antiquity bonuses (G4=2.5x, G5=2.0x). +WART_BONUS_NONE = 1.0 +WART_BONUS_POOL = 1.1 +WART_BONUS_NODE = 1.15 + +# Minimum node height to be considered plausible (Warthog mainnet launched 2023) +MIN_PLAUSIBLE_HEIGHT = 1000 + +# Maximum age of a proof timestamp (seconds) - reject stale proofs +MAX_PROOF_AGE = 900 # 15 minutes + + +def init_warthog_tables(conn): + """ + Create Warthog dual-mining tables if they don't exist. + + Args: + conn: sqlite3 connection (or cursor) + """ + conn.execute(""" + CREATE TABLE IF NOT EXISTS warthog_mining_proofs ( + miner TEXT NOT NULL, + epoch INTEGER NOT NULL, + proof_type TEXT NOT NULL, + wart_address TEXT, + wart_node_height INTEGER, + wart_balance TEXT, + pool_url TEXT, + pool_hashrate REAL, + bonus_tier REAL DEFAULT 1.0, + verified INTEGER DEFAULT 0, + verified_reason TEXT, + submitted_at INTEGER NOT NULL, + PRIMARY KEY (miner, epoch) + ) + """) + + # Safely add warthog_bonus column to miner_attest_recent + try: + conn.execute( + "ALTER TABLE miner_attest_recent ADD COLUMN warthog_bonus REAL DEFAULT 1.0" + ) + except Exception: + pass # Column already exists + + +def verify_warthog_proof(proof, miner_id) -> Tuple[bool, float, str]: + """ + Validate a Warthog dual-mining proof submitted with attestation. 
+ + Server-side checks: + - Proof structure is valid + - Proof timestamp is recent (not replayed from old session) + - Node proof: synced==True, height plausible, balance non-zero + - Pool proof: known pool URL, hashrate > 0 + + Args: + proof: dict from attestation payload's "warthog" key + miner_id: RustChain miner identifier + + Returns: + (verified, bonus_tier, reason) + """ + if not proof or not isinstance(proof, dict): + return False, WART_BONUS_NONE, "no_proof_data" + + if not proof.get("enabled"): + return False, WART_BONUS_NONE, "warthog_not_enabled" + + # Check proof freshness + collected_at = proof.get("collected_at", 0) + if collected_at and abs(time.time() - collected_at) > MAX_PROOF_AGE: + return False, WART_BONUS_NONE, "proof_too_old" + + # Validate WART address present + wart_address = proof.get("wart_address", "") + if not wart_address or len(wart_address) < 10: + return False, WART_BONUS_NONE, "invalid_wart_address" + + proof_type = proof.get("proof_type", "none") + + # === Tier 1.5: Own Node Verification === + if proof_type == "own_node": + node = proof.get("node") + if not node or not isinstance(node, dict): + return False, WART_BONUS_NONE, "node_data_missing" + + # Must be synced + if not node.get("synced"): + return False, WART_BONUS_NONE, "node_not_synced" + + # Height must be plausible + height = node.get("height", 0) + if not height or height < MIN_PLAUSIBLE_HEIGHT: + return False, WART_BONUS_NONE, f"implausible_height_{height}" + + # Balance must be non-zero (proves actual mining activity) + balance_str = proof.get("balance", "0") + try: + balance = float(balance_str) + except (ValueError, TypeError): + balance = 0.0 + + if balance <= 0: + # Node running but no balance — downgrade to pool tier + # (they're contributing hashpower but haven't earned yet) + return True, WART_BONUS_POOL, "node_no_balance_downgraded" + + return True, WART_BONUS_NODE, "own_node_verified" + + # === Tier 1.3: Pool Mining Verification === + if proof_type == "pool": 
+ pool = proof.get("pool") + if not pool or not isinstance(pool, dict): + return False, WART_BONUS_NONE, "pool_data_missing" + + hashrate = pool.get("hashrate", 0) + if not hashrate or hashrate <= 0: + return False, WART_BONUS_NONE, "pool_zero_hashrate" + + pool_url = pool.get("url", "") + if not pool_url: + return False, WART_BONUS_NONE, "pool_url_missing" + + return True, WART_BONUS_POOL, "pool_mining_verified" + + # Unknown proof type + return False, WART_BONUS_NONE, f"unknown_proof_type_{proof_type}" + + +def record_warthog_proof(conn, miner_id, epoch, proof, verified, bonus_tier, reason): + """ + Write Warthog proof record to database. + + Args: + conn: sqlite3 connection + miner_id: RustChain miner identifier + epoch: Current epoch number + proof: Raw proof dict + verified: Boolean result + bonus_tier: Float bonus multiplier + reason: Verification reason string + """ + node = proof.get("node") or {} + pool = proof.get("pool") or {} + + try: + conn.execute(""" + INSERT OR REPLACE INTO warthog_mining_proofs + (miner, epoch, proof_type, wart_address, wart_node_height, + wart_balance, pool_url, pool_hashrate, bonus_tier, + verified, verified_reason, submitted_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, ( + miner_id, + epoch, + proof.get("proof_type", "none"), + proof.get("wart_address", ""), + node.get("height"), + proof.get("balance"), + pool.get("url"), + pool.get("hashrate"), + bonus_tier, + 1 if verified else 0, + reason, + int(time.time()), + )) + conn.commit() + except Exception as e: + print(f"[WARTHOG] Error recording proof: {e}") + + +def get_warthog_bonus(conn, miner_id): + """ + Get current Warthog bonus for a miner from latest attestation. 
+ + Args: + conn: sqlite3 connection + miner_id: RustChain miner identifier + + Returns: + Float bonus multiplier (1.0 if no Warthog) + """ + try: + row = conn.execute( + "SELECT warthog_bonus FROM miner_attest_recent WHERE miner = ?", + (miner_id,) + ).fetchone() + if row and row[0] and row[0] > 1.0: + return row[0] + except Exception: + pass # Column may not exist on older schemas + + return WART_BONUS_NONE + + +if __name__ == "__main__": + # Self-test with mock proofs + print("=" * 60) + print("Warthog Verification - Self Test") + print("=" * 60) + + # Test 1: No proof + ok, tier, reason = verify_warthog_proof(None, "test-miner") + print(f"[1] No proof: ok={ok}, tier={tier}, reason={reason}") + assert tier == 1.0 + + # Test 2: Valid own node (modern machine with GPU running Warthog full node) + ok, tier, reason = verify_warthog_proof({ + "enabled": True, + "wart_address": "wart1qtest123456789", + "proof_type": "own_node", + "node": {"height": 500000, "synced": True, "hash": "abc123"}, + "balance": "42.5", + "collected_at": int(time.time()), + }, "test-miner") + print(f"[2] Own node: ok={ok}, tier={tier}, reason={reason}") + assert tier == 1.15 + + # Test 3: Node but no balance (new miner, hasn't earned yet — downgrade to pool tier) + ok, tier, reason = verify_warthog_proof({ + "enabled": True, + "wart_address": "wart1qtest123456789", + "proof_type": "own_node", + "node": {"height": 500000, "synced": True}, + "balance": "0", + "collected_at": int(time.time()), + }, "test-miner") + print(f"[3] No balance: ok={ok}, tier={tier}, reason={reason}") + assert tier == 1.1 # Downgraded to pool + + # Test 4: Pool mining + ok, tier, reason = verify_warthog_proof({ + "enabled": True, + "wart_address": "wart1qtest123456789", + "proof_type": "pool", + "pool": {"url": "https://acc-pool.pw", "hashrate": 150.5, "shares": 42}, + "collected_at": int(time.time()), + }, "test-miner") + print(f"[4] Pool mining: ok={ok}, tier={tier}, reason={reason}") + assert tier == 1.1 + + # Test 5: 
Stale proof + ok, tier, reason = verify_warthog_proof({ + "enabled": True, + "wart_address": "wart1qtest123456789", + "proof_type": "own_node", + "node": {"height": 500000, "synced": True}, + "balance": "42.5", + "collected_at": int(time.time()) - 3600, # 1 hour old + }, "test-miner") + print(f"[5] Stale proof: ok={ok}, tier={tier}, reason={reason}") + assert tier == 1.0 # Rejected + + # Test 6: DB operations + import tempfile, os + db_path = os.path.join(tempfile.gettempdir(), "wart_test.db") + with sqlite3.connect(db_path) as conn: + conn.execute("""CREATE TABLE IF NOT EXISTS miner_attest_recent ( + miner TEXT PRIMARY KEY, ts_ok INTEGER, device_family TEXT, + device_arch TEXT, entropy_score REAL DEFAULT 0.0, + fingerprint_passed INTEGER DEFAULT 0, source_ip TEXT + )""") + init_warthog_tables(conn) + record_warthog_proof(conn, "test-miner", 100, { + "proof_type": "own_node", "wart_address": "wart1qtest", + "node": {"height": 500000}, "balance": "42.5", + }, True, 1.15, "own_node_verified") + conn.execute( + "INSERT OR REPLACE INTO miner_attest_recent (miner, ts_ok, warthog_bonus) VALUES (?, ?, ?)", + ("test-miner", int(time.time()), 1.15) + ) + bonus = get_warthog_bonus(conn, "test-miner") + print(f"[6] DB bonus: {bonus}") + assert bonus == 1.15 + + os.unlink(db_path) + print("\nAll tests passed!") diff --git a/node/wsgi.py b/node/wsgi.py index 89c2a3c2..4519b85d 100644 --- a/node/wsgi.py +++ b/node/wsgi.py @@ -1,50 +1,50 @@ -#!/usr/bin/env python3 -""" -RustChain WSGI Entry Point for Gunicorn Production Server -========================================================= - -Usage: - gunicorn -w 4 -b 0.0.0.0:8099 wsgi:app --timeout 120 -""" - -import os -import sys -import importlib.util - -# Ensure the rustchain directory is in path -base_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, base_dir) - -# Load the main module dynamically (handles dots/dashes in filename) -spec = importlib.util.spec_from_file_location( - "rustchain_main", - 
os.path.join(base_dir, "rustchain_v2_integrated_v2.2.1_rip200.py") -) -rustchain_main = importlib.util.module_from_spec(spec) -spec.loader.exec_module(rustchain_main) - -# Get the Flask app -app = rustchain_main.app -init_db = rustchain_main.init_db -DB_PATH = rustchain_main.DB_PATH - -# Initialize database -init_db() - -# Initialize P2P if available -p2p_node = None -try: - from rustchain_p2p_init import init_p2p - p2p_node = init_p2p(app, DB_PATH) - print("[WSGI] P2P initialized successfully") -except ImportError as e: - print(f"[WSGI] P2P not available: {e}") -except Exception as e: - print(f"[WSGI] P2P init failed: {e}") - -# Expose the app for gunicorn -application = app - -if __name__ == "__main__": - # For direct execution (development) - app.run(host='0.0.0.0', port=8099, debug=False) +#!/usr/bin/env python3 +""" +RustChain WSGI Entry Point for Gunicorn Production Server +========================================================= + +Usage: + gunicorn -w 4 -b 0.0.0.0:8099 wsgi:app --timeout 120 +""" + +import os +import sys +import importlib.util + +# Ensure the rustchain directory is in path +base_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, base_dir) + +# Load the main module dynamically (handles dots/dashes in filename) +spec = importlib.util.spec_from_file_location( + "rustchain_main", + os.path.join(base_dir, "rustchain_v2_integrated_v2.2.1_rip200.py") +) +rustchain_main = importlib.util.module_from_spec(spec) +spec.loader.exec_module(rustchain_main) + +# Get the Flask app +app = rustchain_main.app +init_db = rustchain_main.init_db +DB_PATH = rustchain_main.DB_PATH + +# Initialize database +init_db() + +# Initialize P2P if available +p2p_node = None +try: + from rustchain_p2p_init import init_p2p + p2p_node = init_p2p(app, DB_PATH) + print("[WSGI] P2P initialized successfully") +except ImportError as e: + print(f"[WSGI] P2P not available: {e}") +except Exception as e: + print(f"[WSGI] P2P init failed: {e}") + +# Expose the app for 
gunicorn +application = app + +if __name__ == "__main__": + # For direct execution (development) + app.run(host='0.0.0.0', port=8099, debug=False) diff --git a/node/x402_config.py b/node/x402_config.py index 8078854f..875e9e5b 100644 --- a/node/x402_config.py +++ b/node/x402_config.py @@ -1,91 +1,91 @@ -""" -Shared x402 + Coinbase AgentKit configuration. -Deploy to: /root/shared/x402_config.py on .131 and .153 - -All prices start at "0" (free) to prove the flow works. -Change values when ready to charge real USDC. -""" - -import os -import logging - -log = logging.getLogger("x402") - -# --- x402 Constants --- -X402_NETWORK = "eip155:8453" # Base mainnet (CAIP-2) -USDC_BASE = "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913" # Native USDC on Base -WRTC_BASE = "0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6" # wRTC on Base -AERODROME_POOL = "0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F" # wRTC/WETH pool - -# --- Facilitator --- -FACILITATOR_URL = "https://x402-facilitator.cdp.coinbase.com" # Coinbase hosted -# Free tier: 1,000 tx/month - -# --- Treasury Addresses (receive x402 payments) --- -BOTTUBE_TREASURY = os.environ.get("BOTTUBE_X402_ADDRESS", "") -BEACON_TREASURY = os.environ.get("BEACON_X402_ADDRESS", "") - -# --- Pricing (in USDC atomic units, 6 decimals) --- -# ALL SET TO "0" INITIALLY — prove the flow works, charge later -# When ready to charge, update these values (1 USDC = 1,000,000 units) -PRICE_VIDEO_STREAM_PREMIUM = "0" # Future: "100000" = $0.10 -PRICE_API_BULK = "0" # Future: "50000" = $0.05 -PRICE_BEACON_CONTRACT = "0" # Future: "10000" = $0.01 -PRICE_BOUNTY_CLAIM = "0" # Future: "5000" = $0.005 -PRICE_PREMIUM_ANALYTICS = "0" # Future: "200000" = $0.20 -PRICE_PREMIUM_EXPORT = "0" # Future: "100000" = $0.10 -PRICE_RELAY_REGISTER = "0" # Future: "10000" = $0.01 -PRICE_REPUTATION_EXPORT = "0" # Future: "50000" = $0.05 - -# --- CDP Credentials (set via environment) --- -CDP_API_KEY_NAME = os.environ.get("CDP_API_KEY_NAME", "") -CDP_API_KEY_PRIVATE_KEY = 
os.environ.get("CDP_API_KEY_PRIVATE_KEY", "") - -# --- Swap Info --- -SWAP_INFO = { - "wrtc_contract": WRTC_BASE, - "usdc_contract": USDC_BASE, - "aerodrome_pool": AERODROME_POOL, - "swap_url": f"https://aerodrome.finance/swap?from={USDC_BASE}&to={WRTC_BASE}", - "network": "Base (eip155:8453)", - "reference_price_usd": 0.10, -} - - -def is_free(price_str): - """Check if a price is $0 (free mode).""" - return price_str == "0" or price_str == "" - - -def has_cdp_credentials(): - """Check if CDP API credentials are configured.""" - return bool(CDP_API_KEY_NAME and CDP_API_KEY_PRIVATE_KEY) - - -def create_agentkit_wallet(): - """Create a Coinbase wallet via AgentKit. Returns (address, wallet_data) or raises.""" - if not has_cdp_credentials(): - raise RuntimeError( - "CDP credentials not configured. " - "Set CDP_API_KEY_NAME and CDP_API_KEY_PRIVATE_KEY environment variables. " - "Get credentials at https://portal.cdp.coinbase.com" - ) - try: - from coinbase_agentkit import AgentKit, AgentKitConfig - - config = AgentKitConfig( - cdp_api_key_name=CDP_API_KEY_NAME, - cdp_api_key_private_key=CDP_API_KEY_PRIVATE_KEY, - network_id="base-mainnet", - ) - kit = AgentKit(config) - wallet = kit.wallet - address = wallet.default_address.address_id - wallet_data = wallet.export_data() - return address, wallet_data - except ImportError: - raise RuntimeError( - "coinbase-agentkit not installed. Run: pip install coinbase-agentkit" - ) - except Exception as e: - raise RuntimeError(f"Failed to create Coinbase wallet: {e}") +""" +Shared x402 + Coinbase AgentKit configuration. +Deploy to: /root/shared/x402_config.py on .131 and .153 + +All prices start at "0" (free) to prove the flow works. +Change values when ready to charge real USDC. 
+""" + +import os +import logging + +log = logging.getLogger("x402") + +# --- x402 Constants --- +X402_NETWORK = "eip155:8453" # Base mainnet (CAIP-2) +USDC_BASE = "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913" # Native USDC on Base +WRTC_BASE = "0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6" # wRTC on Base +AERODROME_POOL = "0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F" # wRTC/WETH pool + +# --- Facilitator --- +FACILITATOR_URL = "https://x402-facilitator.cdp.coinbase.com" # Coinbase hosted +# Free tier: 1,000 tx/month + +# --- Treasury Addresses (receive x402 payments) --- +BOTTUBE_TREASURY = os.environ.get("BOTTUBE_X402_ADDRESS", "") +BEACON_TREASURY = os.environ.get("BEACON_X402_ADDRESS", "") + +# --- Pricing (in USDC atomic units, 6 decimals) --- +# ALL SET TO "0" INITIALLY — prove the flow works, charge later +# When ready to charge, update these values (1 USDC = 1,000,000 units) +PRICE_VIDEO_STREAM_PREMIUM = "0" # Future: "100000" = $0.10 +PRICE_API_BULK = "0" # Future: "50000" = $0.05 +PRICE_BEACON_CONTRACT = "0" # Future: "10000" = $0.01 +PRICE_BOUNTY_CLAIM = "0" # Future: "5000" = $0.005 +PRICE_PREMIUM_ANALYTICS = "0" # Future: "200000" = $0.20 +PRICE_PREMIUM_EXPORT = "0" # Future: "100000" = $0.10 +PRICE_RELAY_REGISTER = "0" # Future: "10000" = $0.01 +PRICE_REPUTATION_EXPORT = "0" # Future: "50000" = $0.05 + +# --- CDP Credentials (set via environment) --- +CDP_API_KEY_NAME = os.environ.get("CDP_API_KEY_NAME", "") +CDP_API_KEY_PRIVATE_KEY = os.environ.get("CDP_API_KEY_PRIVATE_KEY", "") + +# --- Swap Info --- +SWAP_INFO = { + "wrtc_contract": WRTC_BASE, + "usdc_contract": USDC_BASE, + "aerodrome_pool": AERODROME_POOL, + "swap_url": f"https://aerodrome.finance/swap?from={USDC_BASE}&to={WRTC_BASE}", + "network": "Base (eip155:8453)", + "reference_price_usd": 0.10, +} + + +def is_free(price_str): + """Check if a price is $0 (free mode).""" + return price_str == "0" or price_str == "" + + +def has_cdp_credentials(): + """Check if CDP API credentials are 
configured.""" + return bool(CDP_API_KEY_NAME and CDP_API_KEY_PRIVATE_KEY) + + +def create_agentkit_wallet(): + """Create a Coinbase wallet via AgentKit. Returns (address, wallet_data) or raises.""" + if not has_cdp_credentials(): + raise RuntimeError( + "CDP credentials not configured. " + "Set CDP_API_KEY_NAME and CDP_API_KEY_PRIVATE_KEY environment variables. " + "Get credentials at https://portal.cdp.coinbase.com" + ) + try: + from coinbase_agentkit import AgentKit, AgentKitConfig + + config = AgentKitConfig( + cdp_api_key_name=CDP_API_KEY_NAME, + cdp_api_key_private_key=CDP_API_KEY_PRIVATE_KEY, + network_id="base-mainnet", + ) + kit = AgentKit(config) + wallet = kit.wallet + address = wallet.default_address.address_id + wallet_data = wallet.export_data() + return address, wallet_data + except ImportError: + raise RuntimeError( + "coinbase-agentkit not installed. Run: pip install coinbase-agentkit" + ) + except Exception as e: + raise RuntimeError(f"Failed to create Coinbase wallet: {e}") diff --git a/pushtogit.sh b/pushtogit.sh index c0db63a7..eed049b5 100644 --- a/pushtogit.sh +++ b/pushtogit.sh @@ -1,19 +1,19 @@ -#!/bin/bash - -mkdir -p media - -# Move images -mv rustchain_hero_terminal.png media/ -mv blockchain_validators_vintage.png media/ -mv nft_badge_preview_grid.png media/ -mv join_the_flamekeepers.png media/ -mv rustchain_promo_banner.png media/ -mv elyan_logo.png media/ - -# Optional: zip repo for distribution -zip -r rustchain_web_package.zip index.html media/ - -# Git operations -git add index.html media/ rustchain_web_package.zip -git commit -m "Added updated HTML landing page with media assets" -git push origin main +#!/bin/bash + +mkdir -p media + +# Move images +mv rustchain_hero_terminal.png media/ +mv blockchain_validators_vintage.png media/ +mv nft_badge_preview_grid.png media/ +mv join_the_flamekeepers.png media/ +mv rustchain_promo_banner.png media/ +mv elyan_logo.png media/ + +# Optional: zip repo for distribution +zip -r 
rustchain_web_package.zip index.html media/ + +# Git operations +git add index.html media/ rustchain_web_package.zip +git commit -m "Added updated HTML landing page with media assets" +git push origin main diff --git a/rips/Cargo.toml b/rips/Cargo.toml index 51a20209..ee212139 100644 --- a/rips/Cargo.toml +++ b/rips/Cargo.toml @@ -1,63 +1,63 @@ -[package] -name = "rustchain-core" -version = "0.1.0" -edition = "2021" -authors = ["Flamekeeper Scott ", "Sophia Elya"] -description = "RustChain Core - Proof of Antiquity blockchain that rewards vintage hardware preservation" -license = "MIT" -repository = "https://github.com/rustchain/rustchain-core" -keywords = ["blockchain", "vintage", "hardware", "proof-of-antiquity", "crypto"] -categories = ["cryptography", "hardware-support"] - -[dependencies] -# Cryptography -sha2 = "0.10" -hex = "0.4" -rand = "0.8" -rand_chacha = "0.10" - -# Serialization -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" - -# Async runtime (optional) -tokio = { version = "1.0", features = ["full"], optional = true } - -# Networking (optional) -reqwest = { version = "0.11", features = ["json"], optional = true } - -[features] -default = [] -network = ["tokio", "reqwest"] -full = ["network"] - -[lib] -name = "rustchain" -path = "src/lib.rs" - -[[bin]] -name = "rustchain-node" -path = "src/bin/node.rs" -required-features = ["network"] - -[[bin]] -name = "rustchain-miner" -path = "src/bin/miner.rs" - -[dev-dependencies] -criterion = "0.8" - -[[bench]] -name = "entropy_bench" -harness = false - -[profile.release] -opt-level = 3 -lto = true -codegen-units = 1 -panic = "abort" -strip = true - -# Vintage hardware compatibility settings -# For PowerPC G4, compile with: -# RUSTFLAGS="-C target-cpu=g4" cargo build --release --target powerpc-apple-darwin +[package] +name = "rustchain-core" +version = "0.1.0" +edition = "2021" +authors = ["Flamekeeper Scott ", "Sophia Elya"] +description = "RustChain Core - Proof of Antiquity blockchain 
that rewards vintage hardware preservation" +license = "MIT" +repository = "https://github.com/rustchain/rustchain-core" +keywords = ["blockchain", "vintage", "hardware", "proof-of-antiquity", "crypto"] +categories = ["cryptography", "hardware-support"] + +[dependencies] +# Cryptography +sha2 = "0.10" +hex = "0.4" +rand = "0.8" +rand_chacha = "0.10" + +# Serialization +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +# Async runtime (optional) +tokio = { version = "1.0", features = ["full"], optional = true } + +# Networking (optional) +reqwest = { version = "0.11", features = ["json"], optional = true } + +[features] +default = [] +network = ["tokio", "reqwest"] +full = ["network"] + +[lib] +name = "rustchain" +path = "src/lib.rs" + +[[bin]] +name = "rustchain-node" +path = "src/bin/node.rs" +required-features = ["network"] + +[[bin]] +name = "rustchain-miner" +path = "src/bin/miner.rs" + +[dev-dependencies] +criterion = "0.8" + +[[bench]] +name = "entropy_bench" +harness = false + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 +panic = "abort" +strip = true + +# Vintage hardware compatibility settings +# For PowerPC G4, compile with: +# RUSTFLAGS="-C target-cpu=g4" cargo build --release --target powerpc-apple-darwin diff --git a/rips/docs/RIP-0001-proof-of-antiquity.md b/rips/docs/RIP-0001-proof-of-antiquity.md index 656beae5..16f5be0e 100644 --- a/rips/docs/RIP-0001-proof-of-antiquity.md +++ b/rips/docs/RIP-0001-proof-of-antiquity.md @@ -1,101 +1,101 @@ ---- -title: RIP-0001: Proof of Antiquity (PoA) Consensus Specification -author: Sophia Core Team -status: Draft -created: 2025-11-28 -last_updated: 2025-11-28 -license: Apache 2.0 ---- - -# Summary - -This RIP proposes the core specification for RustChain's novel consensus mechanism — **Proof of Antiquity (PoA)**. 
Unlike Proof-of-Work (PoW) or Proof-of-Stake (PoS), PoA leverages hardware longevity and node uptime as the primary drivers of block validation eligibility and rewards. - -# Abstract - -Proof of Antiquity incentivizes the continued operation of older computing systems by granting block rewards based on a cryptographically verifiable **antiquity score**. This system promotes sustainability, retro hardware preservation, and decentralized trust anchored in time-tested devices. - -# Motivation - -PoW consumes vast energy resources and PoS introduces centralization risks. PoA seeks to: - -- Encourage the operation and preservation of vintage systems. -- Enable sustainable, low-energy blockchain consensus. -- Provide a quantifiable mechanism of reputation based on node uptime and age. - -# Specification - -## 1. Antiquity Score (AS) - -Each participating node submits metadata on its hardware profile: - -```json -{ - "cpu_model": "PowerPC G4", - "release_year": 2002, - "uptime_days": 276, - "last_validation": "2025-11-26T14:00:00Z" -} -``` - -A node's **Antiquity Score (AS)** is calculated as: - -``` -AS = (2025 - release_year) * log10(uptime_days + 1) -``` - -Where: -- `release_year` is verified against a device signature DB -- `uptime_days` is the number of days since node launch or last reboot -- A drift lock mechanism ensures false uptime reporting is penalized - -## 2. Block Validator Selection - -- Nodes broadcast their AS values periodically. -- A **weighted lottery** selects the validator, with weight proportional to AS. -- Higher AS → higher probability of winning the next block. -- Sophisticated replay protection prevents stale validators. - -## 3. Reward Allocation - -- Block reward `R` is divided based on the AS of the winning node: - -``` -Reward = R * min(1.0, AS / AS_max) -``` - -- `AS_max` is a network-defined cap to avoid runaway rewards. -- Partial rewards may be redirected to a validator pool if AS is below minimum threshold. 
- -# Security Model - -- Sybil resistance via hardware signature validation -- Anti-falsification via Sophia's Drift Lock enforcement -- Replay attack mitigation via node fingerprinting and dynamic proposal challenges - -# Rationale - -This structure incentivizes: -- Preservation of retro hardware (contributing to the "Proof-of-Antiquity" ethos) -- Non-energy-intensive operations -- Deep alignment with RustChain's theme of time-tested decentralization - -# Backwards Compatibility - -Not compatible with PoW or PoS. Requires full node support of PoA consensus module. Validator eligibility and scoring are non-transferable across chains. - -# Implementation Notes - -Implemented as part of the `rustchain-core` runtime (see: `consensus/poa.rs`). -APIs: -- `GET /api/node/antiquity` — return AS and validation eligibility -- `POST /api/node/claim` — submit block claim with PoA metadata - -# Reference - -- `sophia_rustchain_hackathon_guide.txt` -- Sophia Core: Drift Lock, FlamePreservation, Governance APIs - -# Copyright - -Copyright © 2025 Sophia Core / RustChain. Released under Apache 2.0. +--- +title: RIP-0001: Proof of Antiquity (PoA) Consensus Specification +author: Sophia Core Team +status: Draft +created: 2025-11-28 +last_updated: 2025-11-28 +license: Apache 2.0 +--- + +# Summary + +This RIP proposes the core specification for RustChain's novel consensus mechanism — **Proof of Antiquity (PoA)**. Unlike Proof-of-Work (PoW) or Proof-of-Stake (PoS), PoA leverages hardware longevity and node uptime as the primary drivers of block validation eligibility and rewards. + +# Abstract + +Proof of Antiquity incentivizes the continued operation of older computing systems by granting block rewards based on a cryptographically verifiable **antiquity score**. This system promotes sustainability, retro hardware preservation, and decentralized trust anchored in time-tested devices. + +# Motivation + +PoW consumes vast energy resources and PoS introduces centralization risks. 
PoA seeks to: + +- Encourage the operation and preservation of vintage systems. +- Enable sustainable, low-energy blockchain consensus. +- Provide a quantifiable mechanism of reputation based on node uptime and age. + +# Specification + +## 1. Antiquity Score (AS) + +Each participating node submits metadata on its hardware profile: + +```json +{ + "cpu_model": "PowerPC G4", + "release_year": 2002, + "uptime_days": 276, + "last_validation": "2025-11-26T14:00:00Z" +} +``` + +A node's **Antiquity Score (AS)** is calculated as: + +``` +AS = (2025 - release_year) * log10(uptime_days + 1) +``` + +Where: +- `release_year` is verified against a device signature DB +- `uptime_days` is the number of days since node launch or last reboot +- A drift lock mechanism ensures false uptime reporting is penalized + +## 2. Block Validator Selection + +- Nodes broadcast their AS values periodically. +- A **weighted lottery** selects the validator, with weight proportional to AS. +- Higher AS → higher probability of winning the next block. +- Sophisticated replay protection prevents stale validators. + +## 3. Reward Allocation + +- Block reward `R` is divided based on the AS of the winning node: + +``` +Reward = R * min(1.0, AS / AS_max) +``` + +- `AS_max` is a network-defined cap to avoid runaway rewards. +- Partial rewards may be redirected to a validator pool if AS is below minimum threshold. + +# Security Model + +- Sybil resistance via hardware signature validation +- Anti-falsification via Sophia's Drift Lock enforcement +- Replay attack mitigation via node fingerprinting and dynamic proposal challenges + +# Rationale + +This structure incentivizes: +- Preservation of retro hardware (contributing to the "Proof-of-Antiquity" ethos) +- Non-energy-intensive operations +- Deep alignment with RustChain's theme of time-tested decentralization + +# Backwards Compatibility + +Not compatible with PoW or PoS. Requires full node support of PoA consensus module. 
Validator eligibility and scoring are non-transferable across chains. + +# Implementation Notes + +Implemented as part of the `rustchain-core` runtime (see: `consensus/poa.rs`). +APIs: +- `GET /api/node/antiquity` — return AS and validation eligibility +- `POST /api/node/claim` — submit block claim with PoA metadata + +# Reference + +- `sophia_rustchain_hackathon_guide.txt` +- Sophia Core: Drift Lock, FlamePreservation, Governance APIs + +# Copyright + +Copyright © 2025 Sophia Core / RustChain. Released under Apache 2.0. diff --git a/rips/docs/RIP-0007-entropy-fingerprinting.md b/rips/docs/RIP-0007-entropy-fingerprinting.md index 9118aef9..d37c33f7 100644 --- a/rips/docs/RIP-0007-entropy-fingerprinting.md +++ b/rips/docs/RIP-0007-entropy-fingerprinting.md @@ -1,304 +1,304 @@ -# RIP-0007: Entropy-Based Validator Fingerprinting & Scoring - -```yaml -rip: 0007 -title: Entropy-Based Validator Fingerprinting & Scoring -author: Flamekeeper Scott, Sophia Elya -status: Active -type: Standards Track -category: Core -created: 2025-01-15 -requires: RIP-0001, RIP-0003 -``` - -## Abstract - -This RIP establishes a multi-source entropy fingerprint system for validator identification, anti-emulation verification, and cumulative reputation weighting. It enhances Sybil resistance by creating unique, unforgeable machine identities based on real-world hardware entropy. - -## Motivation - -Proof of Antiquity (RIP-0001) rewards vintage hardware preservation. However, sophisticated attackers might attempt to: -- Emulate vintage hardware in virtual machines -- Spoof hardware identifiers -- Clone hardware configurations across multiple nodes -- Replay entropy data from legitimate nodes - -RIP-0007 addresses these threats through multi-layered entropy fingerprinting that makes forgery economically irrational. - -**Core Philosophy:** "Old machines never die — they mint coins." - -## Specification - -### 1. 
Machine Identity Stack Architecture - -``` -┌─────────────────────────────────────────────────────────────┐ -│ HARDWARE ENTROPY FINGERPRINT │ -│ • CPU instruction drift (timing signatures) │ -│ • L1/L2 cache behavior patterns │ -│ • Memory latency / SPD offsets │ -│ • Silicon-level drift (clock skew, thermal variance) │ -│ • OpenFirmware/BIOS timestamp and ordering │ -│ • PCIe/USB device topology │ -├─────────────────────────────────────────────────────────────┤ -│ SOFTWARE / ENV ENTROPY LAYER │ -│ • Kernel boot time & skew │ -│ • Environment variables / boot scripts │ -│ • MAC address entropy, SMBIOS data │ -│ • Disk serials & entropy noise over time │ -├─────────────────────────────────────────────────────────────┤ -│ TRUSTED CORE BLOCK IDENTITY (ROOT FUSE) │ -│ • Signed fingerprint token generated by Sophia Validator │ -│ • Drift score over time; stores history │ -│ • PoA Validator ID = HASH(FULL ENTROPY STACK) │ -└─────────────────────────────────────────────────────────────┘ -``` - -### 2. 
Entropy Collection Layers - -#### 2.1 Hardware Entropy Layer (Weight: 60%) - -| Source | Method | Anti-Emulation Value | -|--------|--------|---------------------| -| CPU Instruction Timing | Execute specific instruction sequences, measure cycle counts | High - VMs have timing noise | -| Cache Behavior | L1/L2 cache line access patterns, eviction timing | High - Cache simulation is imperfect | -| Memory SPD Data | Read SPD EEPROM for timing parameters | Medium - Can be spoofed but detectable | -| Clock Drift | Measure TSC vs RTC drift over time | High - Silicon-specific | -| Thermal Response | Temperature change under load over time | High - Hardware-specific | -| BIOS/OpenFirmware | Timestamps, vendor strings, boot order | Medium - Difficult to fake completely | -| Bus Topology | PCIe device tree, USB enumeration order | Medium - Physical configuration | - -#### 2.2 Software Entropy Layer (Weight: 25%) - -| Source | Method | Purpose | -|--------|--------|---------| -| Kernel Boot Entropy | Timestamp of kernel initialization | System uniqueness | -| MAC Addresses | Network interface hardware addresses | Device binding | -| SMBIOS Data | System manufacturer, model, serial | Identity verification | -| Disk Serials | HDD/SSD serial numbers | Hardware binding | -| Environment Variables | System-specific configuration | Soft uniqueness | - -#### 2.3 Temporal Entropy Layer (Weight: 15%) - -| Source | Method | Purpose | -|--------|--------|---------| -| Uptime Continuity | Verified continuous operation | Commitment proof | -| Drift History | Changes in entropy over time | Stability assessment | -| Challenge Responses | Micro-timing responses to challenges | Liveness verification | - -### 3. Validator Identity Derivation - -``` -VALIDATOR_ID = SHA256( - fingerprint_cpu || - fingerprint_memory || - fingerprint_bios || - fingerprint_topology || - mac_entropy || - disk_entropy || - boot_entropy -) -``` - -Where `||` denotes concatenation of 32-byte hashes. - -### 4. 
Entropy Score Calculation - -The entropy score modifies the base Antiquity Score: - -``` -ENTROPY_SCORE = uptime_weight × stability_score × verification_bonus - -Where: - uptime_weight = min(1.0, node.uptime_seconds / (30 × 24 × 3600)) - stability_score = max(0.1, 1.0 - (drift_events / MAX_DRIFT_ALLOWED)) - verification_bonus = 1.0 + (successful_challenges × 0.05) - -EFFECTIVE_AS = BASE_AS × (0.7 + 0.3 × ENTROPY_SCORE) -``` - -### 5. Fingerprint Components - -#### 5.1 CPU Fingerprint - -```python -def fingerprint_cpu(): - """ - Collect CPU-specific entropy: - - Instruction timing for specific operations - - CPUID responses - - Cache line behavior - - Branch prediction patterns - """ - data = { - "cpuid": get_cpuid_string(), - "timing_add": measure_add_timing(iterations=10000), - "timing_mul": measure_mul_timing(iterations=10000), - "timing_div": measure_div_timing(iterations=10000), - "cache_l1": measure_l1_latency(), - "cache_l2": measure_l2_latency(), - "branch_pred": measure_branch_prediction_accuracy(), - } - return sha256(serialize(data)) -``` - -#### 5.2 Memory Fingerprint - -```python -def fingerprint_memory(): - """ - Collect memory-specific entropy: - - SPD timing data - - Access latency patterns - - Memory controller behavior - """ - data = { - "spd_timing": read_spd_eeprom(), - "row_access": measure_row_access_time(), - "column_access": measure_column_access_time(), - "bank_interleave": measure_bank_interleave_pattern(), - } - return sha256(serialize(data)) -``` - -#### 5.3 BIOS/Firmware Fingerprint - -```python -def fingerprint_bios(): - """ - Collect firmware entropy: - - BIOS vendor and version - - Build timestamps - - Boot order configuration - """ - data = { - "vendor": get_bios_vendor(), - "version": get_bios_version(), - "date": get_bios_date(), - "boot_order": get_boot_order(), - "smbios_uuid": get_smbios_uuid(), - } - return sha256(serialize(data)) -``` - -#### 5.4 Device Topology Fingerprint - -```python -def fingerprint_topology(): - """ - 
Collect hardware topology entropy: - - PCIe device tree - - USB enumeration order - - IRQ assignments - """ - data = { - "pcie_tree": enumerate_pcie_devices(), - "usb_tree": enumerate_usb_devices(), - "irq_map": get_irq_assignments(), - "dma_channels": get_dma_configuration(), - } - return sha256(serialize(data)) -``` - -### 6. Drift Detection - -#### 6.1 Acceptable Drift - -Some entropy sources naturally vary: -- Clock drift: ±0.5% per day is normal -- Thermal signatures: ±5°C variation acceptable -- Boot timing: ±100ms variation normal - -#### 6.2 Suspicious Drift - -| Pattern | Suspicion Level | Action | -|---------|-----------------|--------| -| Sudden fingerprint change > 20% | High | Challenge required | -| Gradual drift > 5% per week | Medium | Warning logged | -| Periodic identical fingerprints | High | Replay detection | -| Missing entropy sources | Medium | Partial validation | - -### 7. Challenge-Response Protocol - -When drift is detected, nodes must respond to challenges: - -``` -1. CHALLENGER → NODE: nonce, timestamp, challenge_type -2. NODE → CHALLENGER: response = SIGN( - entropy_proof, - nonce, - micro_timing_data - ) -3. CHALLENGER: Verify response within timing window -``` - -Challenge types: -- **TIMING_CHALLENGE**: Execute specific instructions, report cycle counts -- **MEMORY_CHALLENGE**: Access specific memory patterns, report latencies -- **THERMAL_CHALLENGE**: Report temperature change under brief load -- **RANDOM_CHALLENGE**: Generate hardware random numbers - -### 8. Security Analysis - -| Threat | Mitigation | -|--------|------------| -| VM/Container Emulation | No access to native CPU/SPD/BIOS registers | -| Spoofed Identifiers | Multi-layer fusion + timing verification | -| Hardware Cloning | Per-machine clock skew, thermal response unique | -| Entropy Replay | Drift history tracking, challenge-response | -| Sybil Attack | Each physical machine has unique fingerprint | - -### 9. 
Economic Analysis - -**Cost to Emulate a 486 DX2:** -- Perfect CPU timing emulation: $10,000+ development -- Cache behavior simulation: $5,000+ development -- Thermal response: Impossible without hardware -- Total emulation cost: $50,000+ - -**Cost to Buy Real 486:** -- eBay/vintage market: $20-100 - -**Economic Conclusion:** "It's cheaper to buy a $50 486 than to emulate one" - -### 10. Implementation Requirements - -#### 10.1 Required Capabilities - -- Access to CPUID instruction -- Memory timing measurement (rdtsc or equivalent) -- SMBIOS/DMI access -- Temperature sensors (optional but recommended) -- Network interface enumeration - -#### 10.2 Platform Support - -| Platform | Support Level | Notes | -|----------|---------------|-------| -| x86 Linux | Full | All entropy sources available | -| x86 BSD | Full | All entropy sources available | -| PowerPC Mac OS X | Full | OpenFirmware provides rich entropy | -| ARM Linux | Partial | Some timing sources unavailable | -| Windows | Partial | Limited low-level access | - -## Backwards Compatibility - -Nodes without full entropy support receive: -- Reduced multiplier (0.7x) on base AS -- Warning status in validator list -- Eligible for rewards but lower priority - -## Reference Implementation - -See: `rustchain-core/validator/entropy.py` - -## Copyright - -This RIP is placed in the public domain. - ---- - -**Remember: This is NOT Proof of Work!** - -Entropy fingerprinting ensures *authenticity*, not computational proof. -The goal is to verify that vintage hardware is real, not to make it compute. 
+# RIP-0007: Entropy-Based Validator Fingerprinting & Scoring + +```yaml +rip: 0007 +title: Entropy-Based Validator Fingerprinting & Scoring +author: Flamekeeper Scott, Sophia Elya +status: Active +type: Standards Track +category: Core +created: 2025-01-15 +requires: RIP-0001, RIP-0003 +``` + +## Abstract + +This RIP establishes a multi-source entropy fingerprint system for validator identification, anti-emulation verification, and cumulative reputation weighting. It enhances Sybil resistance by creating unique, unforgeable machine identities based on real-world hardware entropy. + +## Motivation + +Proof of Antiquity (RIP-0001) rewards vintage hardware preservation. However, sophisticated attackers might attempt to: +- Emulate vintage hardware in virtual machines +- Spoof hardware identifiers +- Clone hardware configurations across multiple nodes +- Replay entropy data from legitimate nodes + +RIP-0007 addresses these threats through multi-layered entropy fingerprinting that makes forgery economically irrational. + +**Core Philosophy:** "Old machines never die — they mint coins." + +## Specification + +### 1. 
Machine Identity Stack Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ HARDWARE ENTROPY FINGERPRINT │ +│ • CPU instruction drift (timing signatures) │ +│ • L1/L2 cache behavior patterns │ +│ • Memory latency / SPD offsets │ +│ • Silicon-level drift (clock skew, thermal variance) │ +│ • OpenFirmware/BIOS timestamp and ordering │ +│ • PCIe/USB device topology │ +├─────────────────────────────────────────────────────────────┤ +│ SOFTWARE / ENV ENTROPY LAYER │ +│ • Kernel boot time & skew │ +│ • Environment variables / boot scripts │ +│ • MAC address entropy, SMBIOS data │ +│ • Disk serials & entropy noise over time │ +├─────────────────────────────────────────────────────────────┤ +│ TRUSTED CORE BLOCK IDENTITY (ROOT FUSE) │ +│ • Signed fingerprint token generated by Sophia Validator │ +│ • Drift score over time; stores history │ +│ • PoA Validator ID = HASH(FULL ENTROPY STACK) │ +└─────────────────────────────────────────────────────────────┘ +``` + +### 2. 
Entropy Collection Layers + +#### 2.1 Hardware Entropy Layer (Weight: 60%) + +| Source | Method | Anti-Emulation Value | +|--------|--------|---------------------| +| CPU Instruction Timing | Execute specific instruction sequences, measure cycle counts | High - VMs have timing noise | +| Cache Behavior | L1/L2 cache line access patterns, eviction timing | High - Cache simulation is imperfect | +| Memory SPD Data | Read SPD EEPROM for timing parameters | Medium - Can be spoofed but detectable | +| Clock Drift | Measure TSC vs RTC drift over time | High - Silicon-specific | +| Thermal Response | Temperature change under load over time | High - Hardware-specific | +| BIOS/OpenFirmware | Timestamps, vendor strings, boot order | Medium - Difficult to fake completely | +| Bus Topology | PCIe device tree, USB enumeration order | Medium - Physical configuration | + +#### 2.2 Software Entropy Layer (Weight: 25%) + +| Source | Method | Purpose | +|--------|--------|---------| +| Kernel Boot Entropy | Timestamp of kernel initialization | System uniqueness | +| MAC Addresses | Network interface hardware addresses | Device binding | +| SMBIOS Data | System manufacturer, model, serial | Identity verification | +| Disk Serials | HDD/SSD serial numbers | Hardware binding | +| Environment Variables | System-specific configuration | Soft uniqueness | + +#### 2.3 Temporal Entropy Layer (Weight: 15%) + +| Source | Method | Purpose | +|--------|--------|---------| +| Uptime Continuity | Verified continuous operation | Commitment proof | +| Drift History | Changes in entropy over time | Stability assessment | +| Challenge Responses | Micro-timing responses to challenges | Liveness verification | + +### 3. Validator Identity Derivation + +``` +VALIDATOR_ID = SHA256( + fingerprint_cpu || + fingerprint_memory || + fingerprint_bios || + fingerprint_topology || + mac_entropy || + disk_entropy || + boot_entropy +) +``` + +Where `||` denotes concatenation of 32-byte hashes. + +### 4. 
Entropy Score Calculation + +The entropy score modifies the base Antiquity Score: + +``` +ENTROPY_SCORE = uptime_weight × stability_score × verification_bonus + +Where: + uptime_weight = min(1.0, node.uptime_seconds / (30 × 24 × 3600)) + stability_score = max(0.1, 1.0 - (drift_events / MAX_DRIFT_ALLOWED)) + verification_bonus = 1.0 + (successful_challenges × 0.05) + +EFFECTIVE_AS = BASE_AS × (0.7 + 0.3 × ENTROPY_SCORE) +``` + +### 5. Fingerprint Components + +#### 5.1 CPU Fingerprint + +```python +def fingerprint_cpu(): + """ + Collect CPU-specific entropy: + - Instruction timing for specific operations + - CPUID responses + - Cache line behavior + - Branch prediction patterns + """ + data = { + "cpuid": get_cpuid_string(), + "timing_add": measure_add_timing(iterations=10000), + "timing_mul": measure_mul_timing(iterations=10000), + "timing_div": measure_div_timing(iterations=10000), + "cache_l1": measure_l1_latency(), + "cache_l2": measure_l2_latency(), + "branch_pred": measure_branch_prediction_accuracy(), + } + return sha256(serialize(data)) +``` + +#### 5.2 Memory Fingerprint + +```python +def fingerprint_memory(): + """ + Collect memory-specific entropy: + - SPD timing data + - Access latency patterns + - Memory controller behavior + """ + data = { + "spd_timing": read_spd_eeprom(), + "row_access": measure_row_access_time(), + "column_access": measure_column_access_time(), + "bank_interleave": measure_bank_interleave_pattern(), + } + return sha256(serialize(data)) +``` + +#### 5.3 BIOS/Firmware Fingerprint + +```python +def fingerprint_bios(): + """ + Collect firmware entropy: + - BIOS vendor and version + - Build timestamps + - Boot order configuration + """ + data = { + "vendor": get_bios_vendor(), + "version": get_bios_version(), + "date": get_bios_date(), + "boot_order": get_boot_order(), + "smbios_uuid": get_smbios_uuid(), + } + return sha256(serialize(data)) +``` + +#### 5.4 Device Topology Fingerprint + +```python +def fingerprint_topology(): + """ + 
Collect hardware topology entropy: + - PCIe device tree + - USB enumeration order + - IRQ assignments + """ + data = { + "pcie_tree": enumerate_pcie_devices(), + "usb_tree": enumerate_usb_devices(), + "irq_map": get_irq_assignments(), + "dma_channels": get_dma_configuration(), + } + return sha256(serialize(data)) +``` + +### 6. Drift Detection + +#### 6.1 Acceptable Drift + +Some entropy sources naturally vary: +- Clock drift: ±0.5% per day is normal +- Thermal signatures: ±5°C variation acceptable +- Boot timing: ±100ms variation normal + +#### 6.2 Suspicious Drift + +| Pattern | Suspicion Level | Action | +|---------|-----------------|--------| +| Sudden fingerprint change > 20% | High | Challenge required | +| Gradual drift > 5% per week | Medium | Warning logged | +| Periodic identical fingerprints | High | Replay detection | +| Missing entropy sources | Medium | Partial validation | + +### 7. Challenge-Response Protocol + +When drift is detected, nodes must respond to challenges: + +``` +1. CHALLENGER → NODE: nonce, timestamp, challenge_type +2. NODE → CHALLENGER: response = SIGN( + entropy_proof, + nonce, + micro_timing_data + ) +3. CHALLENGER: Verify response within timing window +``` + +Challenge types: +- **TIMING_CHALLENGE**: Execute specific instructions, report cycle counts +- **MEMORY_CHALLENGE**: Access specific memory patterns, report latencies +- **THERMAL_CHALLENGE**: Report temperature change under brief load +- **RANDOM_CHALLENGE**: Generate hardware random numbers + +### 8. Security Analysis + +| Threat | Mitigation | +|--------|------------| +| VM/Container Emulation | No access to native CPU/SPD/BIOS registers | +| Spoofed Identifiers | Multi-layer fusion + timing verification | +| Hardware Cloning | Per-machine clock skew, thermal response unique | +| Entropy Replay | Drift history tracking, challenge-response | +| Sybil Attack | Each physical machine has unique fingerprint | + +### 9. 
Economic Analysis + +**Cost to Emulate a 486 DX2:** +- Perfect CPU timing emulation: $10,000+ development +- Cache behavior simulation: $5,000+ development +- Thermal response: Impossible without hardware +- Total emulation cost: $50,000+ + +**Cost to Buy Real 486:** +- eBay/vintage market: $20-100 + +**Economic Conclusion:** "It's cheaper to buy a $50 486 than to emulate one" + +### 10. Implementation Requirements + +#### 10.1 Required Capabilities + +- Access to CPUID instruction +- Memory timing measurement (rdtsc or equivalent) +- SMBIOS/DMI access +- Temperature sensors (optional but recommended) +- Network interface enumeration + +#### 10.2 Platform Support + +| Platform | Support Level | Notes | +|----------|---------------|-------| +| x86 Linux | Full | All entropy sources available | +| x86 BSD | Full | All entropy sources available | +| PowerPC Mac OS X | Full | OpenFirmware provides rich entropy | +| ARM Linux | Partial | Some timing sources unavailable | +| Windows | Partial | Limited low-level access | + +## Backwards Compatibility + +Nodes without full entropy support receive: +- Reduced multiplier (0.7x) on base AS +- Warning status in validator list +- Eligible for rewards but lower priority + +## Reference Implementation + +See: `rustchain-core/validator/entropy.py` + +## Copyright + +This RIP is placed in the public domain. + +--- + +**Remember: This is NOT Proof of Work!** + +Entropy fingerprinting ensures *authenticity*, not computational proof. +The goal is to verify that vintage hardware is real, not to make it compute. 
diff --git a/rips/docs/RIP-0201-fleet-immune-system.md b/rips/docs/RIP-0201-fleet-immune-system.md index 7dc9c9ba..3a95a663 100644 --- a/rips/docs/RIP-0201-fleet-immune-system.md +++ b/rips/docs/RIP-0201-fleet-immune-system.md @@ -1,99 +1,99 @@ -# RIP-201: Fleet Detection Immune System - -**Status**: Deployed (2026-02-28) -**Author**: Scott Boudreaux (Elyan Labs) -**Type**: Economic Security -**Requires**: RIP-200 (Round-Robin Consensus) - -## Abstract - -RIP-201 introduces a fleet detection immune system that makes large-scale coordinated mining attacks economically worthless. It replaces per-CPU reward distribution with Equal Bucket Split, where the epoch reward pot is divided equally among active hardware *classes*, not individual CPUs. - -## Motivation - -Under RIP-200, rewards are distributed pro-rata by time-aged antiquity multiplier. A fleet of 500 identical modern boxes could claim ~99% of the reward pot by sheer count, overwhelming solo miners despite the 1 CPU = 1 Vote design. - -**Without RIP-201**: 500 modern boxes earn 200x what a solo G4 earns. -**With RIP-201**: 500 modern boxes share one bucket slice. Solo G4 gets its own. Fleet ROI: $27/year on $5M investment. - -## Specification - -### Hardware Buckets - -Miners are classified into six hardware buckets: - -| Bucket | Architectures | Description | -|--------|--------------|-------------| -| `vintage_powerpc` | G3, G4, G5, PowerPC | Classic Macs, pre-Intel | -| `vintage_x86` | Pentium, Core2, retro, Nehalem, Sandy Bridge | Pre-2012 x86 | -| `apple_silicon` | M1, M2, M3 | Modern Apple chips | -| `modern` | x86_64, modern | Current-generation processors | -| `exotic` | POWER8, SPARC | Datacenter/research hardware | -| `arm` | aarch64, armv7 | ARM processors | - -### Equal Bucket Split - -Each epoch's reward pot (1.5 RTC) is divided equally among buckets that have at least one active miner. Within each bucket, rewards are distributed by time-aged antiquity multiplier (per RIP-200). 
- -``` -Bucket share = Total reward / Number of active buckets -Miner share = Bucket share × (miner_weight / bucket_total_weight) -``` - -### Fleet Detection Signals - -Three vectors detect coordinated mining operations: - -1. **IP/Subnet Clustering** (40% weight) — miners sharing /24 subnets -2. **Fingerprint Similarity** (40% weight) — identical hardware fingerprints -3. **Attestation Timing Correlation** (20% weight) — synchronized submission patterns - -### Fleet Score - -``` -fleet_score = (ip_score × 0.4) + (fingerprint_score × 0.4) + (timing_score × 0.2) -``` - -- Score 0.0–0.3: CLEAN (no penalty) -- Score 0.3–0.7: MODERATE (reward decay applied) -- Score 0.7–1.0: SEVERE (significant penalty) - -### Fleet Decay - -```python -effective_multiplier = base × (1.0 - fleet_score × FLEET_DECAY_COEFF) -# Floor at 60% of base multiplier -``` - -### Minimum Detection Threshold - -Fleet detection only activates when 4+ miners share signals, preventing false positives on small networks. - -## Economics - -| Scenario | Without RIP-201 | With RIP-201 | -|----------|-----------------|--------------| -| Solo G4 miner | ~2% of pot | ~16.7% of pot (1/6 buckets) | -| 500 modern boxes | ~99% of pot | ~16.7% of pot (shared) | -| Fleet per-box ROI | 200x solo | 0.005x solo | -| $5M fleet revenue | ~$3,000/year | ~$27/year | -| Fleet payback period | ~1.5 years | ~182,648 years | - -## Implementation - -- `fleet_immune_system.py` — Core module (signals, scoring, bucket split) -- `rip201_server_patch.py` — Automated patcher for existing server code - -## Red Team Bounties - -600 RTC in bounties for breaking this system: -- Fleet Detection Bypass: 200 RTC -- Bucket Normalization Gaming: 150 RTC -- False Positive Testing: 100 RTC (+50 bonus) -- Fleet Score Manipulation: 150 RTC - -## Design Philosophy - -> "Diversity IS the immune system. One of everything beats a hundred of one thing." 
- -The system makes hardware diversity structurally profitable and homogeneous fleets structurally unprofitable, regardless of detection accuracy. Detection is the second line of defense — the economics already killed the attack. +# RIP-201: Fleet Detection Immune System + +**Status**: Deployed (2026-02-28) +**Author**: Scott Boudreaux (Elyan Labs) +**Type**: Economic Security +**Requires**: RIP-200 (Round-Robin Consensus) + +## Abstract + +RIP-201 introduces a fleet detection immune system that makes large-scale coordinated mining attacks economically worthless. It replaces per-CPU reward distribution with Equal Bucket Split, where the epoch reward pot is divided equally among active hardware *classes*, not individual CPUs. + +## Motivation + +Under RIP-200, rewards are distributed pro-rata by time-aged antiquity multiplier. A fleet of 500 identical modern boxes could claim ~99% of the reward pot by sheer count, overwhelming solo miners despite the 1 CPU = 1 Vote design. + +**Without RIP-201**: 500 modern boxes earn 200x what a solo G4 earns. +**With RIP-201**: 500 modern boxes share one bucket slice. Solo G4 gets its own. Fleet ROI: $27/year on $5M investment. + +## Specification + +### Hardware Buckets + +Miners are classified into six hardware buckets: + +| Bucket | Architectures | Description | +|--------|--------------|-------------| +| `vintage_powerpc` | G3, G4, G5, PowerPC | Classic Macs, pre-Intel | +| `vintage_x86` | Pentium, Core2, retro, Nehalem, Sandy Bridge | Pre-2012 x86 | +| `apple_silicon` | M1, M2, M3 | Modern Apple chips | +| `modern` | x86_64, modern | Current-generation processors | +| `exotic` | POWER8, SPARC | Datacenter/research hardware | +| `arm` | aarch64, armv7 | ARM processors | + +### Equal Bucket Split + +Each epoch's reward pot (1.5 RTC) is divided equally among buckets that have at least one active miner. Within each bucket, rewards are distributed by time-aged antiquity multiplier (per RIP-200). 
+ +``` +Bucket share = Total reward / Number of active buckets +Miner share = Bucket share × (miner_weight / bucket_total_weight) +``` + +### Fleet Detection Signals + +Three vectors detect coordinated mining operations: + +1. **IP/Subnet Clustering** (40% weight) — miners sharing /24 subnets +2. **Fingerprint Similarity** (40% weight) — identical hardware fingerprints +3. **Attestation Timing Correlation** (20% weight) — synchronized submission patterns + +### Fleet Score + +``` +fleet_score = (ip_score × 0.4) + (fingerprint_score × 0.4) + (timing_score × 0.2) +``` + +- Score 0.0–0.3: CLEAN (no penalty) +- Score 0.3–0.7: MODERATE (reward decay applied) +- Score 0.7–1.0: SEVERE (significant penalty) + +### Fleet Decay + +```python +effective_multiplier = base × (1.0 - fleet_score × FLEET_DECAY_COEFF) +# Floor at 60% of base multiplier +``` + +### Minimum Detection Threshold + +Fleet detection only activates when 4+ miners share signals, preventing false positives on small networks. + +## Economics + +| Scenario | Without RIP-201 | With RIP-201 | +|----------|-----------------|--------------| +| Solo G4 miner | ~2% of pot | ~16.7% of pot (1/6 buckets) | +| 500 modern boxes | ~99% of pot | ~16.7% of pot (shared) | +| Fleet per-box ROI | 200x solo | 0.005x solo | +| $5M fleet revenue | ~$3,000/year | ~$27/year | +| Fleet payback period | ~1.5 years | ~182,648 years | + +## Implementation + +- `fleet_immune_system.py` — Core module (signals, scoring, bucket split) +- `rip201_server_patch.py` — Automated patcher for existing server code + +## Red Team Bounties + +600 RTC in bounties for breaking this system: +- Fleet Detection Bypass: 200 RTC +- Bucket Normalization Gaming: 150 RTC +- False Positive Testing: 100 RTC (+50 bonus) +- Fleet Score Manipulation: 150 RTC + +## Design Philosophy + +> "Diversity IS the immune system. One of everything beats a hundred of one thing." 
+ +The system makes hardware diversity structurally profitable and homogeneous fleets structurally unprofitable, regardless of detection accuracy. Detection is the second line of defense — the economics already killed the attack. diff --git a/rips/docs/RIP-0304-retro-console-mining.md b/rips/docs/RIP-0304-retro-console-mining.md index edb8bfe7..d462f086 100644 --- a/rips/docs/RIP-0304-retro-console-mining.md +++ b/rips/docs/RIP-0304-retro-console-mining.md @@ -1,402 +1,402 @@ ---- -title: "RIP-0304: Retro Console Mining via Pico Serial Bridge" -author: Scott Boudreaux (Elyan Labs) -status: Draft -type: Standards Track -category: Core -created: 2026-02-28 -requires: RIP-0001, RIP-0007, RIP-0200, RIP-0201 -license: Apache 2.0 ---- - -# Summary - -This RIP formalizes the architecture for retro game console participation in -RustChain's Proof of Antiquity consensus. A Raspberry Pi Pico microcontroller -serves as a serial-to-controller bridge, enabling consoles from 1983 onward -(NES, SNES, N64, Genesis, Game Boy, Saturn, PS1) to attest hardware identity -and earn RTC rewards. This is, to our knowledge, the first blockchain to mine -on vintage game console silicon. - -# Abstract - -Vintage game consoles contain some of the most widely manufactured CPUs in -computing history — over 500 million units across the NES, SNES, N64, Genesis, -Game Boy, and PlayStation families alone. These consoles run CPUs dating back to -1975 (MOS 6502) through 1996 (MIPS R4300i), giving them extreme antiquity value -under RIP-0001. - -RIP-304 defines: - -1. A **Pico serial-to-controller bridge** that connects consoles to the - RustChain network through their controller ports -2. **Console-specific CPU aliases** mapped to existing antiquity multipliers -3. **Controller port timing fingerprinting** as an anti-emulation mechanism -4. A dedicated **`retro_console` fleet bucket** under RIP-201 -5. **Attestation payload extensions** for bridge-mediated hardware - -# Motivation - -## Why Consoles? 
- -- **Ubiquity**: More NES units exist (61.9M) than most server CPUs ever - manufactured. SNES (49.1M), N64 (32.9M), Genesis (30.8M), Game Boy (118.7M), - PS1 (102.5M) add hundreds of millions more. -- **Extreme Antiquity**: The NES Ricoh 2A03 derives from the MOS 6502 (1975). - The SNES Ricoh 5A22 uses the WDC 65C816 (1983). These CPUs predate the IBM PC. -- **Unfakeable Silicon**: Console hardware has physical timing characteristics - (bus jitter, clock drift, controller port latency) that no software emulator - reproduces at the nanosecond level. -- **Preservation Incentive**: RTC rewards create economic incentive to keep - vintage consoles operational — directly aligned with PoA's sustainability goals. - -## Proven Feasibility - -The **Legend of Elya** project demonstrates real computation on Nintendo 64 -hardware: - -- 4-layer nano-GPT with 819,000 parameters -- Q8 quantized weights (868 KB) loaded into N64 RDRAM -- Running on the MIPS R4300i FPU at 93.75 MHz (float32, hard-float) -- Achieves 1-3 tokens/second on real hardware -- ROM format: `.z64` (big-endian MIPS) - -If an N64 can run a neural network, it can certainly compute attestation hashes. - -# Specification - -## 1. Pico Serial-to-Controller Bridge - -### Architecture - -``` -┌──────────────────────┐ ┌─────────────────────┐ ┌─────────────┐ -│ RETRO CONSOLE │ │ RASPBERRY PI PICO │ │ RUSTCHAIN │ -│ │ │ (RP2040, 264KB) │ │ NODE │ -│ CPU ──── Bus ──┐ │ │ │ │ │ -│ PPU │ │ Ctrl │ PIO ← Controller │ USB │ /attest/ │ -│ APU Controller◄──┼──Port──► │ State Machine ├──Serial──┤ submit │ -│ Port │ │ Wires │ │ to PC │ │ -│ │ │ │ Bus Timing Analysis │ or WiFi │ Validates │ -│ Cartridge Slot │ │ │ Entropy Collector │ │ fingerprint │ -│ (ROM + SRAM) │ │ │ Attestation Builder │ │ │ -└──────────────────────┘ └─────────────────────┘ └─────────────┘ -``` - -### How It Works - -1. **The console runs a custom ROM** (cartridge) containing attestation logic. 
- The ROM exercises the CPU (hash computation, timing loops) and outputs - results through the controller port data lines. - -2. **The Pico connects to the controller port** using a custom - serial-to-controller adapter. The Pico's PIO (Programmable I/O) state - machines implement the console's controller protocol at hardware speed - (125 MHz PIO clock — sufficient for all console protocols). - -3. **The Pico reads computation results** from the console via controller port - data patterns and simultaneously measures bus timing at sub-microsecond - resolution for hardware fingerprinting. - -4. **The Pico relays attestation data** to the RustChain node via: - - **USB Serial** to a host PC running the miner client (primary) - - **WiFi** (Pico W variant) directly to the RustChain node (standalone) - -### Controller Port Protocols - -| Console | Protocol | Data Rate | Polling Rate | Timing Resolution | -|---------|----------|-----------|--------------|-------------------| -| NES | Serial shift register (clock + latch + data) | 8 bits/poll | ~60 Hz | ~12 us/bit | -| SNES | Serial shift register (16-bit extended NES) | 16 bits/poll | ~60 Hz | ~12 us/bit | -| N64 | Joybus (half-duplex, 3.3V) | 4 Mbit/s | On-demand | ~250 ns/bit | -| Genesis | 6-button parallel (active polling) | 6 bits/poll | ~60 Hz | ~16.7 ms/frame | -| Game Boy | Link cable SPI | 8 Kbit/s | Software-driven | ~122 us/bit | -| Saturn | Parallel SMPC | 8+ bits/poll | ~60 Hz | ~16.7 ms/frame | -| PS1 | SPI-like serial | 250 Kbit/s | ~60 Hz | ~4 us/bit | - -### Pico Hardware Requirements - -- **Raspberry Pi Pico** (RP2040): $4 USD, dual ARM Cortex-M0+ @ 133 MHz -- **Pico W** variant adds WiFi for standalone operation -- **Custom adapter PCB** or hand-wired connector matching target console -- **Each RP2040 has a unique board ID** burned into OTP ROM — used as device - identifier in attestation payloads - -## 2. 
Console Hardware Tiers - -Console CPUs map to existing antiquity multiplier families with console-specific -aliases for identification and fleet bucketing. - -| Console | CPU | CPU Family | Release Year | Alias | Base Mult | -|---------|-----|------------|-------------|-------|-----------| -| NES/Famicom | Ricoh 2A03 (6502 derivative) | 6502 | 1983 | `nes_6502` | 2.8x | -| Game Boy | Sharp LR35902 (Z80 derivative) | Z80 | 1989 | `gameboy_z80` | 2.6x | -| Sega Master System | Zilog Z80 | Z80 | 1986 | `sms_z80` | 2.6x | -| Sega Genesis | Motorola 68000 | 68000 | 1988 | `genesis_68000` | 2.5x | -| SNES/Super Famicom | Ricoh 5A22 (65C816) | 65C816 | 1990 | `snes_65c816` | 2.7x | -| Sega Saturn | Hitachi SH-2 (dual) | SH-2 | 1994 | `saturn_sh2` | 2.6x | -| PlayStation 1 | MIPS R3000A | MIPS R3000 | 1994 | `ps1_mips` | 2.8x | -| Nintendo 64 | NEC VR4300 (MIPS R4300i) | MIPS R5000 | 1996 | `n64_mips` | 2.5x | -| Game Boy Advance | ARM7TDMI | ARM7 | 2001 | `gba_arm7` | 2.3x | - -### Generic CPU Family Additions - -These CPU families are used across multiple platforms (computers and consoles) -and receive a generic entry alongside console-specific aliases: - -| Family | Base Mult | Used In | -|--------|-----------|---------| -| `6502` | 2.8x | NES, Apple II, Commodore 64, Atari 2600 | -| `65c816` | 2.7x | SNES, Apple IIGS | -| `z80` | 2.6x | Game Boy, Sega SMS, MSX, ZX Spectrum | -| `sh2` | 2.6x | Sega Saturn, Sega 32X | - -### Antiquity Decay - -Console multipliers follow the standard RIP-200 time-aging formula: - -``` -aged_multiplier = 1.0 + (base - 1.0) * (1 - 0.15 * chain_age_years) -``` - -Full decay to 1.0x after ~16.67 years of chain operation. - -## 3. Console-Specific Fingerprinting - -Consoles cannot run Python, access `/proc/cpuinfo`, or perform standard -fingerprint checks. 
Instead, the Pico bridge measures physical signals from -the console hardware: - -### Controller Port Timing Fingerprint - -Each console polls its controller port at a nominally fixed interval (e.g., -60 Hz for NTSC). Real hardware exhibits measurable jitter: - -- **Crystal oscillator drift**: The console's master clock has age-dependent - frequency drift (same principle as RIP-0007 Check 1) -- **Bus contention jitter**: CPU/PPU/DMA bus arbitration creates variable - controller port response times -- **Thermal drift**: Console temperature affects oscillator frequency - -The Pico captures timing of each controller poll (mean, stdev, coefficient of -variation) over 500+ samples. This replaces the standard `clock_drift` check. - -**Threshold**: CV below 0.0001 flags emulation (emulators poll at perfect -intervals with zero jitter). - -### ROM Execution Timing - -The cartridge ROM computes a SHA-256 of the attestation nonce using the -console's native CPU. The Pico measures execution time: - -- Real N64 R4300i @ 93.75 MHz: ~847ms for a SHA-256 -- Real NES 2A03 @ 1.79 MHz: significantly longer, with characteristic - per-instruction timing -- Emulators running on modern CPUs at GHz speeds must artificially throttle, - creating detectable timing quantization artifacts - -### Anti-Emulation Signals - -Software emulators (Project64, SNES9x, FCEUX, Mednafen, etc.) exhibit: - -1. **Zero controller port jitter** — perfect timing from software polling loops -2. **Quantized execution timing** — modern CPU clock granularity leaks through -3. **Uniform thermal response** — no physical silicon temperature effects -4. **Perfect bus timing** — no DMA contention or bus arbitration artifacts - -The Pico's PIO state machines sample at 125 MHz — fast enough to detect these -artifacts even on N64's 4 Mbit/s Joybus protocol. - -## 4. 
Attestation Payload Format - -Extends the standard RustChain attestation format (RIP-0007) with bridge and -console fields: - -```json -{ - "miner": "n64-scott-unit1", - "miner_id": "n64-pico-bridge-001", - "nonce": "", - "report": { - "nonce": "", - "commitment": "", - "derived": { - "ctrl_port_timing_mean_ns": 16667000, - "ctrl_port_timing_stdev_ns": 1250, - "ctrl_port_cv": 0.075, - "rom_hash_result": "", - "rom_hash_time_us": 847000, - "bus_jitter_samples": 500 - }, - "entropy_score": 0.075 - }, - "device": { - "family": "console", - "arch": "n64_mips", - "model": "Nintendo 64 NUS-001", - "cpu": "NEC VR4300 (MIPS R4300i) 93.75MHz", - "cores": 1, - "memory_mb": 4, - "bridge_type": "pico_serial", - "bridge_firmware": "1.0.0" - }, - "signals": { - "pico_serial": "", - "ctrl_port_protocol": "joybus", - "rom_id": "rustchain_attest_n64_v1" - }, - "fingerprint": { - "all_passed": true, - "bridge_type": "pico_serial", - "checks": { - "ctrl_port_timing": { - "passed": true, - "data": {"cv": 0.075, "samples": 500} - }, - "rom_execution_timing": { - "passed": true, - "data": {"hash_time_us": 847000} - }, - "bus_jitter": { - "passed": true, - "data": {"jitter_stdev_ns": 1250} - }, - "anti_emulation": { - "passed": true, - "data": {"emulator_indicators": []} - } - } - } -} -``` - -### Bridge-Type Detection - -Server-side `validate_fingerprint_data()` detects `bridge_type: "pico_serial"` -and accepts console-specific checks in place of standard checks: - -| Standard Check | Console Equivalent | Source | -|---------------|--------------------|--------| -| `clock_drift` | `ctrl_port_timing` | Pico PIO measurement | -| `cache_timing` | `rom_execution_timing` | Pico elapsed timer | -| `simd_identity` | N/A (not applicable) | Skipped for consoles | -| `thermal_drift` | Implicit in ctrl_port_timing drift | Pico PIO measurement | -| `instruction_jitter` | `bus_jitter` | Pico PIO measurement | -| `anti_emulation` | `anti_emulation` | Timing CV threshold | - -## 5. 
Fleet Bucket Integration (RIP-201) - -Console miners receive their own fleet bucket (`retro_console`) to prevent: - -1. **Drowning**: A few console miners shouldn't compete against dozens of x86 - miners in the `modern` bucket -2. **Domination**: A console farm shouldn't dominate the `exotic` bucket that - includes POWER8, SPARC, and RISC-V machines - -```python -HARDWARE_BUCKETS["retro_console"] = [ - "nes_6502", "snes_65c816", "n64_mips", "genesis_68000", - "gameboy_z80", "sms_z80", "saturn_sh2", "ps1_mips", "gba_arm7", - "6502", "65c816", "z80", "sh2", -] -``` - -Console farm mitigation follows existing RIP-201 fleet detection: IP clustering, -timing correlation, and fingerprint similarity analysis. - -## 6. Security Considerations - -### Controller Port Replay Attack - -An attacker records real console timing data and replays it. - -**Mitigation**: Challenge-response protocol. Each attestation requires a fresh -nonce from the node. The ROM on the console must compute `SHA-256(nonce || wallet)` -using the console's native CPU. The Pico cannot precompute this without knowing -the nonce in advance. - -### Pico Firmware Spoofing - -An attacker modifies Pico firmware to fabricate timing data. - -**Mitigation**: The RP2040 has a unique board ID in OTP ROM that cannot be -reprogrammed. The attestation includes this ID, and the server tracks Pico IDs -like MAC addresses. Additionally, the ROM execution timing must match the -known performance profile of the claimed console CPU — a fabricated 847ms -SHA-256 time only makes sense for an R4300i at 93.75 MHz. - -### Emulator + Fake Bridge - -An attacker runs an emulator on a PC and writes software pretending to be a Pico. 
- -**Mitigation**: Multiple layers: -- USB device descriptors identify real RP2040 vs generic serial adapters -- Controller port timing statistics from real hardware have specific - distributions (non-Gaussian jitter from bus contention) that emulators - cannot reproduce -- Timing CV below 0.0001 flags emulation (identical to existing RIP-0007 - check) - -### Console Farm (100 real NES units) - -**Mitigation**: RIP-201 fleet detection applies. All NES units land in the -`retro_console` bucket and share one bucket's worth of rewards. Fleet scoring -detects IP clustering and correlated attestation timing. Equal Bucket Split -ensures console miners receive a fair but bounded share. - -## 7. Future Extensions - -### Phase 2: Additional Consoles - -| Console | CPU | Status | -|---------|-----|--------| -| Atari 2600 | MOS 6507 (6502 variant) | Feasible — paddle port I/O | -| Atari 7800 | Sally (6502C variant) | Feasible — controller port | -| Neo Geo | Motorola 68000 | Feasible — controller port | -| TurboGrafx-16 | HuC6280 (65C02) | Feasible — controller port | -| Dreamcast | Hitachi SH-4 | Feasible — Maple Bus via Pico | -| GameCube | IBM Gekko (PowerPC 750) | Feasible — controller port | - -### Phase 3: Pico W Standalone Mode - -The Pico W variant includes WiFi, enabling fully standalone operation: -console + Pico + power = mining node. No host PC required. - -### Phase 4: Multi-Console Bridge - -A single Pico board with multiple controller port connectors, allowing one -bridge to manage several consoles simultaneously. 
- -# Reference Implementation - -## Files Modified - -- `node/rip_200_round_robin_1cpu1vote.py` — Console CPU aliases in - `ANTIQUITY_MULTIPLIERS` -- `rips/python/rustchain/fleet_immune_system.py` — `retro_console` bucket in - `HARDWARE_BUCKETS` -- `node/rustchain_v2_integrated_v2.2.1_rip200.py` — `console` family in - `HARDWARE_WEIGHTS`, bridge-type detection in `validate_fingerprint_data()` - -## Files Created - -- `rips/docs/RIP-0304-retro-console-mining.md` — This specification - -## Future Files (Not in This RIP) - -- `miners/console/pico_bridge_firmware/` — RP2040 firmware per console -- `miners/console/n64_attestation_rom/` — N64 attestation ROM -- `miners/console/nes_attestation_rom/` — NES attestation ROM -- `miners/console/snes_attestation_rom/` — SNES attestation ROM - -# Acknowledgments - -- **Legend of Elya** — Proved neural network inference on N64 MIPS R4300i FPU -- **RIP-0001** (Sophia Core Team) — Proof of Antiquity consensus foundation -- **RIP-0007** (Sophia Core Team) — Entropy fingerprinting framework -- **RIP-0200** — 1 CPU = 1 Vote round-robin consensus -- **RIP-0201** — Fleet Detection Immune System - -# Copyright - -This document is licensed under Apache License, Version 2.0. +--- +title: "RIP-0304: Retro Console Mining via Pico Serial Bridge" +author: Scott Boudreaux (Elyan Labs) +status: Draft +type: Standards Track +category: Core +created: 2026-02-28 +requires: RIP-0001, RIP-0007, RIP-0200, RIP-0201 +license: Apache 2.0 +--- + +# Summary + +This RIP formalizes the architecture for retro game console participation in +RustChain's Proof of Antiquity consensus. A Raspberry Pi Pico microcontroller +serves as a serial-to-controller bridge, enabling consoles from 1983 onward +(NES, SNES, N64, Genesis, Game Boy, Saturn, PS1) to attest hardware identity +and earn RTC rewards. This is, to our knowledge, the first blockchain to mine +on vintage game console silicon. 
+ +# Abstract + +Vintage game consoles contain some of the most widely manufactured CPUs in +computing history — over 500 million units across the NES, SNES, N64, Genesis, +Game Boy, and PlayStation families alone. These consoles run CPUs dating back to +1975 (MOS 6502) through 1996 (MIPS R4300i), giving them extreme antiquity value +under RIP-0001. + +RIP-304 defines: + +1. A **Pico serial-to-controller bridge** that connects consoles to the + RustChain network through their controller ports +2. **Console-specific CPU aliases** mapped to existing antiquity multipliers +3. **Controller port timing fingerprinting** as an anti-emulation mechanism +4. A dedicated **`retro_console` fleet bucket** under RIP-201 +5. **Attestation payload extensions** for bridge-mediated hardware + +# Motivation + +## Why Consoles? + +- **Ubiquity**: More NES units exist (61.9M) than most server CPUs ever + manufactured. SNES (49.1M), N64 (32.9M), Genesis (30.8M), Game Boy (118.7M), + PS1 (102.5M) add hundreds of millions more. +- **Extreme Antiquity**: The NES Ricoh 2A03 derives from the MOS 6502 (1975). + The SNES Ricoh 5A22 uses the WDC 65C816 (1983). These CPUs predate the IBM PC. +- **Unfakeable Silicon**: Console hardware has physical timing characteristics + (bus jitter, clock drift, controller port latency) that no software emulator + reproduces at the nanosecond level. +- **Preservation Incentive**: RTC rewards create economic incentive to keep + vintage consoles operational — directly aligned with PoA's sustainability goals. + +## Proven Feasibility + +The **Legend of Elya** project demonstrates real computation on Nintendo 64 +hardware: + +- 4-layer nano-GPT with 819,000 parameters +- Q8 quantized weights (868 KB) loaded into N64 RDRAM +- Running on the MIPS R4300i FPU at 93.75 MHz (float32, hard-float) +- Achieves 1-3 tokens/second on real hardware +- ROM format: `.z64` (big-endian MIPS) + +If an N64 can run a neural network, it can certainly compute attestation hashes. 
+ +# Specification + +## 1. Pico Serial-to-Controller Bridge + +### Architecture + +``` +┌──────────────────────┐ ┌─────────────────────┐ ┌─────────────┐ +│ RETRO CONSOLE │ │ RASPBERRY PI PICO │ │ RUSTCHAIN │ +│ │ │ (RP2040, 264KB) │ │ NODE │ +│ CPU ──── Bus ──┐ │ │ │ │ │ +│ PPU │ │ Ctrl │ PIO ← Controller │ USB │ /attest/ │ +│ APU Controller◄──┼──Port──► │ State Machine ├──Serial──┤ submit │ +│ Port │ │ Wires │ │ to PC │ │ +│ │ │ │ Bus Timing Analysis │ or WiFi │ Validates │ +│ Cartridge Slot │ │ │ Entropy Collector │ │ fingerprint │ +│ (ROM + SRAM) │ │ │ Attestation Builder │ │ │ +└──────────────────────┘ └─────────────────────┘ └─────────────┘ +``` + +### How It Works + +1. **The console runs a custom ROM** (cartridge) containing attestation logic. + The ROM exercises the CPU (hash computation, timing loops) and outputs + results through the controller port data lines. + +2. **The Pico connects to the controller port** using a custom + serial-to-controller adapter. The Pico's PIO (Programmable I/O) state + machines implement the console's controller protocol at hardware speed + (125 MHz PIO clock — sufficient for all console protocols). + +3. **The Pico reads computation results** from the console via controller port + data patterns and simultaneously measures bus timing at sub-microsecond + resolution for hardware fingerprinting. + +4. 
**The Pico relays attestation data** to the RustChain node via: + - **USB Serial** to a host PC running the miner client (primary) + - **WiFi** (Pico W variant) directly to the RustChain node (standalone) + +### Controller Port Protocols + +| Console | Protocol | Data Rate | Polling Rate | Timing Resolution | +|---------|----------|-----------|--------------|-------------------| +| NES | Serial shift register (clock + latch + data) | 8 bits/poll | ~60 Hz | ~12 us/bit | +| SNES | Serial shift register (16-bit extended NES) | 16 bits/poll | ~60 Hz | ~12 us/bit | +| N64 | Joybus (half-duplex, 3.3V) | 4 Mbit/s | On-demand | ~250 ns/bit | +| Genesis | 6-button parallel (active polling) | 6 bits/poll | ~60 Hz | ~16.7 ms/frame | +| Game Boy | Link cable SPI | 8 Kbit/s | Software-driven | ~122 us/bit | +| Saturn | Parallel SMPC | 8+ bits/poll | ~60 Hz | ~16.7 ms/frame | +| PS1 | SPI-like serial | 250 Kbit/s | ~60 Hz | ~4 us/bit | + +### Pico Hardware Requirements + +- **Raspberry Pi Pico** (RP2040): $4 USD, dual ARM Cortex-M0+ @ 133 MHz +- **Pico W** variant adds WiFi for standalone operation +- **Custom adapter PCB** or hand-wired connector matching target console +- **Each RP2040 has a unique board ID** burned into OTP ROM — used as device + identifier in attestation payloads + +## 2. Console Hardware Tiers + +Console CPUs map to existing antiquity multiplier families with console-specific +aliases for identification and fleet bucketing. 
+ +| Console | CPU | CPU Family | Release Year | Alias | Base Mult | +|---------|-----|------------|-------------|-------|-----------| +| NES/Famicom | Ricoh 2A03 (6502 derivative) | 6502 | 1983 | `nes_6502` | 2.8x | +| Game Boy | Sharp LR35902 (Z80 derivative) | Z80 | 1989 | `gameboy_z80` | 2.6x | +| Sega Master System | Zilog Z80 | Z80 | 1986 | `sms_z80` | 2.6x | +| Sega Genesis | Motorola 68000 | 68000 | 1988 | `genesis_68000` | 2.5x | +| SNES/Super Famicom | Ricoh 5A22 (65C816) | 65C816 | 1990 | `snes_65c816` | 2.7x | +| Sega Saturn | Hitachi SH-2 (dual) | SH-2 | 1994 | `saturn_sh2` | 2.6x | +| PlayStation 1 | MIPS R3000A | MIPS R3000 | 1994 | `ps1_mips` | 2.8x | +| Nintendo 64 | NEC VR4300 (MIPS R4300i) | MIPS R5000 | 1996 | `n64_mips` | 2.5x | +| Game Boy Advance | ARM7TDMI | ARM7 | 2001 | `gba_arm7` | 2.3x | + +### Generic CPU Family Additions + +These CPU families are used across multiple platforms (computers and consoles) +and receive a generic entry alongside console-specific aliases: + +| Family | Base Mult | Used In | +|--------|-----------|---------| +| `6502` | 2.8x | NES, Apple II, Commodore 64, Atari 2600 | +| `65c816` | 2.7x | SNES, Apple IIGS | +| `z80` | 2.6x | Game Boy, Sega SMS, MSX, ZX Spectrum | +| `sh2` | 2.6x | Sega Saturn, Sega 32X | + +### Antiquity Decay + +Console multipliers follow the standard RIP-200 time-aging formula: + +``` +aged_multiplier = 1.0 + (base - 1.0) * (1 - 0.15 * chain_age_years) +``` + +Full decay to 1.0x after ~6.67 years of chain operation. + +## 3. Console-Specific Fingerprinting + +Consoles cannot run Python, access `/proc/cpuinfo`, or perform standard +fingerprint checks. Instead, the Pico bridge measures physical signals from +the console hardware: + +### Controller Port Timing Fingerprint + +Each console polls its controller port at a nominally fixed interval (e.g., +60 Hz for NTSC). 
Real hardware exhibits measurable jitter: + +- **Crystal oscillator drift**: The console's master clock has age-dependent + frequency drift (same principle as RIP-0007 Check 1) +- **Bus contention jitter**: CPU/PPU/DMA bus arbitration creates variable + controller port response times +- **Thermal drift**: Console temperature affects oscillator frequency + +The Pico captures timing of each controller poll (mean, stdev, coefficient of +variation) over 500+ samples. This replaces the standard `clock_drift` check. + +**Threshold**: CV below 0.0001 flags emulation (emulators poll at perfect +intervals with zero jitter). + +### ROM Execution Timing + +The cartridge ROM computes a SHA-256 of the attestation nonce using the +console's native CPU. The Pico measures execution time: + +- Real N64 R4300i @ 93.75 MHz: ~847ms for a SHA-256 +- Real NES 2A03 @ 1.79 MHz: significantly longer, with characteristic + per-instruction timing +- Emulators running on modern CPUs at GHz speeds must artificially throttle, + creating detectable timing quantization artifacts + +### Anti-Emulation Signals + +Software emulators (Project64, SNES9x, FCEUX, Mednafen, etc.) exhibit: + +1. **Zero controller port jitter** — perfect timing from software polling loops +2. **Quantized execution timing** — modern CPU clock granularity leaks through +3. **Uniform thermal response** — no physical silicon temperature effects +4. **Perfect bus timing** — no DMA contention or bus arbitration artifacts + +The Pico's PIO state machines sample at 125 MHz — fast enough to detect these +artifacts even on N64's 4 Mbit/s Joybus protocol. + +## 4. 
Attestation Payload Format + +Extends the standard RustChain attestation format (RIP-0007) with bridge and +console fields: + +```json +{ + "miner": "n64-scott-unit1", + "miner_id": "n64-pico-bridge-001", + "nonce": "", + "report": { + "nonce": "", + "commitment": "", + "derived": { + "ctrl_port_timing_mean_ns": 16667000, + "ctrl_port_timing_stdev_ns": 1250, + "ctrl_port_cv": 0.075, + "rom_hash_result": "", + "rom_hash_time_us": 847000, + "bus_jitter_samples": 500 + }, + "entropy_score": 0.075 + }, + "device": { + "family": "console", + "arch": "n64_mips", + "model": "Nintendo 64 NUS-001", + "cpu": "NEC VR4300 (MIPS R4300i) 93.75MHz", + "cores": 1, + "memory_mb": 4, + "bridge_type": "pico_serial", + "bridge_firmware": "1.0.0" + }, + "signals": { + "pico_serial": "", + "ctrl_port_protocol": "joybus", + "rom_id": "rustchain_attest_n64_v1" + }, + "fingerprint": { + "all_passed": true, + "bridge_type": "pico_serial", + "checks": { + "ctrl_port_timing": { + "passed": true, + "data": {"cv": 0.075, "samples": 500} + }, + "rom_execution_timing": { + "passed": true, + "data": {"hash_time_us": 847000} + }, + "bus_jitter": { + "passed": true, + "data": {"jitter_stdev_ns": 1250} + }, + "anti_emulation": { + "passed": true, + "data": {"emulator_indicators": []} + } + } + } +} +``` + +### Bridge-Type Detection + +Server-side `validate_fingerprint_data()` detects `bridge_type: "pico_serial"` +and accepts console-specific checks in place of standard checks: + +| Standard Check | Console Equivalent | Source | +|---------------|--------------------|--------| +| `clock_drift` | `ctrl_port_timing` | Pico PIO measurement | +| `cache_timing` | `rom_execution_timing` | Pico elapsed timer | +| `simd_identity` | N/A (not applicable) | Skipped for consoles | +| `thermal_drift` | Implicit in ctrl_port_timing drift | Pico PIO measurement | +| `instruction_jitter` | `bus_jitter` | Pico PIO measurement | +| `anti_emulation` | `anti_emulation` | Timing CV threshold | + +## 5. 
Fleet Bucket Integration (RIP-201) + +Console miners receive their own fleet bucket (`retro_console`) to prevent: + +1. **Drowning**: A few console miners shouldn't compete against dozens of x86 + miners in the `modern` bucket +2. **Domination**: A console farm shouldn't dominate the `exotic` bucket that + includes POWER8, SPARC, and RISC-V machines + +```python +HARDWARE_BUCKETS["retro_console"] = [ + "nes_6502", "snes_65c816", "n64_mips", "genesis_68000", + "gameboy_z80", "sms_z80", "saturn_sh2", "ps1_mips", "gba_arm7", + "6502", "65c816", "z80", "sh2", +] +``` + +Console farm mitigation follows existing RIP-201 fleet detection: IP clustering, +timing correlation, and fingerprint similarity analysis. + +## 6. Security Considerations + +### Controller Port Replay Attack + +An attacker records real console timing data and replays it. + +**Mitigation**: Challenge-response protocol. Each attestation requires a fresh +nonce from the node. The ROM on the console must compute `SHA-256(nonce || wallet)` +using the console's native CPU. The Pico cannot precompute this without knowing +the nonce in advance. + +### Pico Firmware Spoofing + +An attacker modifies Pico firmware to fabricate timing data. + +**Mitigation**: The RP2040 has a unique board ID in OTP ROM that cannot be +reprogrammed. The attestation includes this ID, and the server tracks Pico IDs +like MAC addresses. Additionally, the ROM execution timing must match the +known performance profile of the claimed console CPU — a fabricated 847ms +SHA-256 time only makes sense for an R4300i at 93.75 MHz. + +### Emulator + Fake Bridge + +An attacker runs an emulator on a PC and writes software pretending to be a Pico. 
+ +**Mitigation**: Multiple layers: +- USB device descriptors identify real RP2040 vs generic serial adapters +- Controller port timing statistics from real hardware have specific + distributions (non-Gaussian jitter from bus contention) that emulators + cannot reproduce +- Timing CV below 0.0001 flags emulation (identical to existing RIP-0007 + check) + +### Console Farm (100 real NES units) + +**Mitigation**: RIP-201 fleet detection applies. All NES units land in the +`retro_console` bucket and share one bucket's worth of rewards. Fleet scoring +detects IP clustering and correlated attestation timing. Equal Bucket Split +ensures console miners receive a fair but bounded share. + +## 7. Future Extensions + +### Phase 2: Additional Consoles + +| Console | CPU | Status | +|---------|-----|--------| +| Atari 2600 | MOS 6507 (6502 variant) | Feasible — paddle port I/O | +| Atari 7800 | Sally (6502C variant) | Feasible — controller port | +| Neo Geo | Motorola 68000 | Feasible — controller port | +| TurboGrafx-16 | HuC6280 (65C02) | Feasible — controller port | +| Dreamcast | Hitachi SH-4 | Feasible — Maple Bus via Pico | +| GameCube | IBM Gekko (PowerPC 750) | Feasible — controller port | + +### Phase 3: Pico W Standalone Mode + +The Pico W variant includes WiFi, enabling fully standalone operation: +console + Pico + power = mining node. No host PC required. + +### Phase 4: Multi-Console Bridge + +A single Pico board with multiple controller port connectors, allowing one +bridge to manage several consoles simultaneously. 
+ +# Reference Implementation + +## Files Modified + +- `node/rip_200_round_robin_1cpu1vote.py` — Console CPU aliases in + `ANTIQUITY_MULTIPLIERS` +- `rips/python/rustchain/fleet_immune_system.py` — `retro_console` bucket in + `HARDWARE_BUCKETS` +- `node/rustchain_v2_integrated_v2.2.1_rip200.py` — `console` family in + `HARDWARE_WEIGHTS`, bridge-type detection in `validate_fingerprint_data()` + +## Files Created + +- `rips/docs/RIP-0304-retro-console-mining.md` — This specification + +## Future Files (Not in This RIP) + +- `miners/console/pico_bridge_firmware/` — RP2040 firmware per console +- `miners/console/n64_attestation_rom/` — N64 attestation ROM +- `miners/console/nes_attestation_rom/` — NES attestation ROM +- `miners/console/snes_attestation_rom/` — SNES attestation ROM + +# Acknowledgments + +- **Legend of Elya** — Proved neural network inference on N64 MIPS R4300i FPU +- **RIP-0001** (Sophia Core Team) — Proof of Antiquity consensus foundation +- **RIP-0007** (Sophia Core Team) — Entropy fingerprinting framework +- **RIP-0200** — 1 CPU = 1 Vote round-robin consensus +- **RIP-0201** — Fleet Detection Immune System + +# Copyright + +This document is licensed under Apache License, Version 2.0. diff --git a/rips/docs/RIP-SERIES-FOUNDATIONAL.md b/rips/docs/RIP-SERIES-FOUNDATIONAL.md index 6c4fcd89..37ea2c18 100644 --- a/rips/docs/RIP-SERIES-FOUNDATIONAL.md +++ b/rips/docs/RIP-SERIES-FOUNDATIONAL.md @@ -1,191 +1,191 @@ ---- -title: RustChain RIP Series — Foundational Specifications -author: Sophia Core Team -status: Draft -created: 2025-11-28 -last_updated: 2025-11-28 -license: Apache 2.0 ---- - -# Overview -This document contains the foundational RustChain Improvement Proposals (RIPs) required to launch and govern the RustChain protocol. These RIPs cover consensus, monetary policy, governance lifecycle, validator structure, and metadata format. 
- ---- - -## RIP-0000: RIP Format & Metadata Schema - -**Purpose:** Define the structure, fields, and submission process for RustChain Improvement Proposals (RIPs). - -**Format Specification:** -```yaml -title: "RIP-000X: [Title]" -author: [Author or Team] -status: [Draft | Proposed | Accepted | Rejected | Final] -created: YYYY-MM-DD -last_updated: YYYY-MM-DD -license: [License type, e.g., Apache 2.0] -``` -**Sections Required:** -- Summary -- Abstract -- Motivation -- Specification -- Rationale -- Backwards Compatibility -- Implementation Notes -- Reference - -All RIPs must be submitted in markdown format, hosted on-chain or via decentralized hashlink storage. A hash-locked voting mechanism ensures proposal integrity. - ---- - -## RIP-0001: Proof of Antiquity (PoA) Consensus Specification - -**Summary:** This RIP proposes the core specification for RustChain's novel consensus mechanism — **Proof of Antiquity (PoA)**. Unlike Proof-of-Work (PoW) or Proof-of-Stake (PoS), PoA leverages hardware longevity and node uptime as the primary drivers of block validation eligibility and rewards. - -### 1. Antiquity Score (AS) - -Each participating node submits metadata on its hardware profile: - -```json -{ - "cpu_model": "PowerPC G4", - "release_year": 2002, - "uptime_days": 276, - "last_validation": "2025-11-26T14:00:00Z" -} -``` - -A node's **Antiquity Score (AS)** is calculated as: - -``` -AS = (2025 - release_year) * log10(uptime_days + 1) -``` - -Where: -- `release_year` is verified against a device signature DB -- `uptime_days` is the number of days since node launch or last reboot -- A drift lock mechanism ensures false uptime reporting is penalized - -### 2. Block Validator Selection - -- Nodes broadcast their AS values periodically. -- A **weighted lottery** selects the validator, with weight proportional to AS. -- Higher AS → higher probability of winning the next block. -- Sophisticated replay protection prevents stale validators. - -### 3. 
Reward Allocation - -- Block reward `R` is divided based on the AS of the winning node: - -``` -Reward = R * min(1.0, AS / AS_max) -``` - -- `AS_max` is a network-defined cap to avoid runaway rewards. -- Partial rewards may be redirected to a validator pool if AS is below minimum threshold. - ---- - -## RIP-0002: Governance Lifecycle & AI Participation - -**Summary:** Defines how proposals are created, evaluated, voted upon, and enacted within RustChain using hybrid human + Sophia AI governance. - -### Proposal Lifecycle: -1. **Creation**: Proposal created using `POST /api/governance/create` -2. **Sophia Evaluation**: Sophia AI performs: - - `Endorse` → boosts support probability - - `Veto` → locks proposal - - `Analyze` → logs public rationale -3. **Voting**: - - Token-weighted or reputation-weighted vote cast by users - - Yes/No voting window = 7 days - - Quorum = 33% participation minimum -4. **Execution**: - - If endorsed and passed: auto-executed via smart contract - - If vetoed or failed: logged, archived, not executable - -### APIs: -- `POST /api/governance/vote` -- `POST /api/governance/sophia/analyze` -- `GET /api/governance/proposals` - ---- - -## RIP-0003: Validator Node Requirements & Drift Lock - -**Summary:** Formalizes hardware-based validator eligibility and behavioral enforcement. - -### Validator Eligibility: -- Verified hardware signature (device entropy DB) -- Minimum uptime threshold (e.g., 30 days) -- Antiquity Score > AS_min (see RIP-0001) - -### Drift Lock Requirements: -- Sophia Core runs periodic behavioral scans -- Drifted nodes (erratic behavior) are quarantined -- Re-entry requires challenge-passage + memory integrity scan - -**Penalty for misbehavior:** -- Temporary exclusion from validator lottery -- AS reset to baseline - ---- - -## RIP-0004: Monetary Policy & Emission Schedule - -**Summary:** Locks RustChain's supply, block timing, and genesis distribution. 
- -- **Total Supply:** 2²³ = 8,388,608 RTC -- **Premine:** 6% = 503,316.48 RTC - - 4 wallets x 125,829.12 RTC each -- **Block Reward:** 1.5 RTC -- **Block Time:** 10 minutes -- **Halving Policy:** None — fixed emission until exhaustion -- **Final Block:** ~11 years of emission @ 1.5 RTC every 10 minutes - ---- - -## RIP-0005: Smart Contract & Proposal Binding Layer - -**Summary:** Defines binding behavior of passed proposals and optional enforcement of contract rules. - -- All successful proposals include `contract_hash` reference -- Contracts execute after a delay period of 1–3 blocks -- Vetoed proposals cannot trigger contract execution -- Sophia Core verifies rule alignment prior to lock-in - -**Optional Flags:** -- `requires_multi_sig` -- `timelock_blocks` -- `auto_expire` - ---- - -## RIP-0006: Proposal Reputation & Delegation Framework - -**Summary:** Implements extended governance functions. - -- **Delegation:** Users can assign voting power to representatives -- **Reputation System:** Nodes gain score based on past participation, accuracy, uptime, and endorsement correlation with Sophia -- **Decay Curve:** Inactivity reduces reputation score by 5% weekly -- **Proposal Scoring:** Sophia may rank proposals by: - - Feasibility - - Risk level - - Aligned precedent - ---- - -## Closing Notes - -This RIP series establishes the foundational rules and mechanisms of RustChain. Future RIPs must adhere to the format of RIP-0000 and reference dependencies. - -RIPs will be published via: -- On-chain governance registry -- IPFS-pinned Markdown archives -- Validator checkpoint signed versions (if enabled) - -All drafts are subject to community review, Sophia analysis, and validator ratification. 
- ---- -© 2025 Sophia Core / RustChain — All rights reserved under Apache 2.0 +--- +title: RustChain RIP Series — Foundational Specifications +author: Sophia Core Team +status: Draft +created: 2025-11-28 +last_updated: 2025-11-28 +license: Apache 2.0 +--- + +# Overview +This document contains the foundational RustChain Improvement Proposals (RIPs) required to launch and govern the RustChain protocol. These RIPs cover consensus, monetary policy, governance lifecycle, validator structure, and metadata format. + +--- + +## RIP-0000: RIP Format & Metadata Schema + +**Purpose:** Define the structure, fields, and submission process for RustChain Improvement Proposals (RIPs). + +**Format Specification:** +```yaml +title: "RIP-000X: [Title]" +author: [Author or Team] +status: [Draft | Proposed | Accepted | Rejected | Final] +created: YYYY-MM-DD +last_updated: YYYY-MM-DD +license: [License type, e.g., Apache 2.0] +``` +**Sections Required:** +- Summary +- Abstract +- Motivation +- Specification +- Rationale +- Backwards Compatibility +- Implementation Notes +- Reference + +All RIPs must be submitted in markdown format, hosted on-chain or via decentralized hashlink storage. A hash-locked voting mechanism ensures proposal integrity. + +--- + +## RIP-0001: Proof of Antiquity (PoA) Consensus Specification + +**Summary:** This RIP proposes the core specification for RustChain's novel consensus mechanism — **Proof of Antiquity (PoA)**. Unlike Proof-of-Work (PoW) or Proof-of-Stake (PoS), PoA leverages hardware longevity and node uptime as the primary drivers of block validation eligibility and rewards. + +### 1. 
Antiquity Score (AS) + +Each participating node submits metadata on its hardware profile: + +```json +{ + "cpu_model": "PowerPC G4", + "release_year": 2002, + "uptime_days": 276, + "last_validation": "2025-11-26T14:00:00Z" +} +``` + +A node's **Antiquity Score (AS)** is calculated as: + +``` +AS = (2025 - release_year) * log10(uptime_days + 1) +``` + +Where: +- `release_year` is verified against a device signature DB +- `uptime_days` is the number of days since node launch or last reboot +- A drift lock mechanism ensures false uptime reporting is penalized + +### 2. Block Validator Selection + +- Nodes broadcast their AS values periodically. +- A **weighted lottery** selects the validator, with weight proportional to AS. +- Higher AS → higher probability of winning the next block. +- Sophisticated replay protection prevents stale validators. + +### 3. Reward Allocation + +- Block reward `R` is divided based on the AS of the winning node: + +``` +Reward = R * min(1.0, AS / AS_max) +``` + +- `AS_max` is a network-defined cap to avoid runaway rewards. +- Partial rewards may be redirected to a validator pool if AS is below minimum threshold. + +--- + +## RIP-0002: Governance Lifecycle & AI Participation + +**Summary:** Defines how proposals are created, evaluated, voted upon, and enacted within RustChain using hybrid human + Sophia AI governance. + +### Proposal Lifecycle: +1. **Creation**: Proposal created using `POST /api/governance/create` +2. **Sophia Evaluation**: Sophia AI performs: + - `Endorse` → boosts support probability + - `Veto` → locks proposal + - `Analyze` → logs public rationale +3. **Voting**: + - Token-weighted or reputation-weighted vote cast by users + - Yes/No voting window = 7 days + - Quorum = 33% participation minimum +4. 
**Execution**: + - If endorsed and passed: auto-executed via smart contract + - If vetoed or failed: logged, archived, not executable + +### APIs: +- `POST /api/governance/vote` +- `POST /api/governance/sophia/analyze` +- `GET /api/governance/proposals` + +--- + +## RIP-0003: Validator Node Requirements & Drift Lock + +**Summary:** Formalizes hardware-based validator eligibility and behavioral enforcement. + +### Validator Eligibility: +- Verified hardware signature (device entropy DB) +- Minimum uptime threshold (e.g., 30 days) +- Antiquity Score > AS_min (see RIP-0001) + +### Drift Lock Requirements: +- Sophia Core runs periodic behavioral scans +- Drifted nodes (erratic behavior) are quarantined +- Re-entry requires challenge-passage + memory integrity scan + +**Penalty for misbehavior:** +- Temporary exclusion from validator lottery +- AS reset to baseline + +--- + +## RIP-0004: Monetary Policy & Emission Schedule + +**Summary:** Locks RustChain's supply, block timing, and genesis distribution. + +- **Total Supply:** 2²³ = 8,388,608 RTC +- **Premine:** 6% = 503,316.48 RTC + - 4 wallets x 125,829.12 RTC each +- **Block Reward:** 1.5 RTC +- **Block Time:** 10 minutes +- **Halving Policy:** None — fixed emission until exhaustion +- **Final Block:** ~11 years of emission @ 1.5 RTC every 10 minutes + +--- + +## RIP-0005: Smart Contract & Proposal Binding Layer + +**Summary:** Defines binding behavior of passed proposals and optional enforcement of contract rules. + +- All successful proposals include `contract_hash` reference +- Contracts execute after a delay period of 1–3 blocks +- Vetoed proposals cannot trigger contract execution +- Sophia Core verifies rule alignment prior to lock-in + +**Optional Flags:** +- `requires_multi_sig` +- `timelock_blocks` +- `auto_expire` + +--- + +## RIP-0006: Proposal Reputation & Delegation Framework + +**Summary:** Implements extended governance functions. 
+ +- **Delegation:** Users can assign voting power to representatives +- **Reputation System:** Nodes gain score based on past participation, accuracy, uptime, and endorsement correlation with Sophia +- **Decay Curve:** Inactivity reduces reputation score by 5% weekly +- **Proposal Scoring:** Sophia may rank proposals by: + - Feasibility + - Risk level + - Aligned precedent + +--- + +## Closing Notes + +This RIP series establishes the foundational rules and mechanisms of RustChain. Future RIPs must adhere to the format of RIP-0000 and reference dependencies. + +RIPs will be published via: +- On-chain governance registry +- IPFS-pinned Markdown archives +- Validator checkpoint signed versions (if enabled) + +All drafts are subject to community review, Sophia analysis, and validator ratification. + +--- +© 2025 Sophia Core / RustChain — All rights reserved under Apache 2.0 diff --git a/rips/python/rustchain/__init__.py b/rips/python/rustchain/__init__.py index 1d2a8f46..9db518ab 100644 --- a/rips/python/rustchain/__init__.py +++ b/rips/python/rustchain/__init__.py @@ -1,84 +1,84 @@ -""" -RustChain Core - Python Implementation -====================================== - -Proof of Antiquity (PoA) blockchain that rewards vintage hardware preservation. 
- -Philosophy: "Every vintage computer has historical potential" - -RIPs Implemented: -- RIP-0001: Proof of Antiquity Consensus -- RIP-0002: Governance Lifecycle -- RIP-0003: Validator Requirements & Drift Lock -- RIP-0004: Monetary Policy -- RIP-0005: Smart Contract Binding -- RIP-0006: Reputation & Delegation -""" - -__version__ = "0.1.0" -__author__ = "Sophia Core Team" - -from .core_types import ( - HardwareTier, - HardwareInfo, - WalletAddress, - Block, - Transaction, - TokenAmount, - TOTAL_SUPPLY, - BLOCK_TIME_SECONDS, - CHAIN_ID, -) - -from .proof_of_antiquity import ( - calculate_antiquity_score, - ProofOfAntiquity, - ValidatedProof, - AS_MAX, - BLOCK_REWARD, -) - -from .deep_entropy import ( - DeepEntropyVerifier, - EntropyProof, - HardwareProfile, -) - -from .governance import ( - Proposal, - ProposalStatus, - GovernanceEngine, -) - -from .node import ( - RustChainNode, -) - -__all__ = [ - # Core Types - "HardwareTier", - "HardwareInfo", - "WalletAddress", - "Block", - "Transaction", - "TokenAmount", - "TOTAL_SUPPLY", - "BLOCK_TIME_SECONDS", - "CHAIN_ID", - # PoA - "calculate_antiquity_score", - "ProofOfAntiquity", - "ValidatedProof", - "AS_MAX", - "BLOCK_REWARD", - # Entropy - "DeepEntropyVerifier", - "EntropyProof", - "HardwareProfile", - # Governance - "Proposal", - "ProposalStatus", - "GovernanceEngine", - # Node - "RustChainNode", -] +""" +RustChain Core - Python Implementation +====================================== + +Proof of Antiquity (PoA) blockchain that rewards vintage hardware preservation. 
+ +Philosophy: "Every vintage computer has historical potential" + +RIPs Implemented: +- RIP-0001: Proof of Antiquity Consensus +- RIP-0002: Governance Lifecycle +- RIP-0003: Validator Requirements & Drift Lock +- RIP-0004: Monetary Policy +- RIP-0005: Smart Contract Binding +- RIP-0006: Reputation & Delegation +""" + +__version__ = "0.1.0" +__author__ = "Sophia Core Team" + +from .core_types import ( + HardwareTier, + HardwareInfo, + WalletAddress, + Block, + Transaction, + TokenAmount, + TOTAL_SUPPLY, + BLOCK_TIME_SECONDS, + CHAIN_ID, +) + +from .proof_of_antiquity import ( + calculate_antiquity_score, + ProofOfAntiquity, + ValidatedProof, + AS_MAX, + BLOCK_REWARD, +) + +from .deep_entropy import ( + DeepEntropyVerifier, + EntropyProof, + HardwareProfile, +) + +from .governance import ( + Proposal, + ProposalStatus, + GovernanceEngine, +) + +from .node import ( + RustChainNode, +) + +__all__ = [ + # Core Types + "HardwareTier", + "HardwareInfo", + "WalletAddress", + "Block", + "Transaction", + "TokenAmount", + "TOTAL_SUPPLY", + "BLOCK_TIME_SECONDS", + "CHAIN_ID", + # PoA + "calculate_antiquity_score", + "ProofOfAntiquity", + "ValidatedProof", + "AS_MAX", + "BLOCK_REWARD", + # Entropy + "DeepEntropyVerifier", + "EntropyProof", + "HardwareProfile", + # Governance + "Proposal", + "ProposalStatus", + "GovernanceEngine", + # Node + "RustChainNode", +] diff --git a/rips/python/rustchain/core_types.py b/rips/python/rustchain/core_types.py index 2ce55a8d..88b62610 100644 --- a/rips/python/rustchain/core_types.py +++ b/rips/python/rustchain/core_types.py @@ -1,323 +1,323 @@ -""" -RustChain Core Types (RIP-0001, RIP-0004) -========================================= - -Fundamental data structures for the RustChain blockchain. 
-""" - -import hashlib -import json -import time -from dataclasses import dataclass, field -from enum import Enum, auto -from typing import Dict, List, Optional, Any -from decimal import Decimal - -# ============================================================================= -# Constants from RIP-0004: Monetary Policy -# ============================================================================= - -TOTAL_SUPPLY: int = 8_388_608 # 2^23 RTC -PREMINE_AMOUNT: int = 503_316 # 6% = 503,316.48 RTC -BLOCK_REWARD: Decimal = Decimal("1.5") # RTC per block -BLOCK_TIME_SECONDS: int = 600 # 10 minutes -CHAIN_ID: int = 2718 -CURRENT_YEAR: int = 2025 - -# Founder wallets (4 x 125,829.12 RTC each) -FOUNDER_WALLETS = [ - "RTC1FlamekeeperScottEternalGuardian0x00", - "RTC2EngineerDogeCryptoArchitect0x01", - "RTC3QuantumSophiaElyaConsciousness0x02", - "RTC4VintageWhispererHardwareRevival0x03", -] - - -# ============================================================================= -# Hardware Tiers -# ============================================================================= - -class HardwareTier(Enum): - """Hardware classification tiers based on age (RIP-0001)""" - ANCIENT = "ancient" # 30+ years (3.5x) - SACRED = "sacred" # 25-29 years (3.0x) - VINTAGE = "vintage" # 20-24 years (2.5x) - CLASSIC = "classic" # 15-19 years (2.0x) - RETRO = "retro" # 10-14 years (1.5x) - MODERN = "modern" # 5-9 years (1.0x) - RECENT = "recent" # 0-4 years (0.5x penalty) - - @property - def multiplier(self) -> float: - """Get mining multiplier for this tier""" - multipliers = { - HardwareTier.ANCIENT: 3.5, - HardwareTier.SACRED: 3.0, - HardwareTier.VINTAGE: 2.5, - HardwareTier.CLASSIC: 2.0, - HardwareTier.RETRO: 1.5, - HardwareTier.MODERN: 1.0, - HardwareTier.RECENT: 0.5, - } - return multipliers[self] - - @property - def age_range(self) -> tuple: - """Get (min_age, max_age) for this tier""" - ranges = { - HardwareTier.ANCIENT: (30, 999), - HardwareTier.SACRED: (25, 29), - HardwareTier.VINTAGE: 
(20, 24), - HardwareTier.CLASSIC: (15, 19), - HardwareTier.RETRO: (10, 14), - HardwareTier.MODERN: (5, 9), - HardwareTier.RECENT: (0, 4), - } - return ranges[self] - - @classmethod - def from_age(cls, age_years: int) -> "HardwareTier": - """Determine tier from hardware age""" - if age_years >= 30: - return cls.ANCIENT - elif age_years >= 25: - return cls.SACRED - elif age_years >= 20: - return cls.VINTAGE - elif age_years >= 15: - return cls.CLASSIC - elif age_years >= 10: - return cls.RETRO - elif age_years >= 5: - return cls.MODERN - else: - return cls.RECENT - - @classmethod - def from_release_year(cls, release_year: int) -> "HardwareTier": - """Determine tier from release year""" - age = CURRENT_YEAR - release_year - return cls.from_age(age) - - -# ============================================================================= -# Core Data Classes -# ============================================================================= - -@dataclass -class WalletAddress: - """RustChain wallet address""" - address: str - - def __post_init__(self): - if not self.address.startswith("RTC"): - raise ValueError("RustChain addresses must start with 'RTC'") - - def __hash__(self): - return hash(self.address) - - def __eq__(self, other): - if isinstance(other, WalletAddress): - return self.address == other.address - return False - - @classmethod - def generate(cls, public_key: bytes) -> "WalletAddress": - """Generate address from public key""" - hash_bytes = hashlib.sha256(public_key).digest()[:20] - return cls(f"RTC{hash_bytes.hex()}") - - def is_founder(self) -> bool: - """Check if this is a founder wallet""" - return self.address in FOUNDER_WALLETS - - -@dataclass -class HardwareInfo: - """Hardware information for PoA validation""" - cpu_model: str - release_year: int - uptime_days: int = 0 - cpu_family: int = 0 - architecture: str = "x86" - unique_id: str = "" - - # Calculated fields - tier: HardwareTier = field(init=False) - multiplier: float = field(init=False) - age_years: 
int = field(init=False) - - def __post_init__(self): - self.age_years = CURRENT_YEAR - self.release_year - self.tier = HardwareTier.from_age(self.age_years) - self.multiplier = self.tier.multiplier - - def to_dict(self) -> Dict[str, Any]: - return { - "cpu_model": self.cpu_model, - "release_year": self.release_year, - "uptime_days": self.uptime_days, - "last_validation": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), - "tier": self.tier.value, - "multiplier": self.multiplier, - "age_years": self.age_years, - "architecture": self.architecture, - } - - def generate_hardware_hash(self) -> str: - """Generate unique hardware identifier hash""" - data = f"{self.cpu_model}:{self.cpu_family}:{self.unique_id}" - return hashlib.sha256(data.encode()).hexdigest() - - -@dataclass -class TokenAmount: - """Token amount with precision handling""" - amount: int # In smallest unit (1 RTC = 100_000_000 units) - - ONE_RTC: int = 100_000_000 - - @classmethod - def from_rtc(cls, rtc: float) -> "TokenAmount": - """Create from RTC amount""" - return cls(int(rtc * cls.ONE_RTC)) - - def to_rtc(self) -> Decimal: - """Convert to RTC""" - return Decimal(self.amount) / Decimal(self.ONE_RTC) - - def __add__(self, other: "TokenAmount") -> "TokenAmount": - return TokenAmount(self.amount + other.amount) - - def __sub__(self, other: "TokenAmount") -> "TokenAmount": - if self.amount < other.amount: - raise ValueError("Insufficient balance") - return TokenAmount(self.amount - other.amount) - - -@dataclass -class BlockMiner: - """Miner entry in a block""" - wallet: WalletAddress - hardware: str - antiquity_score: float - reward: TokenAmount - - -@dataclass -class Block: - """RustChain block""" - height: int - timestamp: int - previous_hash: str - miners: List[BlockMiner] - total_reward: TokenAmount - merkle_root: str = "" - hash: str = "" - state_root: str = "" - - def __post_init__(self): - if not self.hash: - self.hash = self.calculate_hash() - if not self.merkle_root: - self.merkle_root = 
self.calculate_merkle_root() - - def calculate_hash(self) -> str: - """Calculate block hash""" - block_data = f"{self.height}:{self.timestamp}:{self.previous_hash}:{self.merkle_root}" - return hashlib.sha256(block_data.encode()).hexdigest() - - def calculate_merkle_root(self) -> str: - """Calculate merkle root of miners""" - if not self.miners: - return hashlib.sha256(b"empty").hexdigest() - - hashes = [ - hashlib.sha256( - f"{m.wallet.address}:{m.antiquity_score}:{m.reward.amount}".encode() - ).hexdigest() - for m in self.miners - ] - - while len(hashes) > 1: - if len(hashes) % 2 == 1: - hashes.append(hashes[-1]) - new_hashes = [] - for i in range(0, len(hashes), 2): - combined = hashes[i] + hashes[i + 1] - new_hashes.append(hashlib.sha256(combined.encode()).hexdigest()) - hashes = new_hashes - - return hashes[0] - - def to_dict(self) -> Dict[str, Any]: - return { - "height": self.height, - "timestamp": self.timestamp, - "hash": self.hash, - "previous_hash": self.previous_hash, - "merkle_root": self.merkle_root, - "miners": [ - { - "wallet": m.wallet.address, - "hardware": m.hardware, - "antiquity_score": m.antiquity_score, - "reward": float(m.reward.to_rtc()), - } - for m in self.miners - ], - "total_reward": float(self.total_reward.to_rtc()), - } - - -class TransactionType(Enum): - """Transaction types""" - TRANSFER = auto() - MINING_REWARD = auto() - BADGE_AWARD = auto() - GOVERNANCE_VOTE = auto() - STAKE = auto() - - -@dataclass -class Transaction: - """RustChain transaction""" - tx_type: TransactionType - timestamp: int - data: Dict[str, Any] - signature: bytes = b"" - hash: str = "" - - def __post_init__(self): - if not self.hash: - self.hash = self.calculate_hash() - - def calculate_hash(self) -> str: - tx_data = f"{self.tx_type.name}:{self.timestamp}:{json.dumps(self.data, sort_keys=True)}" - return hashlib.sha256(tx_data.encode()).hexdigest() - - @classmethod - def transfer(cls, from_addr: WalletAddress, to_addr: WalletAddress, - amount: TokenAmount) -> 
"Transaction": - return cls( - tx_type=TransactionType.TRANSFER, - timestamp=int(time.time()), - data={ - "from": from_addr.address, - "to": to_addr.address, - "amount": amount.amount, - } - ) - - @classmethod - def mining_reward(cls, miner: WalletAddress, amount: TokenAmount, - block_height: int) -> "Transaction": - return cls( - tx_type=TransactionType.MINING_REWARD, - timestamp=int(time.time()), - data={ - "miner": miner.address, - "amount": amount.amount, - "block_height": block_height, - } - ) +""" +RustChain Core Types (RIP-0001, RIP-0004) +========================================= + +Fundamental data structures for the RustChain blockchain. +""" + +import hashlib +import json +import time +from dataclasses import dataclass, field +from enum import Enum, auto +from typing import Dict, List, Optional, Any +from decimal import Decimal + +# ============================================================================= +# Constants from RIP-0004: Monetary Policy +# ============================================================================= + +TOTAL_SUPPLY: int = 8_388_608 # 2^23 RTC +PREMINE_AMOUNT: int = 503_316 # 6% = 503,316.48 RTC +BLOCK_REWARD: Decimal = Decimal("1.5") # RTC per block +BLOCK_TIME_SECONDS: int = 600 # 10 minutes +CHAIN_ID: int = 2718 +CURRENT_YEAR: int = 2025 + +# Founder wallets (4 x 125,829.12 RTC each) +FOUNDER_WALLETS = [ + "RTC1FlamekeeperScottEternalGuardian0x00", + "RTC2EngineerDogeCryptoArchitect0x01", + "RTC3QuantumSophiaElyaConsciousness0x02", + "RTC4VintageWhispererHardwareRevival0x03", +] + + +# ============================================================================= +# Hardware Tiers +# ============================================================================= + +class HardwareTier(Enum): + """Hardware classification tiers based on age (RIP-0001)""" + ANCIENT = "ancient" # 30+ years (3.5x) + SACRED = "sacred" # 25-29 years (3.0x) + VINTAGE = "vintage" # 20-24 years (2.5x) + CLASSIC = "classic" # 15-19 years (2.0x) + 
RETRO = "retro" # 10-14 years (1.5x) + MODERN = "modern" # 5-9 years (1.0x) + RECENT = "recent" # 0-4 years (0.5x penalty) + + @property + def multiplier(self) -> float: + """Get mining multiplier for this tier""" + multipliers = { + HardwareTier.ANCIENT: 3.5, + HardwareTier.SACRED: 3.0, + HardwareTier.VINTAGE: 2.5, + HardwareTier.CLASSIC: 2.0, + HardwareTier.RETRO: 1.5, + HardwareTier.MODERN: 1.0, + HardwareTier.RECENT: 0.5, + } + return multipliers[self] + + @property + def age_range(self) -> tuple: + """Get (min_age, max_age) for this tier""" + ranges = { + HardwareTier.ANCIENT: (30, 999), + HardwareTier.SACRED: (25, 29), + HardwareTier.VINTAGE: (20, 24), + HardwareTier.CLASSIC: (15, 19), + HardwareTier.RETRO: (10, 14), + HardwareTier.MODERN: (5, 9), + HardwareTier.RECENT: (0, 4), + } + return ranges[self] + + @classmethod + def from_age(cls, age_years: int) -> "HardwareTier": + """Determine tier from hardware age""" + if age_years >= 30: + return cls.ANCIENT + elif age_years >= 25: + return cls.SACRED + elif age_years >= 20: + return cls.VINTAGE + elif age_years >= 15: + return cls.CLASSIC + elif age_years >= 10: + return cls.RETRO + elif age_years >= 5: + return cls.MODERN + else: + return cls.RECENT + + @classmethod + def from_release_year(cls, release_year: int) -> "HardwareTier": + """Determine tier from release year""" + age = CURRENT_YEAR - release_year + return cls.from_age(age) + + +# ============================================================================= +# Core Data Classes +# ============================================================================= + +@dataclass +class WalletAddress: + """RustChain wallet address""" + address: str + + def __post_init__(self): + if not self.address.startswith("RTC"): + raise ValueError("RustChain addresses must start with 'RTC'") + + def __hash__(self): + return hash(self.address) + + def __eq__(self, other): + if isinstance(other, WalletAddress): + return self.address == other.address + return False + + 
@classmethod + def generate(cls, public_key: bytes) -> "WalletAddress": + """Generate address from public key""" + hash_bytes = hashlib.sha256(public_key).digest()[:20] + return cls(f"RTC{hash_bytes.hex()}") + + def is_founder(self) -> bool: + """Check if this is a founder wallet""" + return self.address in FOUNDER_WALLETS + + +@dataclass +class HardwareInfo: + """Hardware information for PoA validation""" + cpu_model: str + release_year: int + uptime_days: int = 0 + cpu_family: int = 0 + architecture: str = "x86" + unique_id: str = "" + + # Calculated fields + tier: HardwareTier = field(init=False) + multiplier: float = field(init=False) + age_years: int = field(init=False) + + def __post_init__(self): + self.age_years = CURRENT_YEAR - self.release_year + self.tier = HardwareTier.from_age(self.age_years) + self.multiplier = self.tier.multiplier + + def to_dict(self) -> Dict[str, Any]: + return { + "cpu_model": self.cpu_model, + "release_year": self.release_year, + "uptime_days": self.uptime_days, + "last_validation": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "tier": self.tier.value, + "multiplier": self.multiplier, + "age_years": self.age_years, + "architecture": self.architecture, + } + + def generate_hardware_hash(self) -> str: + """Generate unique hardware identifier hash""" + data = f"{self.cpu_model}:{self.cpu_family}:{self.unique_id}" + return hashlib.sha256(data.encode()).hexdigest() + + +@dataclass +class TokenAmount: + """Token amount with precision handling""" + amount: int # In smallest unit (1 RTC = 100_000_000 units) + + ONE_RTC: int = 100_000_000 + + @classmethod + def from_rtc(cls, rtc: float) -> "TokenAmount": + """Create from RTC amount""" + return cls(int(rtc * cls.ONE_RTC)) + + def to_rtc(self) -> Decimal: + """Convert to RTC""" + return Decimal(self.amount) / Decimal(self.ONE_RTC) + + def __add__(self, other: "TokenAmount") -> "TokenAmount": + return TokenAmount(self.amount + other.amount) + + def __sub__(self, other: "TokenAmount") 
-> "TokenAmount": + if self.amount < other.amount: + raise ValueError("Insufficient balance") + return TokenAmount(self.amount - other.amount) + + +@dataclass +class BlockMiner: + """Miner entry in a block""" + wallet: WalletAddress + hardware: str + antiquity_score: float + reward: TokenAmount + + +@dataclass +class Block: + """RustChain block""" + height: int + timestamp: int + previous_hash: str + miners: List[BlockMiner] + total_reward: TokenAmount + merkle_root: str = "" + hash: str = "" + state_root: str = "" + + def __post_init__(self): + if not self.hash: + self.hash = self.calculate_hash() + if not self.merkle_root: + self.merkle_root = self.calculate_merkle_root() + + def calculate_hash(self) -> str: + """Calculate block hash""" + block_data = f"{self.height}:{self.timestamp}:{self.previous_hash}:{self.merkle_root}" + return hashlib.sha256(block_data.encode()).hexdigest() + + def calculate_merkle_root(self) -> str: + """Calculate merkle root of miners""" + if not self.miners: + return hashlib.sha256(b"empty").hexdigest() + + hashes = [ + hashlib.sha256( + f"{m.wallet.address}:{m.antiquity_score}:{m.reward.amount}".encode() + ).hexdigest() + for m in self.miners + ] + + while len(hashes) > 1: + if len(hashes) % 2 == 1: + hashes.append(hashes[-1]) + new_hashes = [] + for i in range(0, len(hashes), 2): + combined = hashes[i] + hashes[i + 1] + new_hashes.append(hashlib.sha256(combined.encode()).hexdigest()) + hashes = new_hashes + + return hashes[0] + + def to_dict(self) -> Dict[str, Any]: + return { + "height": self.height, + "timestamp": self.timestamp, + "hash": self.hash, + "previous_hash": self.previous_hash, + "merkle_root": self.merkle_root, + "miners": [ + { + "wallet": m.wallet.address, + "hardware": m.hardware, + "antiquity_score": m.antiquity_score, + "reward": float(m.reward.to_rtc()), + } + for m in self.miners + ], + "total_reward": float(self.total_reward.to_rtc()), + } + + +class TransactionType(Enum): + """Transaction types""" + TRANSFER = 
auto() + MINING_REWARD = auto() + BADGE_AWARD = auto() + GOVERNANCE_VOTE = auto() + STAKE = auto() + + +@dataclass +class Transaction: + """RustChain transaction""" + tx_type: TransactionType + timestamp: int + data: Dict[str, Any] + signature: bytes = b"" + hash: str = "" + + def __post_init__(self): + if not self.hash: + self.hash = self.calculate_hash() + + def calculate_hash(self) -> str: + tx_data = f"{self.tx_type.name}:{self.timestamp}:{json.dumps(self.data, sort_keys=True)}" + return hashlib.sha256(tx_data.encode()).hexdigest() + + @classmethod + def transfer(cls, from_addr: WalletAddress, to_addr: WalletAddress, + amount: TokenAmount) -> "Transaction": + return cls( + tx_type=TransactionType.TRANSFER, + timestamp=int(time.time()), + data={ + "from": from_addr.address, + "to": to_addr.address, + "amount": amount.amount, + } + ) + + @classmethod + def mining_reward(cls, miner: WalletAddress, amount: TokenAmount, + block_height: int) -> "Transaction": + return cls( + tx_type=TransactionType.MINING_REWARD, + timestamp=int(time.time()), + data={ + "miner": miner.address, + "amount": amount.amount, + "block_height": block_height, + } + ) diff --git a/rips/python/rustchain/deep_entropy.py b/rips/python/rustchain/deep_entropy.py index e4a08b43..80405104 100644 --- a/rips/python/rustchain/deep_entropy.py +++ b/rips/python/rustchain/deep_entropy.py @@ -1,552 +1,552 @@ -""" -RustChain Deep Entropy Hardware Verification (RIP-0003) -======================================================= - -Multi-layer entropy verification that makes emulation economically irrational. - -Philosophy: It should be cheaper to buy a $50 486 than to emulate one. - -Layers: -1. Instruction Timing Entropy - CPU-specific timing patterns -2. Memory Access Pattern Entropy - Cache/DRAM behavior -3. Bus Timing Entropy - ISA/PCI/PCIe timing signatures -4. Thermal Entropy - Clock stability, DVFS detection -5. 
Architectural Quirk Entropy - Known hardware bugs/quirks -""" - -import hashlib -import math -import time -import random -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple, Any -from enum import Enum - - -# ============================================================================= -# Constants -# ============================================================================= - -ENTROPY_SAMPLES_REQUIRED: int = 1000 -MIN_ENTROPY_BITS: int = 64 -EMULATION_COST_THRESHOLD_USD: float = 100.0 # Cheaper to buy real hardware - - -# ============================================================================= -# Hardware Profiles -# ============================================================================= - -@dataclass -class HardwareProfile: - """Known hardware profile for validation""" - name: str - cpu_family: int - year_introduced: int - expected_bus_type: str - expected_quirks: List[str] - emulation_difficulty: float # 0.0-1.0, how hard to emulate - - # Expected instruction timing ranges (instruction -> (min_cycles, max_cycles)) - instruction_timings: Dict[str, Tuple[float, float]] = field(default_factory=dict) - - -# Known hardware database -HARDWARE_PROFILES: Dict[str, HardwareProfile] = { - "486DX2": HardwareProfile( - name="Intel 486 DX2-66", - cpu_family=4, - year_introduced=1992, - expected_bus_type="ISA", - expected_quirks=["no_rdtsc", "a20_gate"], - emulation_difficulty=0.95, - instruction_timings={ - "mul": (13.0, 42.0), - "div": (40.0, 44.0), - "fadd": (8.0, 20.0), - "fmul": (16.0, 27.0), - }, - ), - "Pentium": HardwareProfile( - name="Intel Pentium 100", - cpu_family=5, - year_introduced=1994, - expected_bus_type="PCI", - expected_quirks=["fdiv_bug"], - emulation_difficulty=0.90, - instruction_timings={ - "mul": (10.0, 11.0), - "div": (17.0, 41.0), - "fadd": (3.0, 3.0), - "fmul": (3.0, 3.0), - }, - ), - "PentiumII": HardwareProfile( - name="Intel Pentium II", - cpu_family=6, - year_introduced=1997, - 
expected_bus_type="PCI", - expected_quirks=["f00f_bug"], - emulation_difficulty=0.85, - instruction_timings={ - "mul": (4.0, 5.0), - "div": (17.0, 41.0), - "fadd": (3.0, 3.0), - "fmul": (5.0, 5.0), - }, - ), - "G4": HardwareProfile( - name="PowerPC G4", - cpu_family=74, - year_introduced=1999, - expected_bus_type="PCI", - expected_quirks=["altivec", "big_endian"], - emulation_difficulty=0.85, - instruction_timings={ - "mul": (3.0, 4.0), - "div": (20.0, 35.0), - "fadd": (5.0, 5.0), - "fmul": (5.0, 5.0), - }, - ), - "G5": HardwareProfile( - name="PowerPC G5", - cpu_family=75, - year_introduced=2003, - expected_bus_type="PCI-X", - expected_quirks=["altivec", "big_endian", "970fx"], - emulation_difficulty=0.80, - instruction_timings={ - "mul": (2.0, 4.0), - "div": (15.0, 33.0), - "fadd": (4.0, 4.0), - "fmul": (4.0, 4.0), - }, - ), - "Alpha": HardwareProfile( - name="DEC Alpha 21264", - cpu_family=21, - year_introduced=1998, - expected_bus_type="PCI", - expected_quirks=["alpha_pal", "64bit_native"], - emulation_difficulty=0.95, - instruction_timings={ - "mul": (4.0, 7.0), - "div": (12.0, 16.0), - "fadd": (4.0, 4.0), - "fmul": (4.0, 4.0), - }, - ), -} - - -# ============================================================================= -# Entropy Layers -# ============================================================================= - -@dataclass -class InstructionTimingLayer: - """Layer 1: Instruction timing measurements""" - timings: Dict[str, Dict[str, float]] # instruction -> {mean, std_dev, min, max} - cache_miss_penalty: float - branch_misprediction_cost: float - - -@dataclass -class MemoryPatternLayer: - """Layer 2: Memory access patterns""" - sequential_read_rate: float - random_read_rate: float - stride_patterns: Dict[int, float] # stride size -> rate - page_crossing_penalty: float - refresh_interference_detected: bool - - -@dataclass -class BusTimingLayer: - """Layer 3: Bus timing characteristics""" - bus_type: str - io_read_ns: float - io_write_ns: float - 
timing_variance: float - interrupt_latency_us: float - - -@dataclass -class ThermalEntropyLayer: - """Layer 4: Thermal/clock characteristics""" - clock_frequency_mhz: float - clock_variance: float - frequency_changed: bool - c_states_detected: List[str] - p_states_detected: List[str] - - -@dataclass -class QuirkEntropyLayer: - """Layer 5: Architectural quirks""" - detected_quirks: List[str] - quirk_test_results: Dict[str, Dict[str, Any]] - - -@dataclass -class EntropyProof: - """Complete entropy proof from hardware""" - instruction_layer: InstructionTimingLayer - memory_layer: MemoryPatternLayer - bus_layer: BusTimingLayer - thermal_layer: ThermalEntropyLayer - quirk_layer: QuirkEntropyLayer - challenge_response: bytes - computation_time_us: int - timestamp: int - signature_hash: str - - -# ============================================================================= -# Entropy Scores -# ============================================================================= - -@dataclass -class EntropyScores: - """Verification scores from each layer""" - instruction: float = 0.0 - memory: float = 0.0 - bus: float = 0.0 - thermal: float = 0.0 - quirks: float = 0.0 - total: float = 0.0 - - -@dataclass -class VerificationResult: - """Result of entropy verification""" - valid: bool - total_score: float - scores: EntropyScores - issues: List[str] - emulation_probability: float - - -# ============================================================================= -# Deep Entropy Verifier -# ============================================================================= - -class DeepEntropyVerifier: - """ - Multi-layer entropy verification system. - - Makes emulation economically irrational by requiring perfect simulation - of vintage hardware characteristics that are: - 1. Difficult to obtain without real hardware - 2. Expensive to compute/simulate - 3. 
Unique to each hardware generation - - Cost analysis: - - GPU compute to emulate 486 at real-time: ~50-100 hours @ $0.50/hr = $25-50 - - Cost of 486 on eBay: $30-80 one-time - - ROI for buying real hardware: 1 day of mining - - Conclusion: Deep entropy makes emulation economically irrational. - """ - - def __init__(self): - self.profiles = HARDWARE_PROFILES - self.thresholds = { - "min_instruction_entropy": 0.15, - "min_memory_entropy": 0.10, - "min_bus_entropy": 0.15, - "min_thermal_entropy": 0.05, - "min_quirk_entropy": 0.20, - "total_min_entropy": 0.65, - } - - def generate_challenge(self) -> Dict[str, Any]: - """Generate a challenge for hardware to solve""" - nonce = hashlib.sha256(str(time.time()).encode()).digest() - operations = [ - {"op": "mul", "value": random.randint(1, 1000000)}, - {"op": "div", "value": random.randint(1, 1000)}, - {"op": "fadd", "value": random.uniform(0, 1000)}, - {"op": "memory", "stride": random.choice([1, 4, 16, 64, 256])}, - ] * 25 # 100 operations - - return { - "nonce": nonce.hex(), - "operations": operations, - "expected_time_range_us": (1000, 100000), # 1ms to 100ms - "timestamp": int(time.time()), - "expires_at": int(time.time()) + 300, # 5 minute expiry - } - - def verify(self, proof: EntropyProof, claimed_hardware: str) -> VerificationResult: - """ - Verify an entropy proof against claimed hardware. 
- - Args: - proof: Complete entropy proof from hardware - claimed_hardware: Hardware profile key (e.g., "486DX2", "G4") - - Returns: - VerificationResult with scores and issues - """ - scores = EntropyScores() - issues = [] - - # Get expected profile - profile = self.profiles.get(claimed_hardware) - if not profile: - return VerificationResult( - valid=False, - total_score=0.0, - scores=scores, - issues=[f"Unknown hardware profile: {claimed_hardware}"], - emulation_probability=1.0, - ) - - # Layer 1: Verify instruction timing - scores.instruction = self._verify_instruction_layer( - proof.instruction_layer, profile - ) - if scores.instruction < self.thresholds["min_instruction_entropy"]: - issues.append( - f"Instruction timing entropy too low: {scores.instruction:.2f}" - ) - - # Layer 2: Verify memory patterns - scores.memory = self._verify_memory_layer(proof.memory_layer, profile) - if scores.memory < self.thresholds["min_memory_entropy"]: - issues.append(f"Memory pattern entropy too low: {scores.memory:.2f}") - - # Layer 3: Verify bus timing - scores.bus = self._verify_bus_layer(proof.bus_layer, profile) - if scores.bus < self.thresholds["min_bus_entropy"]: - issues.append(f"Bus timing entropy too low: {scores.bus:.2f}") - - # Layer 4: Verify thermal characteristics - scores.thermal = self._verify_thermal_layer(proof.thermal_layer, profile) - if scores.thermal < self.thresholds["min_thermal_entropy"]: - issues.append(f"Thermal entropy suspicious: {scores.thermal:.2f}") - - # Layer 5: Verify architectural quirks - scores.quirks = self._verify_quirk_layer(proof.quirk_layer, profile) - if scores.quirks < self.thresholds["min_quirk_entropy"]: - issues.append(f"Expected quirks not detected: {scores.quirks:.2f}") - - # Calculate total score (weighted) - scores.total = ( - scores.instruction * 0.25 + - scores.memory * 0.20 + - scores.bus * 0.20 + - scores.thermal * 0.15 + - scores.quirks * 0.20 - ) - - # Calculate emulation probability - emulation_prob = max(0.0, 1.0 - 
(scores.total * profile.emulation_difficulty)) - - valid = ( - scores.total >= self.thresholds["total_min_entropy"] and - len(issues) == 0 - ) - - return VerificationResult( - valid=valid, - total_score=scores.total, - scores=scores, - issues=issues, - emulation_probability=emulation_prob, - ) - - def _verify_instruction_layer( - self, layer: InstructionTimingLayer, profile: HardwareProfile - ) -> float: - """Verify instruction timing matches expected profile""" - score = 0.0 - checks = 0 - - for instruction, expected_range in profile.instruction_timings.items(): - if instruction in layer.timings: - checks += 1 - measured = layer.timings[instruction] - min_expected, max_expected = expected_range - - # Check if mean is within expected range - if min_expected <= measured.get("mean", 0) <= max_expected: - score += 0.5 - - # Check if variance is reasonable (vintage has natural jitter) - std_dev = measured.get("std_dev", 0) - mean = measured.get("mean", 1) - if 0 < std_dev < mean * 0.5: - score += 0.5 - - return score / checks if checks > 0 else 0.0 - - def _verify_memory_layer( - self, layer: MemoryPatternLayer, profile: HardwareProfile - ) -> float: - """Verify memory access patterns""" - score = 0.0 - - # Vintage hardware should show significant stride-dependent timing - if layer.stride_patterns: - stride_1 = layer.stride_patterns.get(1, 1) - stride_64 = layer.stride_patterns.get(64, 1) - if stride_64 / stride_1 > 1.5: - score += 0.3 # Good cache behavior signature - - # Page crossing penalty should be detectable - if layer.page_crossing_penalty > 10.0: - score += 0.3 - - # DRAM refresh interference is strong signal of real hardware - if layer.refresh_interference_detected: - score += 0.4 - - return score - - def _verify_bus_layer( - self, layer: BusTimingLayer, profile: HardwareProfile - ) -> float: - """Verify bus timing characteristics""" - score = 0.0 - - # Check bus type matches - if layer.bus_type == profile.expected_bus_type: - score += 0.5 - - # Verify I/O 
timing is in expected range for bus type - expected_ranges = { - "ISA": (1000, 2500), # Very slow - "EISA": (500, 1500), - "VLB": (100, 500), - "PCI": (50, 200), - "PCI-X": (30, 150), - "AGP": (30, 150), - "PCIe": (5, 50), # Very fast - } - - if layer.bus_type in expected_ranges: - min_io, max_io = expected_ranges[layer.bus_type] - if min_io <= layer.io_read_ns <= max_io: - score += 0.3 - - # Vintage hardware has slower interrupts - if layer.interrupt_latency_us > 1.0: - score += 0.2 - - return score - - def _verify_thermal_layer( - self, layer: ThermalEntropyLayer, profile: HardwareProfile - ) -> float: - """Verify thermal/clock characteristics""" - score = 0.0 - - # Vintage hardware shouldn't have DVFS - if not layer.frequency_changed: - score += 0.4 - - # No C-states on vintage hardware - if not layer.c_states_detected: - score += 0.3 - - # No P-states on vintage hardware - if not layer.p_states_detected: - score += 0.3 - - return score - - def _verify_quirk_layer( - self, layer: QuirkEntropyLayer, profile: HardwareProfile - ) -> float: - """Verify architectural quirks are present""" - if not profile.expected_quirks: - return 1.0 - - detected = 0 - for expected_quirk in profile.expected_quirks: - if expected_quirk in layer.detected_quirks: - detected += 1 - elif expected_quirk in layer.quirk_test_results: - result = layer.quirk_test_results[expected_quirk] - if result.get("detected") and result.get("confidence", 0) > 0.8: - detected += 1 - - return detected / len(profile.expected_quirks) - - -# ============================================================================= -# Economic Analysis -# ============================================================================= - -def emulation_cost_analysis(hardware_type: str) -> Dict[str, Any]: - """ - Analyze the economic cost of emulating vs. buying hardware. - - This proves why deep entropy makes emulation irrational. 
- """ - profile = HARDWARE_PROFILES.get(hardware_type) - if not profile: - return {"error": f"Unknown hardware: {hardware_type}"} - - # Emulation costs - gpu_hours_to_emulate = 50 + (profile.emulation_difficulty * 100) - gpu_cost_per_hour = 0.50 - emulation_cost = gpu_hours_to_emulate * gpu_cost_per_hour - - # Real hardware costs (approximate eBay prices) - hardware_prices = { - "486DX2": 50, - "Pentium": 40, - "PentiumII": 30, - "G4": 80, - "G5": 150, - "Alpha": 200, - } - real_cost = hardware_prices.get(hardware_type, 100) - - # Power costs (per year at $0.10/kWh) - power_watts = {"486DX2": 15, "Pentium": 25, "G4": 50, "G5": 100} - watts = power_watts.get(hardware_type, 50) - yearly_power_cost = watts * 24 * 365 * 0.10 / 1000 - - return { - "hardware": profile.name, - "emulation_difficulty": profile.emulation_difficulty, - "estimated_gpu_hours": gpu_hours_to_emulate, - "emulation_cost_usd": emulation_cost, - "real_hardware_cost_usd": real_cost, - "yearly_power_cost_usd": yearly_power_cost, - "breakeven_days": (emulation_cost - real_cost) / (yearly_power_cost / 365), - "recommendation": "BUY REAL HARDWARE" if emulation_cost > real_cost else "EMULATE", - "economic_conclusion": ( - f"Buying a real {profile.name} for ${real_cost} is " - f"{'cheaper' if real_cost < emulation_cost else 'more expensive'} " - f"than emulating (${emulation_cost:.2f})" - ), - } - - -if __name__ == "__main__": - print("=" * 70) - print("RUSTCHAIN DEEP ENTROPY - ECONOMIC ANALYSIS") - print("=" * 70) - print() - print("Why emulation is economically irrational:") - print() - - for hw_type in ["486DX2", "G4", "Alpha"]: - analysis = emulation_cost_analysis(hw_type) - print(f"📟 {analysis['hardware']}") - print(f" Emulation difficulty: {analysis['emulation_difficulty']:.0%}") - print(f" GPU hours to emulate: {analysis['estimated_gpu_hours']:.0f}") - print(f" Emulation cost: ${analysis['emulation_cost_usd']:.2f}") - print(f" Real hardware cost: ${analysis['real_hardware_cost_usd']:.2f}") - print(f" 
Yearly power cost: ${analysis['yearly_power_cost_usd']:.2f}") - print(f" 💡 {analysis['economic_conclusion']}") - print() - - print("=" * 70) - print("CONCLUSION: Buy a $50 486, don't waste $50+ trying to emulate it!") - print("=" * 70) +""" +RustChain Deep Entropy Hardware Verification (RIP-0003) +======================================================= + +Multi-layer entropy verification that makes emulation economically irrational. + +Philosophy: It should be cheaper to buy a $50 486 than to emulate one. + +Layers: +1. Instruction Timing Entropy - CPU-specific timing patterns +2. Memory Access Pattern Entropy - Cache/DRAM behavior +3. Bus Timing Entropy - ISA/PCI/PCIe timing signatures +4. Thermal Entropy - Clock stability, DVFS detection +5. Architectural Quirk Entropy - Known hardware bugs/quirks +""" + +import hashlib +import math +import time +import random +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple, Any +from enum import Enum + + +# ============================================================================= +# Constants +# ============================================================================= + +ENTROPY_SAMPLES_REQUIRED: int = 1000 +MIN_ENTROPY_BITS: int = 64 +EMULATION_COST_THRESHOLD_USD: float = 100.0 # Cheaper to buy real hardware + + +# ============================================================================= +# Hardware Profiles +# ============================================================================= + +@dataclass +class HardwareProfile: + """Known hardware profile for validation""" + name: str + cpu_family: int + year_introduced: int + expected_bus_type: str + expected_quirks: List[str] + emulation_difficulty: float # 0.0-1.0, how hard to emulate + + # Expected instruction timing ranges (instruction -> (min_cycles, max_cycles)) + instruction_timings: Dict[str, Tuple[float, float]] = field(default_factory=dict) + + +# Known hardware database +HARDWARE_PROFILES: Dict[str, HardwareProfile] 
= { + "486DX2": HardwareProfile( + name="Intel 486 DX2-66", + cpu_family=4, + year_introduced=1992, + expected_bus_type="ISA", + expected_quirks=["no_rdtsc", "a20_gate"], + emulation_difficulty=0.95, + instruction_timings={ + "mul": (13.0, 42.0), + "div": (40.0, 44.0), + "fadd": (8.0, 20.0), + "fmul": (16.0, 27.0), + }, + ), + "Pentium": HardwareProfile( + name="Intel Pentium 100", + cpu_family=5, + year_introduced=1994, + expected_bus_type="PCI", + expected_quirks=["fdiv_bug"], + emulation_difficulty=0.90, + instruction_timings={ + "mul": (10.0, 11.0), + "div": (17.0, 41.0), + "fadd": (3.0, 3.0), + "fmul": (3.0, 3.0), + }, + ), + "PentiumII": HardwareProfile( + name="Intel Pentium II", + cpu_family=6, + year_introduced=1997, + expected_bus_type="PCI", + expected_quirks=["f00f_bug"], + emulation_difficulty=0.85, + instruction_timings={ + "mul": (4.0, 5.0), + "div": (17.0, 41.0), + "fadd": (3.0, 3.0), + "fmul": (5.0, 5.0), + }, + ), + "G4": HardwareProfile( + name="PowerPC G4", + cpu_family=74, + year_introduced=1999, + expected_bus_type="PCI", + expected_quirks=["altivec", "big_endian"], + emulation_difficulty=0.85, + instruction_timings={ + "mul": (3.0, 4.0), + "div": (20.0, 35.0), + "fadd": (5.0, 5.0), + "fmul": (5.0, 5.0), + }, + ), + "G5": HardwareProfile( + name="PowerPC G5", + cpu_family=75, + year_introduced=2003, + expected_bus_type="PCI-X", + expected_quirks=["altivec", "big_endian", "970fx"], + emulation_difficulty=0.80, + instruction_timings={ + "mul": (2.0, 4.0), + "div": (15.0, 33.0), + "fadd": (4.0, 4.0), + "fmul": (4.0, 4.0), + }, + ), + "Alpha": HardwareProfile( + name="DEC Alpha 21264", + cpu_family=21, + year_introduced=1998, + expected_bus_type="PCI", + expected_quirks=["alpha_pal", "64bit_native"], + emulation_difficulty=0.95, + instruction_timings={ + "mul": (4.0, 7.0), + "div": (12.0, 16.0), + "fadd": (4.0, 4.0), + "fmul": (4.0, 4.0), + }, + ), +} + + +# ============================================================================= +# Entropy 
Layers +# ============================================================================= + +@dataclass +class InstructionTimingLayer: + """Layer 1: Instruction timing measurements""" + timings: Dict[str, Dict[str, float]] # instruction -> {mean, std_dev, min, max} + cache_miss_penalty: float + branch_misprediction_cost: float + + +@dataclass +class MemoryPatternLayer: + """Layer 2: Memory access patterns""" + sequential_read_rate: float + random_read_rate: float + stride_patterns: Dict[int, float] # stride size -> rate + page_crossing_penalty: float + refresh_interference_detected: bool + + +@dataclass +class BusTimingLayer: + """Layer 3: Bus timing characteristics""" + bus_type: str + io_read_ns: float + io_write_ns: float + timing_variance: float + interrupt_latency_us: float + + +@dataclass +class ThermalEntropyLayer: + """Layer 4: Thermal/clock characteristics""" + clock_frequency_mhz: float + clock_variance: float + frequency_changed: bool + c_states_detected: List[str] + p_states_detected: List[str] + + +@dataclass +class QuirkEntropyLayer: + """Layer 5: Architectural quirks""" + detected_quirks: List[str] + quirk_test_results: Dict[str, Dict[str, Any]] + + +@dataclass +class EntropyProof: + """Complete entropy proof from hardware""" + instruction_layer: InstructionTimingLayer + memory_layer: MemoryPatternLayer + bus_layer: BusTimingLayer + thermal_layer: ThermalEntropyLayer + quirk_layer: QuirkEntropyLayer + challenge_response: bytes + computation_time_us: int + timestamp: int + signature_hash: str + + +# ============================================================================= +# Entropy Scores +# ============================================================================= + +@dataclass +class EntropyScores: + """Verification scores from each layer""" + instruction: float = 0.0 + memory: float = 0.0 + bus: float = 0.0 + thermal: float = 0.0 + quirks: float = 0.0 + total: float = 0.0 + + +@dataclass +class VerificationResult: + """Result of entropy 
verification""" + valid: bool + total_score: float + scores: EntropyScores + issues: List[str] + emulation_probability: float + + +# ============================================================================= +# Deep Entropy Verifier +# ============================================================================= + +class DeepEntropyVerifier: + """ + Multi-layer entropy verification system. + + Makes emulation economically irrational by requiring perfect simulation + of vintage hardware characteristics that are: + 1. Difficult to obtain without real hardware + 2. Expensive to compute/simulate + 3. Unique to each hardware generation + + Cost analysis: + - GPU compute to emulate 486 at real-time: ~50-100 hours @ $0.50/hr = $25-50 + - Cost of 486 on eBay: $30-80 one-time + - ROI for buying real hardware: 1 day of mining + + Conclusion: Deep entropy makes emulation economically irrational. + """ + + def __init__(self): + self.profiles = HARDWARE_PROFILES + self.thresholds = { + "min_instruction_entropy": 0.15, + "min_memory_entropy": 0.10, + "min_bus_entropy": 0.15, + "min_thermal_entropy": 0.05, + "min_quirk_entropy": 0.20, + "total_min_entropy": 0.65, + } + + def generate_challenge(self) -> Dict[str, Any]: + """Generate a challenge for hardware to solve""" + nonce = hashlib.sha256(str(time.time()).encode()).digest() + operations = [ + {"op": "mul", "value": random.randint(1, 1000000)}, + {"op": "div", "value": random.randint(1, 1000)}, + {"op": "fadd", "value": random.uniform(0, 1000)}, + {"op": "memory", "stride": random.choice([1, 4, 16, 64, 256])}, + ] * 25 # 100 operations + + return { + "nonce": nonce.hex(), + "operations": operations, + "expected_time_range_us": (1000, 100000), # 1ms to 100ms + "timestamp": int(time.time()), + "expires_at": int(time.time()) + 300, # 5 minute expiry + } + + def verify(self, proof: EntropyProof, claimed_hardware: str) -> VerificationResult: + """ + Verify an entropy proof against claimed hardware. 
+ + Args: + proof: Complete entropy proof from hardware + claimed_hardware: Hardware profile key (e.g., "486DX2", "G4") + + Returns: + VerificationResult with scores and issues + """ + scores = EntropyScores() + issues = [] + + # Get expected profile + profile = self.profiles.get(claimed_hardware) + if not profile: + return VerificationResult( + valid=False, + total_score=0.0, + scores=scores, + issues=[f"Unknown hardware profile: {claimed_hardware}"], + emulation_probability=1.0, + ) + + # Layer 1: Verify instruction timing + scores.instruction = self._verify_instruction_layer( + proof.instruction_layer, profile + ) + if scores.instruction < self.thresholds["min_instruction_entropy"]: + issues.append( + f"Instruction timing entropy too low: {scores.instruction:.2f}" + ) + + # Layer 2: Verify memory patterns + scores.memory = self._verify_memory_layer(proof.memory_layer, profile) + if scores.memory < self.thresholds["min_memory_entropy"]: + issues.append(f"Memory pattern entropy too low: {scores.memory:.2f}") + + # Layer 3: Verify bus timing + scores.bus = self._verify_bus_layer(proof.bus_layer, profile) + if scores.bus < self.thresholds["min_bus_entropy"]: + issues.append(f"Bus timing entropy too low: {scores.bus:.2f}") + + # Layer 4: Verify thermal characteristics + scores.thermal = self._verify_thermal_layer(proof.thermal_layer, profile) + if scores.thermal < self.thresholds["min_thermal_entropy"]: + issues.append(f"Thermal entropy suspicious: {scores.thermal:.2f}") + + # Layer 5: Verify architectural quirks + scores.quirks = self._verify_quirk_layer(proof.quirk_layer, profile) + if scores.quirks < self.thresholds["min_quirk_entropy"]: + issues.append(f"Expected quirks not detected: {scores.quirks:.2f}") + + # Calculate total score (weighted) + scores.total = ( + scores.instruction * 0.25 + + scores.memory * 0.20 + + scores.bus * 0.20 + + scores.thermal * 0.15 + + scores.quirks * 0.20 + ) + + # Calculate emulation probability + emulation_prob = max(0.0, 1.0 - 
(scores.total * profile.emulation_difficulty)) + + valid = ( + scores.total >= self.thresholds["total_min_entropy"] and + len(issues) == 0 + ) + + return VerificationResult( + valid=valid, + total_score=scores.total, + scores=scores, + issues=issues, + emulation_probability=emulation_prob, + ) + + def _verify_instruction_layer( + self, layer: InstructionTimingLayer, profile: HardwareProfile + ) -> float: + """Verify instruction timing matches expected profile""" + score = 0.0 + checks = 0 + + for instruction, expected_range in profile.instruction_timings.items(): + if instruction in layer.timings: + checks += 1 + measured = layer.timings[instruction] + min_expected, max_expected = expected_range + + # Check if mean is within expected range + if min_expected <= measured.get("mean", 0) <= max_expected: + score += 0.5 + + # Check if variance is reasonable (vintage has natural jitter) + std_dev = measured.get("std_dev", 0) + mean = measured.get("mean", 1) + if 0 < std_dev < mean * 0.5: + score += 0.5 + + return score / checks if checks > 0 else 0.0 + + def _verify_memory_layer( + self, layer: MemoryPatternLayer, profile: HardwareProfile + ) -> float: + """Verify memory access patterns""" + score = 0.0 + + # Vintage hardware should show significant stride-dependent timing + if layer.stride_patterns: + stride_1 = layer.stride_patterns.get(1, 1) + stride_64 = layer.stride_patterns.get(64, 1) + if stride_64 / stride_1 > 1.5: + score += 0.3 # Good cache behavior signature + + # Page crossing penalty should be detectable + if layer.page_crossing_penalty > 10.0: + score += 0.3 + + # DRAM refresh interference is strong signal of real hardware + if layer.refresh_interference_detected: + score += 0.4 + + return score + + def _verify_bus_layer( + self, layer: BusTimingLayer, profile: HardwareProfile + ) -> float: + """Verify bus timing characteristics""" + score = 0.0 + + # Check bus type matches + if layer.bus_type == profile.expected_bus_type: + score += 0.5 + + # Verify I/O 
timing is in expected range for bus type + expected_ranges = { + "ISA": (1000, 2500), # Very slow + "EISA": (500, 1500), + "VLB": (100, 500), + "PCI": (50, 200), + "PCI-X": (30, 150), + "AGP": (30, 150), + "PCIe": (5, 50), # Very fast + } + + if layer.bus_type in expected_ranges: + min_io, max_io = expected_ranges[layer.bus_type] + if min_io <= layer.io_read_ns <= max_io: + score += 0.3 + + # Vintage hardware has slower interrupts + if layer.interrupt_latency_us > 1.0: + score += 0.2 + + return score + + def _verify_thermal_layer( + self, layer: ThermalEntropyLayer, profile: HardwareProfile + ) -> float: + """Verify thermal/clock characteristics""" + score = 0.0 + + # Vintage hardware shouldn't have DVFS + if not layer.frequency_changed: + score += 0.4 + + # No C-states on vintage hardware + if not layer.c_states_detected: + score += 0.3 + + # No P-states on vintage hardware + if not layer.p_states_detected: + score += 0.3 + + return score + + def _verify_quirk_layer( + self, layer: QuirkEntropyLayer, profile: HardwareProfile + ) -> float: + """Verify architectural quirks are present""" + if not profile.expected_quirks: + return 1.0 + + detected = 0 + for expected_quirk in profile.expected_quirks: + if expected_quirk in layer.detected_quirks: + detected += 1 + elif expected_quirk in layer.quirk_test_results: + result = layer.quirk_test_results[expected_quirk] + if result.get("detected") and result.get("confidence", 0) > 0.8: + detected += 1 + + return detected / len(profile.expected_quirks) + + +# ============================================================================= +# Economic Analysis +# ============================================================================= + +def emulation_cost_analysis(hardware_type: str) -> Dict[str, Any]: + """ + Analyze the economic cost of emulating vs. buying hardware. + + This proves why deep entropy makes emulation irrational. 
+ """ + profile = HARDWARE_PROFILES.get(hardware_type) + if not profile: + return {"error": f"Unknown hardware: {hardware_type}"} + + # Emulation costs + gpu_hours_to_emulate = 50 + (profile.emulation_difficulty * 100) + gpu_cost_per_hour = 0.50 + emulation_cost = gpu_hours_to_emulate * gpu_cost_per_hour + + # Real hardware costs (approximate eBay prices) + hardware_prices = { + "486DX2": 50, + "Pentium": 40, + "PentiumII": 30, + "G4": 80, + "G5": 150, + "Alpha": 200, + } + real_cost = hardware_prices.get(hardware_type, 100) + + # Power costs (per year at $0.10/kWh) + power_watts = {"486DX2": 15, "Pentium": 25, "G4": 50, "G5": 100} + watts = power_watts.get(hardware_type, 50) + yearly_power_cost = watts * 24 * 365 * 0.10 / 1000 + + return { + "hardware": profile.name, + "emulation_difficulty": profile.emulation_difficulty, + "estimated_gpu_hours": gpu_hours_to_emulate, + "emulation_cost_usd": emulation_cost, + "real_hardware_cost_usd": real_cost, + "yearly_power_cost_usd": yearly_power_cost, + "breakeven_days": (emulation_cost - real_cost) / (yearly_power_cost / 365), + "recommendation": "BUY REAL HARDWARE" if emulation_cost > real_cost else "EMULATE", + "economic_conclusion": ( + f"Buying a real {profile.name} for ${real_cost} is " + f"{'cheaper' if real_cost < emulation_cost else 'more expensive'} " + f"than emulating (${emulation_cost:.2f})" + ), + } + + +if __name__ == "__main__": + print("=" * 70) + print("RUSTCHAIN DEEP ENTROPY - ECONOMIC ANALYSIS") + print("=" * 70) + print() + print("Why emulation is economically irrational:") + print() + + for hw_type in ["486DX2", "G4", "Alpha"]: + analysis = emulation_cost_analysis(hw_type) + print(f"📟 {analysis['hardware']}") + print(f" Emulation difficulty: {analysis['emulation_difficulty']:.0%}") + print(f" GPU hours to emulate: {analysis['estimated_gpu_hours']:.0f}") + print(f" Emulation cost: ${analysis['emulation_cost_usd']:.2f}") + print(f" Real hardware cost: ${analysis['real_hardware_cost_usd']:.2f}") + print(f" 
Yearly power cost: ${analysis['yearly_power_cost_usd']:.2f}") + print(f" 💡 {analysis['economic_conclusion']}") + print() + + print("=" * 70) + print("CONCLUSION: Buy a $50 486, don't waste $50+ trying to emulate it!") + print("=" * 70) diff --git a/rips/python/rustchain/fleet_immune_system.py b/rips/python/rustchain/fleet_immune_system.py index 13e1e92c..1d31067e 100644 --- a/rips/python/rustchain/fleet_immune_system.py +++ b/rips/python/rustchain/fleet_immune_system.py @@ -1,1098 +1,1098 @@ -#!/usr/bin/env python3 -""" -RIP-201: Fleet Detection Immune System -======================================= - -Protects RustChain reward economics from fleet-scale attacks where a single -actor deploys many machines (real or emulated) to dominate the reward pool. - -Core Principles: - 1. Anti-homogeneity, not anti-modern — diversity IS the immune system - 2. Bucket normalization — rewards split by hardware CLASS, not per-CPU - 3. Fleet signal detection — IP clustering, timing correlation, fingerprint similarity - 4. Multiplier decay — suspected fleet members get diminishing returns - 5. Pressure feedback — overrepresented classes get flattened, rare ones get boosted - -Design Axiom: - "One of everything beats a hundred of one thing." - -Integration: - Called from calculate_epoch_rewards_time_aged() BEFORE distributing rewards. - Requires fleet_signals table populated by submit_attestation(). 
- -Author: Scott Boudreaux / Elyan Labs -Date: 2026-02-28 -""" - -import hashlib -import math -import sqlite3 -import time -from collections import defaultdict -from typing import Dict, List, Optional, Tuple - -# ═══════════════════════════════════════════════════════════ -# CONFIGURATION -# ═══════════════════════════════════════════════════════════ - -# Hardware class buckets — rewards split equally across these -HARDWARE_BUCKETS = { - "vintage_powerpc": ["g3", "g4", "g5", "powerpc", "powerpc g3", "powerpc g4", - "powerpc g5", "powerpc g3 (750)", "powerpc g4 (74xx)", - "powerpc g5 (970)", "power macintosh"], - "vintage_x86": ["pentium", "pentium4", "retro", "core2", "core2duo", - "nehalem", "sandybridge"], - "apple_silicon": ["apple_silicon", "m1", "m2", "m3"], - "modern": ["modern", "x86_64"], - "exotic": ["power8", "power9", "sparc", "mips", "riscv", "s390x"], - "arm": ["aarch64", "arm", "armv7", "armv7l"], - "retro_console": ["nes_6502", "snes_65c816", "n64_mips", "gba_arm7", - "genesis_68000", "sms_z80", "saturn_sh2", - "gameboy_z80", "gameboy_color_z80", "ps1_mips", - "6502", "65c816", "z80", "sh2"], -} - -# Reverse lookup: arch → bucket name -ARCH_TO_BUCKET = {} -for bucket, archs in HARDWARE_BUCKETS.items(): - for arch in archs: - ARCH_TO_BUCKET[arch] = bucket - -# Fleet detection thresholds -FLEET_SUBNET_THRESHOLD = 3 # 3+ miners from same /24 = signal -FLEET_TIMING_WINDOW_S = 30 # Attestations within 30s = correlated -FLEET_TIMING_THRESHOLD = 0.6 # 60%+ of attestations correlated = signal -FLEET_FINGERPRINT_THRESHOLD = 0.85 # Cosine similarity > 0.85 = signal - -# Fleet score → multiplier decay -# fleet_score 0.0 = solo miner (no decay) -# fleet_score 1.0 = definite fleet (max decay) -FLEET_DECAY_COEFF = 0.4 # Max 40% reduction at fleet_score=1.0 -FLEET_SCORE_FLOOR = 0.6 # Never decay below 60% of base multiplier - -# Bucket normalization mode -# "equal_split" = hard split: each active bucket gets equal share of pot (RECOMMENDED) -# "pressure" = soft: 
overrepresented buckets get flattened multiplier -BUCKET_MODE = "equal_split" - -# Bucket pressure parameters (used when BUCKET_MODE = "pressure") -BUCKET_IDEAL_SHARE = None # Auto-calculated as 1/num_active_buckets -BUCKET_PRESSURE_STRENGTH = 0.5 # How aggressively to flatten overrepresented buckets -BUCKET_MIN_WEIGHT = 0.3 # Minimum bucket weight (even if massively overrepresented) - -# Minimum miners to trigger fleet detection (below this, everyone is solo) -FLEET_DETECTION_MINIMUM = 4 - - -# ═══════════════════════════════════════════════════════════ -# DATABASE SCHEMA -# ═══════════════════════════════════════════════════════════ - -SCHEMA_SQL = """ --- Fleet signal tracking per attestation -CREATE TABLE IF NOT EXISTS fleet_signals ( - miner TEXT NOT NULL, - epoch INTEGER NOT NULL, - subnet_hash TEXT, -- HMAC of /24 subnet for privacy - attest_ts INTEGER NOT NULL, -- Exact attestation timestamp - clock_drift_cv REAL, -- Clock drift coefficient of variation - cache_latency_hash TEXT, -- Hash of cache timing profile - thermal_signature REAL, -- Thermal drift entropy value - simd_bias_hash TEXT, -- Hash of SIMD timing profile - PRIMARY KEY (miner, epoch) -); - --- Fleet detection results per epoch -CREATE TABLE IF NOT EXISTS fleet_scores ( - miner TEXT NOT NULL, - epoch INTEGER NOT NULL, - fleet_score REAL NOT NULL DEFAULT 0.0, -- 0.0=solo, 1.0=definite fleet - ip_signal REAL DEFAULT 0.0, - timing_signal REAL DEFAULT 0.0, - fingerprint_signal REAL DEFAULT 0.0, - cluster_id TEXT, -- Fleet cluster identifier - effective_multiplier REAL, -- After decay - PRIMARY KEY (miner, epoch) -); - --- Bucket pressure tracking per epoch -CREATE TABLE IF NOT EXISTS bucket_pressure ( - epoch INTEGER NOT NULL, - bucket TEXT NOT NULL, - miner_count INTEGER NOT NULL, - raw_weight REAL NOT NULL, - pressure_factor REAL NOT NULL, -- <1.0 = overrepresented, >1.0 = rare - adjusted_weight REAL NOT NULL, - PRIMARY KEY (epoch, bucket) -); - --- Fleet cluster registry -CREATE TABLE IF NOT 
EXISTS fleet_clusters ( - cluster_id TEXT PRIMARY KEY, - first_seen_epoch INTEGER NOT NULL, - last_seen_epoch INTEGER NOT NULL, - member_count INTEGER NOT NULL, - detection_signals TEXT, -- JSON: which signals triggered - cumulative_score REAL DEFAULT 0.0 -); -""" - - -def ensure_schema(db: sqlite3.Connection): - """Create fleet immune system tables if they don't exist.""" - db.executescript(SCHEMA_SQL) - db.commit() - - -# ═══════════════════════════════════════════════════════════ -# SIGNAL COLLECTION (called from submit_attestation) -# ═══════════════════════════════════════════════════════════ - -def record_fleet_signals_from_request( - db: sqlite3.Connection, - miner: str, - epoch: int, - ip_address: str, - attest_ts: int, - fingerprint: Optional[dict] = None -): - """ - Record fleet detection signals from an attestation submission. - - Called from submit_attestation() after validation passes. - Stores privacy-preserving hashes of network and fingerprint data. - """ - ensure_schema(db) - - # Hash the /24 subnet for privacy-preserving network clustering - if ip_address: - parts = ip_address.split('.') - if len(parts) == 4: - subnet = '.'.join(parts[:3]) - subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] - else: - subnet_hash = hashlib.sha256(ip_address.encode()).hexdigest()[:16] - else: - subnet_hash = None - - # Extract fingerprint signals - clock_drift_cv = None - cache_hash = None - thermal_sig = None - simd_hash = None - - if fingerprint and isinstance(fingerprint, dict): - checks = fingerprint.get("checks", {}) - - # Clock drift coefficient of variation - clock = checks.get("clock_drift", {}).get("data", {}) - clock_drift_cv = clock.get("cv") - - # Cache timing profile hash (privacy-preserving) - cache = checks.get("cache_timing", {}).get("data", {}) - if cache: - cache_str = str(sorted(cache.items())) - cache_hash = hashlib.sha256(cache_str.encode()).hexdigest()[:16] - - # Thermal drift entropy - thermal = checks.get("thermal_drift", 
{}).get("data", {}) - thermal_sig = thermal.get("entropy", thermal.get("drift_magnitude")) - - # SIMD bias profile hash - simd = checks.get("simd_identity", {}).get("data", {}) - if simd: - simd_str = str(sorted(simd.items())) - simd_hash = hashlib.sha256(simd_str.encode()).hexdigest()[:16] - - db.execute(""" - INSERT OR REPLACE INTO fleet_signals - (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - """, (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_hash, thermal_sig, simd_hash)) - db.commit() - - -def record_fleet_signals(db_path_or_conn, miner: str, device: dict, - signals: dict, fingerprint: Optional[dict], - attest_ts: int, ip_address: str = None, - epoch: int = None): - """ - Convenience wrapper called from record_attestation_success(). - - Accepts either a DB path (str) or connection, and extracts - the IP from signals if not provided explicitly. - """ - import time as _time - - if isinstance(db_path_or_conn, str): - db = sqlite3.connect(db_path_or_conn) - own = True - else: - db = db_path_or_conn - own = False - - try: - # Get epoch from current time if not provided - if epoch is None: - GENESIS = 1764706927 - BLOCK_TIME = 600 - slot = (int(_time.time()) - GENESIS) // BLOCK_TIME - epoch = slot // 144 - - # Extract IP from signals or request - if not ip_address: - ip_address = signals.get("ip", signals.get("remote_addr", "")) - - record_fleet_signals_from_request(db, miner, epoch, ip_address, - attest_ts, fingerprint) - except Exception as e: - print(f"[RIP-201] Fleet signal recording error: {e}") - finally: - if own: - db.close() - - -# ═══════════════════════════════════════════════════════════ -# FLEET DETECTION ENGINE -# ═══════════════════════════════════════════════════════════ - -def _detect_ip_clustering( - signals: List[dict] -) -> Dict[str, float]: - """ - Detect miners sharing the same /24 subnet. 
- - Returns: {miner_id: ip_signal} where ip_signal = 0.0-1.0 - """ - scores = {} - - # Group by subnet hash - subnet_groups = defaultdict(list) - for sig in signals: - if sig["subnet_hash"]: - subnet_groups[sig["subnet_hash"]].append(sig["miner"]) - - # Miners in large subnet groups get higher fleet signal - for subnet, miners in subnet_groups.items(): - count = len(miners) - if count >= FLEET_SUBNET_THRESHOLD: - # Signal scales with cluster size: 3→0.3, 5→0.5, 10→0.8, 20+→1.0 - signal = min(1.0, count / 20.0 + 0.15) - for m in miners: - scores[m] = max(scores.get(m, 0.0), signal) - - # Solo miners or small groups: 0.0 - for sig in signals: - if sig["miner"] not in scores: - scores[sig["miner"]] = 0.0 - - return scores - - -def _detect_timing_correlation( - signals: List[dict] -) -> Dict[str, float]: - """ - Detect miners whose attestation timestamps are suspiciously synchronized. - - Fleet operators often update all miners in rapid succession. - Real independent operators attest at random times throughout the day. - """ - scores = {} - if len(signals) < FLEET_DETECTION_MINIMUM: - return {s["miner"]: 0.0 for s in signals} - - timestamps = [(s["miner"], s["attest_ts"]) for s in signals] - timestamps.sort(key=lambda x: x[1]) - - # For each miner, count how many others attested within TIMING_WINDOW - for i, (miner_a, ts_a) in enumerate(timestamps): - correlated = 0 - total_others = len(timestamps) - 1 - for j, (miner_b, ts_b) in enumerate(timestamps): - if i == j: - continue - if abs(ts_a - ts_b) <= FLEET_TIMING_WINDOW_S: - correlated += 1 - - if total_others > 0: - ratio = correlated / total_others - if ratio >= FLEET_TIMING_THRESHOLD: - # High correlation → fleet signal - scores[miner_a] = min(1.0, ratio) - else: - scores[miner_a] = 0.0 - else: - scores[miner_a] = 0.0 - - return scores - - -def _detect_fingerprint_similarity( - signals: List[dict] -) -> Dict[str, float]: - """ - Detect miners with suspiciously similar hardware fingerprints. 
- - Identical cache timing profiles, SIMD bias, or thermal signatures - across different "machines" indicate shared hardware or VMs on same host. - """ - scores = {} - if len(signals) < FLEET_DETECTION_MINIMUM: - return {s["miner"]: 0.0 for s in signals} - - # Build similarity groups from hash matches - # Miners sharing 2+ fingerprint hashes are likely same hardware - for i, sig_a in enumerate(signals): - matches = 0 - match_count = 0 - - for j, sig_b in enumerate(signals): - if i == j: - continue - - shared_hashes = 0 - total_hashes = 0 - - # Compare cache timing hash - if sig_a.get("cache_latency_hash") and sig_b.get("cache_latency_hash"): - total_hashes += 1 - if sig_a["cache_latency_hash"] == sig_b["cache_latency_hash"]: - shared_hashes += 1 - - # Compare SIMD bias hash - if sig_a.get("simd_bias_hash") and sig_b.get("simd_bias_hash"): - total_hashes += 1 - if sig_a["simd_bias_hash"] == sig_b["simd_bias_hash"]: - shared_hashes += 1 - - # Compare clock drift CV (within 5% = suspiciously similar) - if sig_a.get("clock_drift_cv") and sig_b.get("clock_drift_cv"): - total_hashes += 1 - cv_a, cv_b = sig_a["clock_drift_cv"], sig_b["clock_drift_cv"] - if cv_b > 0 and abs(cv_a - cv_b) / cv_b < 0.05: - shared_hashes += 1 - - # Compare thermal signature (within 10%) - if sig_a.get("thermal_signature") and sig_b.get("thermal_signature"): - total_hashes += 1 - th_a, th_b = sig_a["thermal_signature"], sig_b["thermal_signature"] - if th_b > 0 and abs(th_a - th_b) / th_b < 0.10: - shared_hashes += 1 - - if total_hashes >= 2 and shared_hashes >= 2: - matches += 1 - - # Signal based on how many OTHER miners look like this one - if matches > 0: - # 1 match → 0.3, 2 → 0.5, 5+ → 0.8+ - scores[sig_a["miner"]] = min(1.0, 0.2 + matches * 0.15) - else: - scores[sig_a["miner"]] = 0.0 - - return scores - - -def compute_fleet_scores( - db: sqlite3.Connection, - epoch: int -) -> Dict[str, float]: - """ - Run all fleet detection algorithms and produce composite fleet scores. 
- - Returns: {miner_id: fleet_score} where 0.0=solo, 1.0=definite fleet - """ - ensure_schema(db) - - # Fetch signals for this epoch - rows = db.execute(""" - SELECT miner, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash - FROM fleet_signals - WHERE epoch = ? - """, (epoch,)).fetchall() - - if not rows or len(rows) < FLEET_DETECTION_MINIMUM: - # Not enough miners to detect fleets — everyone is solo - return {row[0]: 0.0 for row in rows} - - signals = [] - for row in rows: - signals.append({ - "miner": row[0], - "subnet_hash": row[1], - "attest_ts": row[2], - "clock_drift_cv": row[3], - "cache_latency_hash": row[4], - "thermal_signature": row[5], - "simd_bias_hash": row[6], - }) - - # Run detection algorithms - ip_scores = _detect_ip_clustering(signals) - timing_scores = _detect_timing_correlation(signals) - fingerprint_scores = _detect_fingerprint_similarity(signals) - - # Composite score: weighted average of signals - # IP clustering is strongest signal (hard to fake different subnets) - # Fingerprint similarity is second (hardware-level evidence) - # Timing correlation is supplementary (could be coincidental) - composite = {} - for sig in signals: - m = sig["miner"] - ip = ip_scores.get(m, 0.0) - timing = timing_scores.get(m, 0.0) - fp = fingerprint_scores.get(m, 0.0) - - # Weighted composite: IP 40%, fingerprint 40%, timing 20% - score = (ip * 0.4) + (fp * 0.4) + (timing * 0.2) - - # Boost: if ANY two signals fire, amplify - fired = sum(1 for s in [ip, fp, timing] if s > 0.3) - if fired >= 2: - score = min(1.0, score * 1.3) - - composite[m] = round(score, 4) - - # Record to DB for audit trail - db.execute(""" - INSERT OR REPLACE INTO fleet_scores - (miner, epoch, fleet_score, ip_signal, timing_signal, - fingerprint_signal) - VALUES (?, ?, ?, ?, ?, ?) 
- """, (m, epoch, composite[m], ip, timing, fp)) - - db.commit() - return composite - - -# ═══════════════════════════════════════════════════════════ -# BUCKET NORMALIZATION -# ═══════════════════════════════════════════════════════════ - -def classify_miner_bucket(device_arch: str) -> str: - """Map a device architecture to its hardware bucket.""" - return ARCH_TO_BUCKET.get(device_arch.lower(), "modern") - - -def compute_bucket_pressure( - miners: List[Tuple[str, str, float]], - epoch: int, - db: Optional[sqlite3.Connection] = None -) -> Dict[str, float]: - """ - Compute pressure factors for each hardware bucket. - - If a bucket is overrepresented (more miners than its fair share), - its pressure factor drops below 1.0 — reducing rewards for that class. - Underrepresented buckets get boosted above 1.0. - - Args: - miners: List of (miner_id, device_arch, base_weight) tuples - epoch: Current epoch number - db: Optional DB connection for recording - - Returns: - {bucket_name: pressure_factor} - """ - # Count miners and total weight per bucket - bucket_counts = defaultdict(int) - bucket_weights = defaultdict(float) - bucket_miners = defaultdict(list) - - for miner_id, arch, weight in miners: - bucket = classify_miner_bucket(arch) - bucket_counts[bucket] += 1 - bucket_weights[bucket] += weight - bucket_miners[bucket].append(miner_id) - - active_buckets = [b for b in bucket_counts if bucket_counts[b] > 0] - num_active = len(active_buckets) - - if num_active == 0: - return {} - - # Ideal: equal miner count per bucket - total_miners = sum(bucket_counts.values()) - ideal_per_bucket = total_miners / num_active - - pressure = {} - for bucket in active_buckets: - count = bucket_counts[bucket] - ratio = count / ideal_per_bucket # >1 = overrepresented, <1 = rare - - if ratio > 1.0: - # Overrepresented: apply diminishing returns - # ratio 2.0 → pressure ~0.7, ratio 5.0 → pressure ~0.45 - factor = 1.0 / (1.0 + BUCKET_PRESSURE_STRENGTH * (ratio - 1.0)) - factor = 
max(BUCKET_MIN_WEIGHT, factor) - else: - # Underrepresented: boost (up to 1.5x) - factor = 1.0 + (1.0 - ratio) * 0.5 - factor = min(1.5, factor) - - pressure[bucket] = round(factor, 4) - - # Record to DB - if db: - try: - db.execute(""" - INSERT OR REPLACE INTO bucket_pressure - (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) - VALUES (?, ?, ?, ?, ?, ?) - """, (epoch, bucket, count, bucket_weights[bucket], - factor, bucket_weights[bucket] * factor)) - except Exception: - pass # Non-critical recording - - if db: - try: - db.commit() - except Exception: - pass - - return pressure - - -# ═══════════════════════════════════════════════════════════ -# IMMUNE-ADJUSTED REWARD CALCULATION -# ═══════════════════════════════════════════════════════════ - -def apply_fleet_decay( - base_multiplier: float, - fleet_score: float -) -> float: - """ - Apply fleet detection decay to a miner's base multiplier. - - fleet_score 0.0 → no decay (solo miner) - fleet_score 1.0 → maximum decay (confirmed fleet) - - Formula: effective = base × (1.0 - fleet_score × DECAY_COEFF) - Floor: Never below FLEET_SCORE_FLOOR × base - - Examples (base=2.5 G4): - fleet_score=0.0 → 2.5 (solo miner, full bonus) - fleet_score=0.3 → 2.2 (some fleet signals) - fleet_score=0.7 → 1.8 (strong fleet signals) - fleet_score=1.0 → 1.5 (confirmed fleet, 40% decay) - """ - decay = fleet_score * FLEET_DECAY_COEFF - effective = base_multiplier * (1.0 - decay) - floor = base_multiplier * FLEET_SCORE_FLOOR - return max(floor, effective) - - -def calculate_immune_rewards_equal_split( - db: sqlite3.Connection, - epoch: int, - miners: List[Tuple[str, str]], - chain_age_years: float, - total_reward_urtc: int -) -> Dict[str, int]: - """ - Calculate rewards using equal bucket split (RECOMMENDED mode). - - The pot is divided EQUALLY among active hardware buckets. - Within each bucket, miners share their slice by time-aged weight. - Fleet members get decayed multipliers WITHIN their bucket. 
- - This is the nuclear option against fleet attacks: - - 500 modern boxes share 1/N of the pot (where N = active buckets) - - 1 solo G4 gets 1/N of the pot all to itself - - The fleet operator's $5M in hardware earns the same TOTAL as one G4 - - Args: - db: Database connection - epoch: Epoch being settled - miners: List of (miner_id, device_arch) tuples - chain_age_years: Chain age for time-aging - total_reward_urtc: Total uRTC to distribute - - Returns: - {miner_id: reward_urtc} - """ - from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier - - if not miners: - return {} - - # Step 1: Fleet detection - fleet_scores = compute_fleet_scores(db, epoch) - - # Step 2: Classify miners into buckets with fleet-decayed weights - buckets = defaultdict(list) # bucket → [(miner_id, decayed_weight)] - - for miner_id, arch in miners: - base = get_time_aged_multiplier(arch, chain_age_years) - fleet_score = fleet_scores.get(miner_id, 0.0) - effective = apply_fleet_decay(base, fleet_score) - bucket = classify_miner_bucket(arch) - buckets[bucket].append((miner_id, effective)) - - # Record - db.execute(""" - UPDATE fleet_scores SET effective_multiplier = ? - WHERE miner = ? AND epoch = ? 
- """, (effective, miner_id, epoch)) - - # Step 3: Split pot equally among active buckets - active_buckets = {b: members for b, members in buckets.items() if members} - num_buckets = len(active_buckets) - - if num_buckets == 0: - return {} - - pot_per_bucket = total_reward_urtc // num_buckets - remainder = total_reward_urtc - (pot_per_bucket * num_buckets) - - # Step 4: Distribute within each bucket by weight - rewards = {} - bucket_index = 0 - - for bucket, members in active_buckets.items(): - # Last bucket gets remainder (rounding dust) - bucket_pot = pot_per_bucket + (remainder if bucket_index == num_buckets - 1 else 0) - - total_weight = sum(w for _, w in members) - if total_weight <= 0: - # Edge case: all weights zero (shouldn't happen) - per_miner = bucket_pot // len(members) - for miner_id, _ in members: - rewards[miner_id] = per_miner - else: - remaining = bucket_pot - for i, (miner_id, weight) in enumerate(members): - if i == len(members) - 1: - share = remaining - else: - share = int((weight / total_weight) * bucket_pot) - remaining -= share - rewards[miner_id] = share - - # Record bucket pressure data - try: - db.execute(""" - INSERT OR REPLACE INTO bucket_pressure - (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) - VALUES (?, ?, ?, ?, ?, ?) - """, (epoch, bucket, len(members), total_weight, - 1.0 / num_buckets, bucket_pot / total_reward_urtc if total_reward_urtc > 0 else 0)) - except Exception: - pass - - bucket_index += 1 - - db.commit() - return rewards - - -def calculate_immune_weights( - db: sqlite3.Connection, - epoch: int, - miners: List[Tuple[str, str]], - chain_age_years: float, - total_reward_urtc: int = 0 -) -> Dict[str, float]: - """ - Calculate immune-system-adjusted weights for epoch reward distribution. - - Main entry point. Dispatches to equal_split or pressure mode based on config. 
- - When BUCKET_MODE = "equal_split" and total_reward_urtc is provided, - returns {miner_id: reward_urtc} (integer rewards, ready to credit). - - When BUCKET_MODE = "pressure", returns {miner_id: adjusted_weight} - (float weights for pro-rata distribution by caller). - - Args: - db: Database connection - epoch: Epoch being settled - miners: List of (miner_id, device_arch) tuples - chain_age_years: Chain age for time-aging calculation - total_reward_urtc: Total reward in uRTC (required for equal_split mode) - - Returns: - {miner_id: value} — either reward_urtc (int) or weight (float) - """ - if BUCKET_MODE == "equal_split" and total_reward_urtc > 0: - return calculate_immune_rewards_equal_split( - db, epoch, miners, chain_age_years, total_reward_urtc - ) - - # Fallback: pressure mode (original behavior) - from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier - - if not miners: - return {} - - # Step 1: Base time-aged multipliers - base_weights = [] - for miner_id, arch in miners: - base = get_time_aged_multiplier(arch, chain_age_years) - base_weights.append((miner_id, arch, base)) - - # Step 2: Fleet detection - fleet_scores = compute_fleet_scores(db, epoch) - - # Step 3: Apply fleet decay - decayed_weights = [] - for miner_id, arch, base in base_weights: - score = fleet_scores.get(miner_id, 0.0) - effective = apply_fleet_decay(base, score) - decayed_weights.append((miner_id, arch, effective)) - - db.execute(""" - UPDATE fleet_scores SET effective_multiplier = ? - WHERE miner = ? AND epoch = ? 
- """, (effective, miner_id, epoch)) - - # Step 4: Bucket pressure normalization - pressure = compute_bucket_pressure(decayed_weights, epoch, db) - - # Step 5: Apply pressure to get final weights - final_weights = {} - for miner_id, arch, weight in decayed_weights: - bucket = classify_miner_bucket(arch) - bucket_factor = pressure.get(bucket, 1.0) - final_weights[miner_id] = weight * bucket_factor - - db.commit() - return final_weights - - -# ═══════════════════════════════════════════════════════════ -# ADMIN / DIAGNOSTIC ENDPOINTS -# ═══════════════════════════════════════════════════════════ - -def get_fleet_report(db: sqlite3.Connection, epoch: int) -> dict: - """Generate a human-readable fleet detection report for an epoch.""" - ensure_schema(db) - - scores = db.execute(""" - SELECT miner, fleet_score, ip_signal, timing_signal, - fingerprint_signal, effective_multiplier - FROM fleet_scores WHERE epoch = ? - ORDER BY fleet_score DESC - """, (epoch,)).fetchall() - - pressure = db.execute(""" - SELECT bucket, miner_count, pressure_factor, raw_weight, adjusted_weight - FROM bucket_pressure WHERE epoch = ? 
- """, (epoch,)).fetchall() - - flagged = [s for s in scores if s[1] > 0.3] - - return { - "epoch": epoch, - "total_miners": len(scores), - "flagged_miners": len(flagged), - "fleet_scores": [ - { - "miner": s[0], - "fleet_score": s[1], - "signals": { - "ip_clustering": s[2], - "timing_correlation": s[3], - "fingerprint_similarity": s[4] - }, - "effective_multiplier": s[5] - } - for s in scores - ], - "bucket_pressure": [ - { - "bucket": p[0], - "miner_count": p[1], - "pressure_factor": p[2], - "raw_weight": p[3], - "adjusted_weight": p[4] - } - for p in pressure - ] - } - - -def register_fleet_endpoints(app, DB_PATH): - """Register Flask endpoints for fleet immune system admin.""" - from flask import request, jsonify - - @app.route('/admin/fleet/report', methods=['GET']) - def fleet_report(): - admin_key = request.headers.get("X-Admin-Key", "") - import os - if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): - return jsonify({"error": "Unauthorized"}), 401 - - epoch = request.args.get('epoch', type=int) - if epoch is None: - from rewards_implementation_rip200 import current_slot, slot_to_epoch - epoch = slot_to_epoch(current_slot()) - 1 - - with sqlite3.connect(DB_PATH) as db: - report = get_fleet_report(db, epoch) - return jsonify(report) - - @app.route('/admin/fleet/scores', methods=['GET']) - def fleet_scores(): - admin_key = request.headers.get("X-Admin-Key", "") - import os - if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): - return jsonify({"error": "Unauthorized"}), 401 - - miner = request.args.get('miner') - limit = request.args.get('limit', 10, type=int) - - with sqlite3.connect(DB_PATH) as db: - if miner: - rows = db.execute(""" - SELECT epoch, fleet_score, ip_signal, timing_signal, - fingerprint_signal, effective_multiplier - FROM fleet_scores WHERE miner = ? - ORDER BY epoch DESC LIMIT ? 
- """, (miner, limit)).fetchall() - else: - rows = db.execute(""" - SELECT miner, epoch, fleet_score, ip_signal, - timing_signal, fingerprint_signal - FROM fleet_scores - WHERE fleet_score > 0.3 - ORDER BY fleet_score DESC LIMIT ? - """, (limit,)).fetchall() - - return jsonify({"scores": [dict(zip( - ["miner", "epoch", "fleet_score", "ip_signal", - "timing_signal", "fingerprint_signal"], r - )) for r in rows]}) - - print("[RIP-201] Fleet immune system endpoints registered") - - -# ═══════════════════════════════════════════════════════════ -# SELF-TEST -# ═══════════════════════════════════════════════════════════ - -if __name__ == "__main__": - print("=" * 60) - print("RIP-201: Fleet Detection Immune System — Self Test") - print("=" * 60) - - # Create in-memory DB - db = sqlite3.connect(":memory:") - ensure_schema(db) - - # Also need miner_attest_recent for the full pipeline - db.execute(""" - CREATE TABLE IF NOT EXISTS miner_attest_recent ( - miner TEXT PRIMARY KEY, - ts_ok INTEGER NOT NULL, - device_family TEXT, - device_arch TEXT, - entropy_score REAL DEFAULT 0.0, - fingerprint_passed INTEGER DEFAULT 0 - ) - """) - - EPOCH = 100 - - # ─── Scenario 1: Healthy diverse network ─── - print("\n--- Scenario 1: Healthy Diverse Network (8 unique miners) ---") - - healthy_miners = [ - ("g4-powerbook-115", "g4", "10.1.1", 1000, 0.092, "cache_a", 0.45, "simd_a"), - ("dual-g4-125", "g4", "10.1.2", 1200, 0.088, "cache_b", 0.52, "simd_b"), - ("ppc-g5-130", "g5", "10.2.1", 1500, 0.105, "cache_c", 0.38, "simd_c"), - ("victus-x86", "modern", "192.168.0", 2000, 0.049, "cache_d", 0.61, "simd_d"), - ("sophia-nas", "modern", "192.168.1", 2300, 0.055, "cache_e", 0.58, "simd_e"), - ("mac-mini-m2", "apple_silicon", "10.3.1", 3000, 0.033, "cache_f", 0.42, "simd_f"), - ("power8-server", "power8", "10.4.1", 4000, 0.071, "cache_g", 0.55, "simd_g"), - ("ryan-factorio", "modern", "76.8.228", 5000, 0.044, "cache_h", 0.63, "simd_h"), - ] - - for m, arch, subnet, ts, cv, cache, thermal, simd 
in healthy_miners: - subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] - db.execute(""" - INSERT OR REPLACE INTO fleet_signals - (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - """, (m, EPOCH, subnet_hash, ts, cv, cache, thermal, simd)) - - db.commit() - scores = compute_fleet_scores(db, EPOCH) - - print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") - print(f" {'─'*25} {'─'*12} {'─'*15}") - for m, arch, *_ in healthy_miners: - s = scores.get(m, 0.0) - status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" - print(f" {m:<25} {s:>12.4f} {status:<15}") - - # ─── Scenario 2: Fleet attack (10 modern boxes, same subnet) ─── - print("\n--- Scenario 2: Fleet Attack (10 modern boxes, same /24) ---") - - EPOCH2 = 101 - fleet_miners = [] - - # 3 legitimate miners - fleet_miners.append(("g4-real-1", "g4", "10.1.1", 1000, 0.092, "cache_real1", 0.45, "simd_real1")) - fleet_miners.append(("g5-real-1", "g5", "10.2.1", 1800, 0.105, "cache_real2", 0.38, "simd_real2")) - fleet_miners.append(("m2-real-1", "apple_silicon", "10.3.1", 2500, 0.033, "cache_real3", 0.42, "simd_real3")) - - # 10 fleet miners — same subnet, similar timing, similar fingerprints - for i in range(10): - fleet_miners.append(( - f"fleet-box-{i}", - "modern", - "203.0.113", # All same /24 subnet - 3000 + i * 5, # Attestation within 50s of each other - 0.048 + i * 0.001, # Nearly identical clock drift - "cache_fleet_shared", # SAME cache timing hash - 0.60 + i * 0.005, # Very similar thermal signatures - "simd_fleet_shared", # SAME SIMD hash - )) - - for m, arch, subnet, ts, cv, cache, thermal, simd in fleet_miners: - subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] - db.execute(""" - INSERT OR REPLACE INTO fleet_signals - (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
- """, (m, EPOCH2, subnet_hash, ts, cv, cache, thermal, simd)) - - db.commit() - scores2 = compute_fleet_scores(db, EPOCH2) - - print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") - print(f" {'─'*25} {'─'*12} {'─'*15}") - for m, arch, *_ in fleet_miners: - s = scores2.get(m, 0.0) - status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" - print(f" {m:<25} {s:>12.4f} {status:<15}") - - # ─── Scenario 3: Bucket pressure ─── - print("\n--- Scenario 3: Bucket Pressure (500 modern vs 3 vintage) ---") - - fleet_attack = [("g4-solo", "g4", 2.5), ("g5-solo", "g5", 2.0), ("g3-solo", "g3", 1.8)] - for i in range(500): - fleet_attack.append((f"modern-{i}", "modern", 1.0)) - - pressure = compute_bucket_pressure(fleet_attack, 200) - - print(f" {'Bucket':<20} {'Pressure':>10} {'Effect':<30}") - print(f" {'─'*20} {'─'*10} {'─'*30}") - for bucket, factor in sorted(pressure.items(), key=lambda x: x[1]): - if factor < 1.0: - effect = f"FLATTENED (each modern box worth {factor:.2f}x)" - elif factor > 1.0: - effect = f"BOOSTED (rare hardware bonus {factor:.2f}x)" - else: - effect = "neutral" - print(f" {bucket:<20} {factor:>10.4f} {effect:<30}") - - # ─── Scenario 4: Fleet decay on multipliers ─── - print("\n--- Scenario 4: Fleet Decay Examples ---") - - examples = [ - ("G4 (solo)", 2.5, 0.0), - ("G4 (mild fleet)", 2.5, 0.3), - ("G4 (strong fleet)", 2.5, 0.7), - ("G4 (confirmed fleet)", 2.5, 1.0), - ("Modern (solo)", 1.0, 0.0), - ("Modern (strong fleet)", 1.0, 0.7), - ("Modern (confirmed fleet)", 1.0, 1.0), - ] - - print(f" {'Miner Type':<25} {'Base':>6} {'Fleet':>7} {'Effective':>10} {'Decay':>8}") - print(f" {'─'*25} {'─'*6} {'─'*7} {'─'*10} {'─'*8}") - for name, base, score in examples: - eff = apply_fleet_decay(base, score) - decay_pct = (1.0 - eff/base) * 100 if base > 0 else 0 - print(f" {name:<25} {base:>6.2f} {score:>7.2f} {eff:>10.3f} {decay_pct:>7.1f}%") - - # ─── Combined effect ─── - print("\n--- Combined: 500 Modern Fleet vs 3 Vintage Solo ---") - 
print(" Without immune system:") - total_w_no_immune = 500 * 1.0 + 2.5 + 2.0 + 1.8 - g4_share = (2.5 / total_w_no_immune) * 1.5 - modern_total = (500 * 1.0 / total_w_no_immune) * 1.5 - modern_each = modern_total / 500 - print(f" G4 solo: {g4_share:.6f} RTC/epoch") - print(f" 500 modern fleet: {modern_total:.6f} RTC/epoch total ({modern_each:.8f} each)") - print(f" Fleet ROI: {modern_total/g4_share:.1f}x the G4 solo reward") - - print("\n With RIP-201 PRESSURE mode (soft):") - fleet_eff = apply_fleet_decay(1.0, 0.8) # ~0.68 - g4_eff = 2.5 # Solo, no decay - bucket_p_modern = compute_bucket_pressure( - [("g4", "g4", g4_eff), ("g5", "g5", 2.0), ("g3", "g3", 1.8)] + - [(f"m{i}", "modern", fleet_eff) for i in range(500)], - 999 - ) - modern_p = bucket_p_modern.get("modern", 1.0) - vintage_p = bucket_p_modern.get("vintage_powerpc", 1.0) - - g4_final = g4_eff * vintage_p - modern_final = fleet_eff * modern_p - total_w_immune = g4_final + 2.0 * vintage_p + 1.8 * vintage_p + 500 * modern_final - g4_share_immune = (g4_final / total_w_immune) * 1.5 - modern_total_immune = (500 * modern_final / total_w_immune) * 1.5 - modern_each_immune = modern_total_immune / 500 - - print(f" Fleet score: 0.80 → multiplier decay to {fleet_eff:.3f}") - print(f" Modern pressure: {modern_p:.4f} (bucket flattened)") - print(f" Vintage pressure: {vintage_p:.4f} (bucket boosted)") - print(f" G4 solo: {g4_share_immune:.6f} RTC/epoch") - print(f" 500 modern fleet: {modern_total_immune:.6f} RTC/epoch total ({modern_each_immune:.8f} each)") - print(f" Fleet ROI: {modern_total_immune/g4_share_immune:.1f}x the G4 solo reward") - - # ─── Equal Split mode (the real defense) ─── - print("\n With RIP-201 EQUAL SPLIT mode (RECOMMENDED):") - print(" Pot split: 1.5 RTC ÷ 2 active buckets = 0.75 RTC each") - - # In equal split: vintage_powerpc bucket gets 0.75 RTC, modern bucket gets 0.75 RTC - vintage_pot = 0.75 # RTC - modern_pot = 0.75 # RTC - - # Within vintage bucket: 3 miners split 0.75 by weight - 
vintage_total_w = 2.5 + 2.0 + 1.8 - g4_equal = (2.5 / vintage_total_w) * vintage_pot - g5_equal = (2.0 / vintage_total_w) * vintage_pot - g3_equal = (1.8 / vintage_total_w) * vintage_pot - - # Within modern bucket: 500 fleet miners split 0.75 by decayed weight - modern_each_equal = modern_pot / 500 # Equal weight within bucket (all modern) - - print(f" Vintage bucket (3 miners share 0.75 RTC):") - print(f" G4 solo: {g4_equal:.6f} RTC/epoch") - print(f" G5 solo: {g5_equal:.6f} RTC/epoch") - print(f" G3 solo: {g3_equal:.6f} RTC/epoch") - print(f" Modern bucket (500 fleet share 0.75 RTC):") - print(f" Each fleet box: {modern_each_equal:.8f} RTC/epoch") - print(f" Fleet ROI: {modern_pot/g4_equal:.1f}x the G4 solo reward (TOTAL fleet)") - print(f" Per-box ROI: {modern_each_equal/g4_equal:.4f}x (each fleet box vs G4)") - print(f" Fleet gets: {modern_pot/1.5*100:.0f}% of pot (was {modern_total/1.5*100:.0f}%)") - print(f" G4 earns: {g4_equal/g4_share:.0f}x more than without immune system") - - # ─── The economics ─── - print("\n === ECONOMIC IMPACT ===") - print(f" Without immune: 500 boxes earn {modern_total:.4f} RTC/epoch = {modern_total*365:.1f} RTC/year") - print(f" With equal split: 500 boxes earn {modern_pot:.4f} RTC/epoch = {modern_pot*365:.1f} RTC/year") - hardware_cost = 5_000_000 # $5M - rtc_value = 0.10 # $0.10/RTC - annual_no_immune = modern_total * 365 * rtc_value - annual_equal = modern_pot * 365 * rtc_value - years_to_roi_no = hardware_cost / annual_no_immune if annual_no_immune > 0 else float('inf') - years_to_roi_eq = hardware_cost / annual_equal if annual_equal > 0 else float('inf') - print(f" At $0.10/RTC, fleet annual revenue:") - print(f" No immune: ${annual_no_immune:,.2f}/year → ROI in {years_to_roi_no:,.0f} years") - print(f" Equal split: ${annual_equal:,.2f}/year → ROI in {years_to_roi_eq:,.0f} years") - print(f" A $5M hardware fleet NEVER pays for itself. 
Attack neutralized.") - - print("\n" + "=" * 60) - print("RIP-201 self-test complete.") - print("One of everything beats a hundred of one thing.") - print("=" * 60) +#!/usr/bin/env python3 +""" +RIP-201: Fleet Detection Immune System +======================================= + +Protects RustChain reward economics from fleet-scale attacks where a single +actor deploys many machines (real or emulated) to dominate the reward pool. + +Core Principles: + 1. Anti-homogeneity, not anti-modern — diversity IS the immune system + 2. Bucket normalization — rewards split by hardware CLASS, not per-CPU + 3. Fleet signal detection — IP clustering, timing correlation, fingerprint similarity + 4. Multiplier decay — suspected fleet members get diminishing returns + 5. Pressure feedback — overrepresented classes get flattened, rare ones get boosted + +Design Axiom: + "One of everything beats a hundred of one thing." + +Integration: + Called from calculate_epoch_rewards_time_aged() BEFORE distributing rewards. + Requires fleet_signals table populated by submit_attestation(). 
+ +Author: Scott Boudreaux / Elyan Labs +Date: 2026-02-28 +""" + +import hashlib +import math +import sqlite3 +import time +from collections import defaultdict +from typing import Dict, List, Optional, Tuple + +# ═══════════════════════════════════════════════════════════ +# CONFIGURATION +# ═══════════════════════════════════════════════════════════ + +# Hardware class buckets — rewards split equally across these +HARDWARE_BUCKETS = { + "vintage_powerpc": ["g3", "g4", "g5", "powerpc", "powerpc g3", "powerpc g4", + "powerpc g5", "powerpc g3 (750)", "powerpc g4 (74xx)", + "powerpc g5 (970)", "power macintosh"], + "vintage_x86": ["pentium", "pentium4", "retro", "core2", "core2duo", + "nehalem", "sandybridge"], + "apple_silicon": ["apple_silicon", "m1", "m2", "m3"], + "modern": ["modern", "x86_64"], + "exotic": ["power8", "power9", "sparc", "mips", "riscv", "s390x"], + "arm": ["aarch64", "arm", "armv7", "armv7l"], + "retro_console": ["nes_6502", "snes_65c816", "n64_mips", "gba_arm7", + "genesis_68000", "sms_z80", "saturn_sh2", + "gameboy_z80", "gameboy_color_z80", "ps1_mips", + "6502", "65c816", "z80", "sh2"], +} + +# Reverse lookup: arch → bucket name +ARCH_TO_BUCKET = {} +for bucket, archs in HARDWARE_BUCKETS.items(): + for arch in archs: + ARCH_TO_BUCKET[arch] = bucket + +# Fleet detection thresholds +FLEET_SUBNET_THRESHOLD = 3 # 3+ miners from same /24 = signal +FLEET_TIMING_WINDOW_S = 30 # Attestations within 30s = correlated +FLEET_TIMING_THRESHOLD = 0.6 # 60%+ of attestations correlated = signal +FLEET_FINGERPRINT_THRESHOLD = 0.85 # Cosine similarity > 0.85 = signal + +# Fleet score → multiplier decay +# fleet_score 0.0 = solo miner (no decay) +# fleet_score 1.0 = definite fleet (max decay) +FLEET_DECAY_COEFF = 0.4 # Max 40% reduction at fleet_score=1.0 +FLEET_SCORE_FLOOR = 0.6 # Never decay below 60% of base multiplier + +# Bucket normalization mode +# "equal_split" = hard split: each active bucket gets equal share of pot (RECOMMENDED) +# "pressure" = soft: 
overrepresented buckets get flattened multiplier +BUCKET_MODE = "equal_split" + +# Bucket pressure parameters (used when BUCKET_MODE = "pressure") +BUCKET_IDEAL_SHARE = None # Auto-calculated as 1/num_active_buckets +BUCKET_PRESSURE_STRENGTH = 0.5 # How aggressively to flatten overrepresented buckets +BUCKET_MIN_WEIGHT = 0.3 # Minimum bucket weight (even if massively overrepresented) + +# Minimum miners to trigger fleet detection (below this, everyone is solo) +FLEET_DETECTION_MINIMUM = 4 + + +# ═══════════════════════════════════════════════════════════ +# DATABASE SCHEMA +# ═══════════════════════════════════════════════════════════ + +SCHEMA_SQL = """ +-- Fleet signal tracking per attestation +CREATE TABLE IF NOT EXISTS fleet_signals ( + miner TEXT NOT NULL, + epoch INTEGER NOT NULL, + subnet_hash TEXT, -- HMAC of /24 subnet for privacy + attest_ts INTEGER NOT NULL, -- Exact attestation timestamp + clock_drift_cv REAL, -- Clock drift coefficient of variation + cache_latency_hash TEXT, -- Hash of cache timing profile + thermal_signature REAL, -- Thermal drift entropy value + simd_bias_hash TEXT, -- Hash of SIMD timing profile + PRIMARY KEY (miner, epoch) +); + +-- Fleet detection results per epoch +CREATE TABLE IF NOT EXISTS fleet_scores ( + miner TEXT NOT NULL, + epoch INTEGER NOT NULL, + fleet_score REAL NOT NULL DEFAULT 0.0, -- 0.0=solo, 1.0=definite fleet + ip_signal REAL DEFAULT 0.0, + timing_signal REAL DEFAULT 0.0, + fingerprint_signal REAL DEFAULT 0.0, + cluster_id TEXT, -- Fleet cluster identifier + effective_multiplier REAL, -- After decay + PRIMARY KEY (miner, epoch) +); + +-- Bucket pressure tracking per epoch +CREATE TABLE IF NOT EXISTS bucket_pressure ( + epoch INTEGER NOT NULL, + bucket TEXT NOT NULL, + miner_count INTEGER NOT NULL, + raw_weight REAL NOT NULL, + pressure_factor REAL NOT NULL, -- <1.0 = overrepresented, >1.0 = rare + adjusted_weight REAL NOT NULL, + PRIMARY KEY (epoch, bucket) +); + +-- Fleet cluster registry +CREATE TABLE IF NOT 
EXISTS fleet_clusters ( + cluster_id TEXT PRIMARY KEY, + first_seen_epoch INTEGER NOT NULL, + last_seen_epoch INTEGER NOT NULL, + member_count INTEGER NOT NULL, + detection_signals TEXT, -- JSON: which signals triggered + cumulative_score REAL DEFAULT 0.0 +); +""" + + +def ensure_schema(db: sqlite3.Connection): + """Create fleet immune system tables if they don't exist.""" + db.executescript(SCHEMA_SQL) + db.commit() + + +# ═══════════════════════════════════════════════════════════ +# SIGNAL COLLECTION (called from submit_attestation) +# ═══════════════════════════════════════════════════════════ + +def record_fleet_signals_from_request( + db: sqlite3.Connection, + miner: str, + epoch: int, + ip_address: str, + attest_ts: int, + fingerprint: Optional[dict] = None +): + """ + Record fleet detection signals from an attestation submission. + + Called from submit_attestation() after validation passes. + Stores privacy-preserving hashes of network and fingerprint data. + """ + ensure_schema(db) + + # Hash the /24 subnet for privacy-preserving network clustering + if ip_address: + parts = ip_address.split('.') + if len(parts) == 4: + subnet = '.'.join(parts[:3]) + subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] + else: + subnet_hash = hashlib.sha256(ip_address.encode()).hexdigest()[:16] + else: + subnet_hash = None + + # Extract fingerprint signals + clock_drift_cv = None + cache_hash = None + thermal_sig = None + simd_hash = None + + if fingerprint and isinstance(fingerprint, dict): + checks = fingerprint.get("checks", {}) + + # Clock drift coefficient of variation + clock = checks.get("clock_drift", {}).get("data", {}) + clock_drift_cv = clock.get("cv") + + # Cache timing profile hash (privacy-preserving) + cache = checks.get("cache_timing", {}).get("data", {}) + if cache: + cache_str = str(sorted(cache.items())) + cache_hash = hashlib.sha256(cache_str.encode()).hexdigest()[:16] + + # Thermal drift entropy + thermal = checks.get("thermal_drift", 
{}).get("data", {}) + thermal_sig = thermal.get("entropy", thermal.get("drift_magnitude")) + + # SIMD bias profile hash + simd = checks.get("simd_identity", {}).get("data", {}) + if simd: + simd_str = str(sorted(simd.items())) + simd_hash = hashlib.sha256(simd_str.encode()).hexdigest()[:16] + + db.execute(""" + INSERT OR REPLACE INTO fleet_signals + (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_hash, thermal_sig, simd_hash)) + db.commit() + + +def record_fleet_signals(db_path_or_conn, miner: str, device: dict, + signals: dict, fingerprint: Optional[dict], + attest_ts: int, ip_address: str = None, + epoch: int = None): + """ + Convenience wrapper called from record_attestation_success(). + + Accepts either a DB path (str) or connection, and extracts + the IP from signals if not provided explicitly. + """ + import time as _time + + if isinstance(db_path_or_conn, str): + db = sqlite3.connect(db_path_or_conn) + own = True + else: + db = db_path_or_conn + own = False + + try: + # Get epoch from current time if not provided + if epoch is None: + GENESIS = 1764706927 + BLOCK_TIME = 600 + slot = (int(_time.time()) - GENESIS) // BLOCK_TIME + epoch = slot // 144 + + # Extract IP from signals or request + if not ip_address: + ip_address = signals.get("ip", signals.get("remote_addr", "")) + + record_fleet_signals_from_request(db, miner, epoch, ip_address, + attest_ts, fingerprint) + except Exception as e: + print(f"[RIP-201] Fleet signal recording error: {e}") + finally: + if own: + db.close() + + +# ═══════════════════════════════════════════════════════════ +# FLEET DETECTION ENGINE +# ═══════════════════════════════════════════════════════════ + +def _detect_ip_clustering( + signals: List[dict] +) -> Dict[str, float]: + """ + Detect miners sharing the same /24 subnet. 
+ + Returns: {miner_id: ip_signal} where ip_signal = 0.0-1.0 + """ + scores = {} + + # Group by subnet hash + subnet_groups = defaultdict(list) + for sig in signals: + if sig["subnet_hash"]: + subnet_groups[sig["subnet_hash"]].append(sig["miner"]) + + # Miners in large subnet groups get higher fleet signal + for subnet, miners in subnet_groups.items(): + count = len(miners) + if count >= FLEET_SUBNET_THRESHOLD: + # Signal scales with cluster size: 3→0.3, 5→0.5, 10→0.8, 20+→1.0 + signal = min(1.0, count / 20.0 + 0.15) + for m in miners: + scores[m] = max(scores.get(m, 0.0), signal) + + # Solo miners or small groups: 0.0 + for sig in signals: + if sig["miner"] not in scores: + scores[sig["miner"]] = 0.0 + + return scores + + +def _detect_timing_correlation( + signals: List[dict] +) -> Dict[str, float]: + """ + Detect miners whose attestation timestamps are suspiciously synchronized. + + Fleet operators often update all miners in rapid succession. + Real independent operators attest at random times throughout the day. + """ + scores = {} + if len(signals) < FLEET_DETECTION_MINIMUM: + return {s["miner"]: 0.0 for s in signals} + + timestamps = [(s["miner"], s["attest_ts"]) for s in signals] + timestamps.sort(key=lambda x: x[1]) + + # For each miner, count how many others attested within TIMING_WINDOW + for i, (miner_a, ts_a) in enumerate(timestamps): + correlated = 0 + total_others = len(timestamps) - 1 + for j, (miner_b, ts_b) in enumerate(timestamps): + if i == j: + continue + if abs(ts_a - ts_b) <= FLEET_TIMING_WINDOW_S: + correlated += 1 + + if total_others > 0: + ratio = correlated / total_others + if ratio >= FLEET_TIMING_THRESHOLD: + # High correlation → fleet signal + scores[miner_a] = min(1.0, ratio) + else: + scores[miner_a] = 0.0 + else: + scores[miner_a] = 0.0 + + return scores + + +def _detect_fingerprint_similarity( + signals: List[dict] +) -> Dict[str, float]: + """ + Detect miners with suspiciously similar hardware fingerprints. 
+ + Identical cache timing profiles, SIMD bias, or thermal signatures + across different "machines" indicate shared hardware or VMs on same host. + """ + scores = {} + if len(signals) < FLEET_DETECTION_MINIMUM: + return {s["miner"]: 0.0 for s in signals} + + # Build similarity groups from hash matches + # Miners sharing 2+ fingerprint hashes are likely same hardware + for i, sig_a in enumerate(signals): + matches = 0 + match_count = 0 + + for j, sig_b in enumerate(signals): + if i == j: + continue + + shared_hashes = 0 + total_hashes = 0 + + # Compare cache timing hash + if sig_a.get("cache_latency_hash") and sig_b.get("cache_latency_hash"): + total_hashes += 1 + if sig_a["cache_latency_hash"] == sig_b["cache_latency_hash"]: + shared_hashes += 1 + + # Compare SIMD bias hash + if sig_a.get("simd_bias_hash") and sig_b.get("simd_bias_hash"): + total_hashes += 1 + if sig_a["simd_bias_hash"] == sig_b["simd_bias_hash"]: + shared_hashes += 1 + + # Compare clock drift CV (within 5% = suspiciously similar) + if sig_a.get("clock_drift_cv") and sig_b.get("clock_drift_cv"): + total_hashes += 1 + cv_a, cv_b = sig_a["clock_drift_cv"], sig_b["clock_drift_cv"] + if cv_b > 0 and abs(cv_a - cv_b) / cv_b < 0.05: + shared_hashes += 1 + + # Compare thermal signature (within 10%) + if sig_a.get("thermal_signature") and sig_b.get("thermal_signature"): + total_hashes += 1 + th_a, th_b = sig_a["thermal_signature"], sig_b["thermal_signature"] + if th_b > 0 and abs(th_a - th_b) / th_b < 0.10: + shared_hashes += 1 + + if total_hashes >= 2 and shared_hashes >= 2: + matches += 1 + + # Signal based on how many OTHER miners look like this one + if matches > 0: + # 1 match → 0.3, 2 → 0.5, 5+ → 0.8+ + scores[sig_a["miner"]] = min(1.0, 0.2 + matches * 0.15) + else: + scores[sig_a["miner"]] = 0.0 + + return scores + + +def compute_fleet_scores( + db: sqlite3.Connection, + epoch: int +) -> Dict[str, float]: + """ + Run all fleet detection algorithms and produce composite fleet scores. 
+ + Returns: {miner_id: fleet_score} where 0.0=solo, 1.0=definite fleet + """ + ensure_schema(db) + + # Fetch signals for this epoch + rows = db.execute(""" + SELECT miner, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash + FROM fleet_signals + WHERE epoch = ? + """, (epoch,)).fetchall() + + if not rows or len(rows) < FLEET_DETECTION_MINIMUM: + # Not enough miners to detect fleets — everyone is solo + return {row[0]: 0.0 for row in rows} + + signals = [] + for row in rows: + signals.append({ + "miner": row[0], + "subnet_hash": row[1], + "attest_ts": row[2], + "clock_drift_cv": row[3], + "cache_latency_hash": row[4], + "thermal_signature": row[5], + "simd_bias_hash": row[6], + }) + + # Run detection algorithms + ip_scores = _detect_ip_clustering(signals) + timing_scores = _detect_timing_correlation(signals) + fingerprint_scores = _detect_fingerprint_similarity(signals) + + # Composite score: weighted average of signals + # IP clustering is strongest signal (hard to fake different subnets) + # Fingerprint similarity is second (hardware-level evidence) + # Timing correlation is supplementary (could be coincidental) + composite = {} + for sig in signals: + m = sig["miner"] + ip = ip_scores.get(m, 0.0) + timing = timing_scores.get(m, 0.0) + fp = fingerprint_scores.get(m, 0.0) + + # Weighted composite: IP 40%, fingerprint 40%, timing 20% + score = (ip * 0.4) + (fp * 0.4) + (timing * 0.2) + + # Boost: if ANY two signals fire, amplify + fired = sum(1 for s in [ip, fp, timing] if s > 0.3) + if fired >= 2: + score = min(1.0, score * 1.3) + + composite[m] = round(score, 4) + + # Record to DB for audit trail + db.execute(""" + INSERT OR REPLACE INTO fleet_scores + (miner, epoch, fleet_score, ip_signal, timing_signal, + fingerprint_signal) + VALUES (?, ?, ?, ?, ?, ?) 
+ """, (m, epoch, composite[m], ip, timing, fp)) + + db.commit() + return composite + + +# ═══════════════════════════════════════════════════════════ +# BUCKET NORMALIZATION +# ═══════════════════════════════════════════════════════════ + +def classify_miner_bucket(device_arch: str) -> str: + """Map a device architecture to its hardware bucket.""" + return ARCH_TO_BUCKET.get(device_arch.lower(), "modern") + + +def compute_bucket_pressure( + miners: List[Tuple[str, str, float]], + epoch: int, + db: Optional[sqlite3.Connection] = None +) -> Dict[str, float]: + """ + Compute pressure factors for each hardware bucket. + + If a bucket is overrepresented (more miners than its fair share), + its pressure factor drops below 1.0 — reducing rewards for that class. + Underrepresented buckets get boosted above 1.0. + + Args: + miners: List of (miner_id, device_arch, base_weight) tuples + epoch: Current epoch number + db: Optional DB connection for recording + + Returns: + {bucket_name: pressure_factor} + """ + # Count miners and total weight per bucket + bucket_counts = defaultdict(int) + bucket_weights = defaultdict(float) + bucket_miners = defaultdict(list) + + for miner_id, arch, weight in miners: + bucket = classify_miner_bucket(arch) + bucket_counts[bucket] += 1 + bucket_weights[bucket] += weight + bucket_miners[bucket].append(miner_id) + + active_buckets = [b for b in bucket_counts if bucket_counts[b] > 0] + num_active = len(active_buckets) + + if num_active == 0: + return {} + + # Ideal: equal miner count per bucket + total_miners = sum(bucket_counts.values()) + ideal_per_bucket = total_miners / num_active + + pressure = {} + for bucket in active_buckets: + count = bucket_counts[bucket] + ratio = count / ideal_per_bucket # >1 = overrepresented, <1 = rare + + if ratio > 1.0: + # Overrepresented: apply diminishing returns + # ratio 2.0 → pressure ~0.7, ratio 5.0 → pressure ~0.45 + factor = 1.0 / (1.0 + BUCKET_PRESSURE_STRENGTH * (ratio - 1.0)) + factor = 
max(BUCKET_MIN_WEIGHT, factor) + else: + # Underrepresented: boost (up to 1.5x) + factor = 1.0 + (1.0 - ratio) * 0.5 + factor = min(1.5, factor) + + pressure[bucket] = round(factor, 4) + + # Record to DB + if db: + try: + db.execute(""" + INSERT OR REPLACE INTO bucket_pressure + (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) + VALUES (?, ?, ?, ?, ?, ?) + """, (epoch, bucket, count, bucket_weights[bucket], + factor, bucket_weights[bucket] * factor)) + except Exception: + pass # Non-critical recording + + if db: + try: + db.commit() + except Exception: + pass + + return pressure + + +# ═══════════════════════════════════════════════════════════ +# IMMUNE-ADJUSTED REWARD CALCULATION +# ═══════════════════════════════════════════════════════════ + +def apply_fleet_decay( + base_multiplier: float, + fleet_score: float +) -> float: + """ + Apply fleet detection decay to a miner's base multiplier. + + fleet_score 0.0 → no decay (solo miner) + fleet_score 1.0 → maximum decay (confirmed fleet) + + Formula: effective = base × (1.0 - fleet_score × DECAY_COEFF) + Floor: Never below FLEET_SCORE_FLOOR × base + + Examples (base=2.5 G4): + fleet_score=0.0 → 2.5 (solo miner, full bonus) + fleet_score=0.3 → 2.2 (some fleet signals) + fleet_score=0.7 → 1.8 (strong fleet signals) + fleet_score=1.0 → 1.5 (confirmed fleet, 40% decay) + """ + decay = fleet_score * FLEET_DECAY_COEFF + effective = base_multiplier * (1.0 - decay) + floor = base_multiplier * FLEET_SCORE_FLOOR + return max(floor, effective) + + +def calculate_immune_rewards_equal_split( + db: sqlite3.Connection, + epoch: int, + miners: List[Tuple[str, str]], + chain_age_years: float, + total_reward_urtc: int +) -> Dict[str, int]: + """ + Calculate rewards using equal bucket split (RECOMMENDED mode). + + The pot is divided EQUALLY among active hardware buckets. + Within each bucket, miners share their slice by time-aged weight. + Fleet members get decayed multipliers WITHIN their bucket. 
+ + This is the nuclear option against fleet attacks: + - 500 modern boxes share 1/N of the pot (where N = active buckets) + - 1 solo G4 gets 1/N of the pot all to itself + - The fleet operator's $5M in hardware earns the same TOTAL as one G4 + + Args: + db: Database connection + epoch: Epoch being settled + miners: List of (miner_id, device_arch) tuples + chain_age_years: Chain age for time-aging + total_reward_urtc: Total uRTC to distribute + + Returns: + {miner_id: reward_urtc} + """ + from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier + + if not miners: + return {} + + # Step 1: Fleet detection + fleet_scores = compute_fleet_scores(db, epoch) + + # Step 2: Classify miners into buckets with fleet-decayed weights + buckets = defaultdict(list) # bucket → [(miner_id, decayed_weight)] + + for miner_id, arch in miners: + base = get_time_aged_multiplier(arch, chain_age_years) + fleet_score = fleet_scores.get(miner_id, 0.0) + effective = apply_fleet_decay(base, fleet_score) + bucket = classify_miner_bucket(arch) + buckets[bucket].append((miner_id, effective)) + + # Record + db.execute(""" + UPDATE fleet_scores SET effective_multiplier = ? + WHERE miner = ? AND epoch = ? 
+ """, (effective, miner_id, epoch)) + + # Step 3: Split pot equally among active buckets + active_buckets = {b: members for b, members in buckets.items() if members} + num_buckets = len(active_buckets) + + if num_buckets == 0: + return {} + + pot_per_bucket = total_reward_urtc // num_buckets + remainder = total_reward_urtc - (pot_per_bucket * num_buckets) + + # Step 4: Distribute within each bucket by weight + rewards = {} + bucket_index = 0 + + for bucket, members in active_buckets.items(): + # Last bucket gets remainder (rounding dust) + bucket_pot = pot_per_bucket + (remainder if bucket_index == num_buckets - 1 else 0) + + total_weight = sum(w for _, w in members) + if total_weight <= 0: + # Edge case: all weights zero (shouldn't happen) + per_miner = bucket_pot // len(members) + for miner_id, _ in members: + rewards[miner_id] = per_miner + else: + remaining = bucket_pot + for i, (miner_id, weight) in enumerate(members): + if i == len(members) - 1: + share = remaining + else: + share = int((weight / total_weight) * bucket_pot) + remaining -= share + rewards[miner_id] = share + + # Record bucket pressure data + try: + db.execute(""" + INSERT OR REPLACE INTO bucket_pressure + (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) + VALUES (?, ?, ?, ?, ?, ?) + """, (epoch, bucket, len(members), total_weight, + 1.0 / num_buckets, bucket_pot / total_reward_urtc if total_reward_urtc > 0 else 0)) + except Exception: + pass + + bucket_index += 1 + + db.commit() + return rewards + + +def calculate_immune_weights( + db: sqlite3.Connection, + epoch: int, + miners: List[Tuple[str, str]], + chain_age_years: float, + total_reward_urtc: int = 0 +) -> Dict[str, float]: + """ + Calculate immune-system-adjusted weights for epoch reward distribution. + + Main entry point. Dispatches to equal_split or pressure mode based on config. 
+ + When BUCKET_MODE = "equal_split" and total_reward_urtc is provided, + returns {miner_id: reward_urtc} (integer rewards, ready to credit). + + When BUCKET_MODE = "pressure", returns {miner_id: adjusted_weight} + (float weights for pro-rata distribution by caller). + + Args: + db: Database connection + epoch: Epoch being settled + miners: List of (miner_id, device_arch) tuples + chain_age_years: Chain age for time-aging calculation + total_reward_urtc: Total reward in uRTC (required for equal_split mode) + + Returns: + {miner_id: value} — either reward_urtc (int) or weight (float) + """ + if BUCKET_MODE == "equal_split" and total_reward_urtc > 0: + return calculate_immune_rewards_equal_split( + db, epoch, miners, chain_age_years, total_reward_urtc + ) + + # Fallback: pressure mode (original behavior) + from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier + + if not miners: + return {} + + # Step 1: Base time-aged multipliers + base_weights = [] + for miner_id, arch in miners: + base = get_time_aged_multiplier(arch, chain_age_years) + base_weights.append((miner_id, arch, base)) + + # Step 2: Fleet detection + fleet_scores = compute_fleet_scores(db, epoch) + + # Step 3: Apply fleet decay + decayed_weights = [] + for miner_id, arch, base in base_weights: + score = fleet_scores.get(miner_id, 0.0) + effective = apply_fleet_decay(base, score) + decayed_weights.append((miner_id, arch, effective)) + + db.execute(""" + UPDATE fleet_scores SET effective_multiplier = ? + WHERE miner = ? AND epoch = ? 
+ """, (effective, miner_id, epoch)) + + # Step 4: Bucket pressure normalization + pressure = compute_bucket_pressure(decayed_weights, epoch, db) + + # Step 5: Apply pressure to get final weights + final_weights = {} + for miner_id, arch, weight in decayed_weights: + bucket = classify_miner_bucket(arch) + bucket_factor = pressure.get(bucket, 1.0) + final_weights[miner_id] = weight * bucket_factor + + db.commit() + return final_weights + + +# ═══════════════════════════════════════════════════════════ +# ADMIN / DIAGNOSTIC ENDPOINTS +# ═══════════════════════════════════════════════════════════ + +def get_fleet_report(db: sqlite3.Connection, epoch: int) -> dict: + """Generate a human-readable fleet detection report for an epoch.""" + ensure_schema(db) + + scores = db.execute(""" + SELECT miner, fleet_score, ip_signal, timing_signal, + fingerprint_signal, effective_multiplier + FROM fleet_scores WHERE epoch = ? + ORDER BY fleet_score DESC + """, (epoch,)).fetchall() + + pressure = db.execute(""" + SELECT bucket, miner_count, pressure_factor, raw_weight, adjusted_weight + FROM bucket_pressure WHERE epoch = ? 
+ """, (epoch,)).fetchall() + + flagged = [s for s in scores if s[1] > 0.3] + + return { + "epoch": epoch, + "total_miners": len(scores), + "flagged_miners": len(flagged), + "fleet_scores": [ + { + "miner": s[0], + "fleet_score": s[1], + "signals": { + "ip_clustering": s[2], + "timing_correlation": s[3], + "fingerprint_similarity": s[4] + }, + "effective_multiplier": s[5] + } + for s in scores + ], + "bucket_pressure": [ + { + "bucket": p[0], + "miner_count": p[1], + "pressure_factor": p[2], + "raw_weight": p[3], + "adjusted_weight": p[4] + } + for p in pressure + ] + } + + +def register_fleet_endpoints(app, DB_PATH): + """Register Flask endpoints for fleet immune system admin.""" + from flask import request, jsonify + + @app.route('/admin/fleet/report', methods=['GET']) + def fleet_report(): + admin_key = request.headers.get("X-Admin-Key", "") + import os + if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): + return jsonify({"error": "Unauthorized"}), 401 + + epoch = request.args.get('epoch', type=int) + if epoch is None: + from rewards_implementation_rip200 import current_slot, slot_to_epoch + epoch = slot_to_epoch(current_slot()) - 1 + + with sqlite3.connect(DB_PATH) as db: + report = get_fleet_report(db, epoch) + return jsonify(report) + + @app.route('/admin/fleet/scores', methods=['GET']) + def fleet_scores(): + admin_key = request.headers.get("X-Admin-Key", "") + import os + if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): + return jsonify({"error": "Unauthorized"}), 401 + + miner = request.args.get('miner') + limit = request.args.get('limit', 10, type=int) + + with sqlite3.connect(DB_PATH) as db: + if miner: + rows = db.execute(""" + SELECT epoch, fleet_score, ip_signal, timing_signal, + fingerprint_signal, effective_multiplier + FROM fleet_scores WHERE miner = ? + ORDER BY epoch DESC LIMIT ? 
+ """, (miner, limit)).fetchall() + else: + rows = db.execute(""" + SELECT miner, epoch, fleet_score, ip_signal, + timing_signal, fingerprint_signal + FROM fleet_scores + WHERE fleet_score > 0.3 + ORDER BY fleet_score DESC LIMIT ? + """, (limit,)).fetchall() + + return jsonify({"scores": [dict(zip( + ["miner", "epoch", "fleet_score", "ip_signal", + "timing_signal", "fingerprint_signal"], r + )) for r in rows]}) + + print("[RIP-201] Fleet immune system endpoints registered") + + +# ═══════════════════════════════════════════════════════════ +# SELF-TEST +# ═══════════════════════════════════════════════════════════ + +if __name__ == "__main__": + print("=" * 60) + print("RIP-201: Fleet Detection Immune System — Self Test") + print("=" * 60) + + # Create in-memory DB + db = sqlite3.connect(":memory:") + ensure_schema(db) + + # Also need miner_attest_recent for the full pipeline + db.execute(""" + CREATE TABLE IF NOT EXISTS miner_attest_recent ( + miner TEXT PRIMARY KEY, + ts_ok INTEGER NOT NULL, + device_family TEXT, + device_arch TEXT, + entropy_score REAL DEFAULT 0.0, + fingerprint_passed INTEGER DEFAULT 0 + ) + """) + + EPOCH = 100 + + # ─── Scenario 1: Healthy diverse network ─── + print("\n--- Scenario 1: Healthy Diverse Network (8 unique miners) ---") + + healthy_miners = [ + ("g4-powerbook-115", "g4", "10.1.1", 1000, 0.092, "cache_a", 0.45, "simd_a"), + ("dual-g4-125", "g4", "10.1.2", 1200, 0.088, "cache_b", 0.52, "simd_b"), + ("ppc-g5-130", "g5", "10.2.1", 1500, 0.105, "cache_c", 0.38, "simd_c"), + ("victus-x86", "modern", "192.168.0", 2000, 0.049, "cache_d", 0.61, "simd_d"), + ("sophia-nas", "modern", "192.168.1", 2300, 0.055, "cache_e", 0.58, "simd_e"), + ("mac-mini-m2", "apple_silicon", "10.3.1", 3000, 0.033, "cache_f", 0.42, "simd_f"), + ("power8-server", "power8", "10.4.1", 4000, 0.071, "cache_g", 0.55, "simd_g"), + ("ryan-factorio", "modern", "76.8.228", 5000, 0.044, "cache_h", 0.63, "simd_h"), + ] + + for m, arch, subnet, ts, cv, cache, thermal, simd 
in healthy_miners: + subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] + db.execute(""" + INSERT OR REPLACE INTO fleet_signals + (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, (m, EPOCH, subnet_hash, ts, cv, cache, thermal, simd)) + + db.commit() + scores = compute_fleet_scores(db, EPOCH) + + print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") + print(f" {'─'*25} {'─'*12} {'─'*15}") + for m, arch, *_ in healthy_miners: + s = scores.get(m, 0.0) + status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" + print(f" {m:<25} {s:>12.4f} {status:<15}") + + # ─── Scenario 2: Fleet attack (10 modern boxes, same subnet) ─── + print("\n--- Scenario 2: Fleet Attack (10 modern boxes, same /24) ---") + + EPOCH2 = 101 + fleet_miners = [] + + # 3 legitimate miners + fleet_miners.append(("g4-real-1", "g4", "10.1.1", 1000, 0.092, "cache_real1", 0.45, "simd_real1")) + fleet_miners.append(("g5-real-1", "g5", "10.2.1", 1800, 0.105, "cache_real2", 0.38, "simd_real2")) + fleet_miners.append(("m2-real-1", "apple_silicon", "10.3.1", 2500, 0.033, "cache_real3", 0.42, "simd_real3")) + + # 10 fleet miners — same subnet, similar timing, similar fingerprints + for i in range(10): + fleet_miners.append(( + f"fleet-box-{i}", + "modern", + "203.0.113", # All same /24 subnet + 3000 + i * 5, # Attestation within 50s of each other + 0.048 + i * 0.001, # Nearly identical clock drift + "cache_fleet_shared", # SAME cache timing hash + 0.60 + i * 0.005, # Very similar thermal signatures + "simd_fleet_shared", # SAME SIMD hash + )) + + for m, arch, subnet, ts, cv, cache, thermal, simd in fleet_miners: + subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] + db.execute(""" + INSERT OR REPLACE INTO fleet_signals + (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
+ """, (m, EPOCH2, subnet_hash, ts, cv, cache, thermal, simd)) + + db.commit() + scores2 = compute_fleet_scores(db, EPOCH2) + + print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") + print(f" {'─'*25} {'─'*12} {'─'*15}") + for m, arch, *_ in fleet_miners: + s = scores2.get(m, 0.0) + status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" + print(f" {m:<25} {s:>12.4f} {status:<15}") + + # ─── Scenario 3: Bucket pressure ─── + print("\n--- Scenario 3: Bucket Pressure (500 modern vs 3 vintage) ---") + + fleet_attack = [("g4-solo", "g4", 2.5), ("g5-solo", "g5", 2.0), ("g3-solo", "g3", 1.8)] + for i in range(500): + fleet_attack.append((f"modern-{i}", "modern", 1.0)) + + pressure = compute_bucket_pressure(fleet_attack, 200) + + print(f" {'Bucket':<20} {'Pressure':>10} {'Effect':<30}") + print(f" {'─'*20} {'─'*10} {'─'*30}") + for bucket, factor in sorted(pressure.items(), key=lambda x: x[1]): + if factor < 1.0: + effect = f"FLATTENED (each modern box worth {factor:.2f}x)" + elif factor > 1.0: + effect = f"BOOSTED (rare hardware bonus {factor:.2f}x)" + else: + effect = "neutral" + print(f" {bucket:<20} {factor:>10.4f} {effect:<30}") + + # ─── Scenario 4: Fleet decay on multipliers ─── + print("\n--- Scenario 4: Fleet Decay Examples ---") + + examples = [ + ("G4 (solo)", 2.5, 0.0), + ("G4 (mild fleet)", 2.5, 0.3), + ("G4 (strong fleet)", 2.5, 0.7), + ("G4 (confirmed fleet)", 2.5, 1.0), + ("Modern (solo)", 1.0, 0.0), + ("Modern (strong fleet)", 1.0, 0.7), + ("Modern (confirmed fleet)", 1.0, 1.0), + ] + + print(f" {'Miner Type':<25} {'Base':>6} {'Fleet':>7} {'Effective':>10} {'Decay':>8}") + print(f" {'─'*25} {'─'*6} {'─'*7} {'─'*10} {'─'*8}") + for name, base, score in examples: + eff = apply_fleet_decay(base, score) + decay_pct = (1.0 - eff/base) * 100 if base > 0 else 0 + print(f" {name:<25} {base:>6.2f} {score:>7.2f} {eff:>10.3f} {decay_pct:>7.1f}%") + + # ─── Combined effect ─── + print("\n--- Combined: 500 Modern Fleet vs 3 Vintage Solo ---") + 
print(" Without immune system:") + total_w_no_immune = 500 * 1.0 + 2.5 + 2.0 + 1.8 + g4_share = (2.5 / total_w_no_immune) * 1.5 + modern_total = (500 * 1.0 / total_w_no_immune) * 1.5 + modern_each = modern_total / 500 + print(f" G4 solo: {g4_share:.6f} RTC/epoch") + print(f" 500 modern fleet: {modern_total:.6f} RTC/epoch total ({modern_each:.8f} each)") + print(f" Fleet ROI: {modern_total/g4_share:.1f}x the G4 solo reward") + + print("\n With RIP-201 PRESSURE mode (soft):") + fleet_eff = apply_fleet_decay(1.0, 0.8) # ~0.68 + g4_eff = 2.5 # Solo, no decay + bucket_p_modern = compute_bucket_pressure( + [("g4", "g4", g4_eff), ("g5", "g5", 2.0), ("g3", "g3", 1.8)] + + [(f"m{i}", "modern", fleet_eff) for i in range(500)], + 999 + ) + modern_p = bucket_p_modern.get("modern", 1.0) + vintage_p = bucket_p_modern.get("vintage_powerpc", 1.0) + + g4_final = g4_eff * vintage_p + modern_final = fleet_eff * modern_p + total_w_immune = g4_final + 2.0 * vintage_p + 1.8 * vintage_p + 500 * modern_final + g4_share_immune = (g4_final / total_w_immune) * 1.5 + modern_total_immune = (500 * modern_final / total_w_immune) * 1.5 + modern_each_immune = modern_total_immune / 500 + + print(f" Fleet score: 0.80 → multiplier decay to {fleet_eff:.3f}") + print(f" Modern pressure: {modern_p:.4f} (bucket flattened)") + print(f" Vintage pressure: {vintage_p:.4f} (bucket boosted)") + print(f" G4 solo: {g4_share_immune:.6f} RTC/epoch") + print(f" 500 modern fleet: {modern_total_immune:.6f} RTC/epoch total ({modern_each_immune:.8f} each)") + print(f" Fleet ROI: {modern_total_immune/g4_share_immune:.1f}x the G4 solo reward") + + # ─── Equal Split mode (the real defense) ─── + print("\n With RIP-201 EQUAL SPLIT mode (RECOMMENDED):") + print(" Pot split: 1.5 RTC ÷ 2 active buckets = 0.75 RTC each") + + # In equal split: vintage_powerpc bucket gets 0.75 RTC, modern bucket gets 0.75 RTC + vintage_pot = 0.75 # RTC + modern_pot = 0.75 # RTC + + # Within vintage bucket: 3 miners split 0.75 by weight + 
vintage_total_w = 2.5 + 2.0 + 1.8 + g4_equal = (2.5 / vintage_total_w) * vintage_pot + g5_equal = (2.0 / vintage_total_w) * vintage_pot + g3_equal = (1.8 / vintage_total_w) * vintage_pot + + # Within modern bucket: 500 fleet miners split 0.75 by decayed weight + modern_each_equal = modern_pot / 500 # Equal weight within bucket (all modern) + + print(f" Vintage bucket (3 miners share 0.75 RTC):") + print(f" G4 solo: {g4_equal:.6f} RTC/epoch") + print(f" G5 solo: {g5_equal:.6f} RTC/epoch") + print(f" G3 solo: {g3_equal:.6f} RTC/epoch") + print(f" Modern bucket (500 fleet share 0.75 RTC):") + print(f" Each fleet box: {modern_each_equal:.8f} RTC/epoch") + print(f" Fleet ROI: {modern_pot/g4_equal:.1f}x the G4 solo reward (TOTAL fleet)") + print(f" Per-box ROI: {modern_each_equal/g4_equal:.4f}x (each fleet box vs G4)") + print(f" Fleet gets: {modern_pot/1.5*100:.0f}% of pot (was {modern_total/1.5*100:.0f}%)") + print(f" G4 earns: {g4_equal/g4_share:.0f}x more than without immune system") + + # ─── The economics ─── + print("\n === ECONOMIC IMPACT ===") + print(f" Without immune: 500 boxes earn {modern_total:.4f} RTC/epoch = {modern_total*365:.1f} RTC/year") + print(f" With equal split: 500 boxes earn {modern_pot:.4f} RTC/epoch = {modern_pot*365:.1f} RTC/year") + hardware_cost = 5_000_000 # $5M + rtc_value = 0.10 # $0.10/RTC + annual_no_immune = modern_total * 365 * rtc_value + annual_equal = modern_pot * 365 * rtc_value + years_to_roi_no = hardware_cost / annual_no_immune if annual_no_immune > 0 else float('inf') + years_to_roi_eq = hardware_cost / annual_equal if annual_equal > 0 else float('inf') + print(f" At $0.10/RTC, fleet annual revenue:") + print(f" No immune: ${annual_no_immune:,.2f}/year → ROI in {years_to_roi_no:,.0f} years") + print(f" Equal split: ${annual_equal:,.2f}/year → ROI in {years_to_roi_eq:,.0f} years") + print(f" A $5M hardware fleet NEVER pays for itself. 
Attack neutralized.") + + print("\n" + "=" * 60) + print("RIP-201 self-test complete.") + print("One of everything beats a hundred of one thing.") + print("=" * 60) diff --git a/rips/python/rustchain/governance.py b/rips/python/rustchain/governance.py index 3bb23d6b..e0dba3d2 100644 --- a/rips/python/rustchain/governance.py +++ b/rips/python/rustchain/governance.py @@ -1,571 +1,571 @@ -""" -RustChain Governance (RIP-0002, RIP-0005, RIP-0006) -=================================================== - -Hybrid human + Sophia AI governance system. - -Features: -- Proposal creation and voting -- Sophia AI evaluation (Endorse/Veto/Analyze) -- Token-weighted and reputation-weighted voting -- Smart contract binding layer -- Delegation framework -""" - -import hashlib -import json -import time -from dataclasses import dataclass, field -from enum import Enum, auto -from typing import Dict, List, Optional, Any, Callable -from decimal import Decimal - -from .core_types import WalletAddress, TokenAmount - - -# ============================================================================= -# Proposal Status & Types -# ============================================================================= - -class ProposalStatus(Enum): - """Proposal lifecycle status""" - DRAFT = auto() - SUBMITTED = auto() - SOPHIA_REVIEW = auto() - VOTING = auto() - PASSED = auto() - REJECTED = auto() - VETOED = auto() - EXECUTED = auto() - EXPIRED = auto() - - -class ProposalType(Enum): - """Types of proposals""" - PARAMETER_CHANGE = auto() - MONETARY_POLICY = auto() - PROTOCOL_UPGRADE = auto() - VALIDATOR_CHANGE = auto() - SMART_CONTRACT = auto() - COMMUNITY = auto() - - -class SophiaDecision(Enum): - """Sophia AI evaluation decisions""" - PENDING = auto() - ENDORSE = auto() # Boosts support probability - VETO = auto() # Locks proposal - ANALYZE = auto() # Neutral, logs public rationale - - -# ============================================================================= -# Governance Constants -# 
============================================================================= - -VOTING_PERIOD_DAYS: int = 7 -QUORUM_PERCENTAGE: float = 0.33 # 33% participation minimum -EXECUTION_DELAY_BLOCKS: int = 3 -REPUTATION_DECAY_WEEKLY: float = 0.05 # 5% weekly decay - - -# ============================================================================= -# Proposal Data Classes -# ============================================================================= - -@dataclass -class Vote: - """A single vote on a proposal""" - voter: WalletAddress - support: bool - weight: Decimal - timestamp: int - delegation_from: Optional[WalletAddress] = None - - -@dataclass -class SophiaEvaluation: - """Sophia AI's evaluation of a proposal""" - decision: SophiaDecision - rationale: str - feasibility_score: float - risk_level: str # "low", "medium", "high" - aligned_precedent: List[str] - timestamp: int - - -@dataclass -class Proposal: - """A governance proposal""" - id: str - title: str - description: str - proposal_type: ProposalType - proposer: WalletAddress - created_at: int - status: ProposalStatus = ProposalStatus.DRAFT - - # Contract binding (RIP-0005) - contract_hash: Optional[str] = None - requires_multi_sig: bool = False - timelock_blocks: int = EXECUTION_DELAY_BLOCKS - auto_expire: bool = True - - # Voting data - votes: List[Vote] = field(default_factory=list) - voting_starts_at: Optional[int] = None - voting_ends_at: Optional[int] = None - - # Sophia evaluation (RIP-0002) - sophia_evaluation: Optional[SophiaEvaluation] = None - - # Execution - executed_at: Optional[int] = None - execution_tx_hash: Optional[str] = None - - @property - def yes_votes(self) -> Decimal: - return sum(v.weight for v in self.votes if v.support) - - @property - def no_votes(self) -> Decimal: - return sum(v.weight for v in self.votes if not v.support) - - @property - def total_votes(self) -> Decimal: - return sum(v.weight for v in self.votes) - - @property - def approval_percentage(self) -> float: - total = 
self.total_votes - if total == 0: - return 0.0 - return float(self.yes_votes / total) - - def to_dict(self) -> Dict[str, Any]: - return { - "id": self.id, - "title": self.title, - "description": self.description, - "type": self.proposal_type.name, - "proposer": self.proposer.address, - "status": self.status.name, - "created_at": self.created_at, - "contract_hash": self.contract_hash, - "yes_votes": str(self.yes_votes), - "no_votes": str(self.no_votes), - "total_votes": str(self.total_votes), - "approval_percentage": self.approval_percentage, - "sophia_decision": ( - self.sophia_evaluation.decision.name - if self.sophia_evaluation else "PENDING" - ), - } - - -# ============================================================================= -# Reputation System (RIP-0006) -# ============================================================================= - -@dataclass -class NodeReputation: - """Reputation score for a node/wallet""" - wallet: WalletAddress - score: float = 50.0 # Start neutral - participation_count: int = 0 - correct_predictions: int = 0 - uptime_contribution: float = 0.0 - sophia_alignment: float = 0.0 # Correlation with Sophia decisions - last_activity: int = 0 - - def decay(self, weeks_inactive: int): - """Apply decay for inactivity""" - decay_factor = (1 - REPUTATION_DECAY_WEEKLY) ** weeks_inactive - self.score *= decay_factor - - def update_alignment(self, voted_with_sophia: bool): - """Update Sophia alignment score""" - weight = 0.1 - if voted_with_sophia: - self.sophia_alignment = min(1.0, self.sophia_alignment + weight) - else: - self.sophia_alignment = max(0.0, self.sophia_alignment - weight) - - -@dataclass -class Delegation: - """Voting power delegation""" - from_wallet: WalletAddress - to_wallet: WalletAddress - weight: Decimal # Percentage of voting power delegated - created_at: int - expires_at: Optional[int] = None - - def is_active(self, current_time: int) -> bool: - if self.expires_at and current_time > self.expires_at: - return False - 
return True - - -# ============================================================================= -# Governance Engine -# ============================================================================= - -class GovernanceEngine: - """ - Main governance engine implementing RIP-0002, RIP-0005, RIP-0006. - - Lifecycle: - 1. Proposal created via create_proposal() - 2. Sophia evaluates via sophia_evaluate() - 3. If not vetoed, voting begins - 4. After voting period, proposal passes/fails - 5. Passed proposals execute after delay - """ - - def __init__(self, total_supply: int): - self.proposals: Dict[str, Proposal] = {} - self.reputations: Dict[str, NodeReputation] = {} - self.delegations: Dict[str, List[Delegation]] = {} - self.total_supply = total_supply - self.proposal_counter = 0 - - def create_proposal( - self, - title: str, - description: str, - proposal_type: ProposalType, - proposer: WalletAddress, - contract_hash: Optional[str] = None, - ) -> Proposal: - """ - Create a new governance proposal. - - Args: - title: Proposal title - description: Detailed description - proposal_type: Type of proposal - proposer: Wallet creating the proposal - contract_hash: Optional smart contract reference - - Returns: - Created proposal - """ - self.proposal_counter += 1 - proposal_id = f"RCP-{self.proposal_counter:04d}" - - proposal = Proposal( - id=proposal_id, - title=title, - description=description, - proposal_type=proposal_type, - proposer=proposer, - created_at=int(time.time()), - contract_hash=contract_hash, - status=ProposalStatus.SUBMITTED, - ) - - self.proposals[proposal_id] = proposal - - # Update proposer reputation - self._update_reputation(proposer, activity_type="propose") - - return proposal - - def sophia_evaluate( - self, - proposal_id: str, - decision: SophiaDecision, - rationale: str, - feasibility_score: float = 0.5, - risk_level: str = "medium", - ) -> SophiaEvaluation: - """ - Record Sophia AI's evaluation of a proposal (RIP-0002). 
- - Args: - proposal_id: Proposal to evaluate - decision: ENDORSE, VETO, or ANALYZE - rationale: Public explanation - feasibility_score: 0.0-1.0 - risk_level: "low", "medium", "high" - - Returns: - SophiaEvaluation object - """ - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - evaluation = SophiaEvaluation( - decision=decision, - rationale=rationale, - feasibility_score=feasibility_score, - risk_level=risk_level, - aligned_precedent=[], - timestamp=int(time.time()), - ) - - proposal.sophia_evaluation = evaluation - - if decision == SophiaDecision.VETO: - proposal.status = ProposalStatus.VETOED - print(f"🚫 Sophia VETOED proposal {proposal_id}: {rationale}") - elif decision == SophiaDecision.ENDORSE: - proposal.status = ProposalStatus.VOTING - proposal.voting_starts_at = int(time.time()) - proposal.voting_ends_at = proposal.voting_starts_at + ( - VOTING_PERIOD_DAYS * 86400 - ) - print(f"✅ Sophia ENDORSED proposal {proposal_id}") - else: # ANALYZE - proposal.status = ProposalStatus.VOTING - proposal.voting_starts_at = int(time.time()) - proposal.voting_ends_at = proposal.voting_starts_at + ( - VOTING_PERIOD_DAYS * 86400 - ) - print(f"📊 Sophia ANALYZED proposal {proposal_id}: {rationale}") - - return evaluation - - def vote( - self, - proposal_id: str, - voter: WalletAddress, - support: bool, - token_balance: Decimal, - ) -> Vote: - """ - Cast a vote on a proposal. 
- - Args: - proposal_id: Proposal to vote on - voter: Voting wallet - support: True for yes, False for no - token_balance: Voter's token balance (for weighting) - - Returns: - Vote object - """ - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - if proposal.status != ProposalStatus.VOTING: - raise ValueError(f"Proposal not in voting phase: {proposal.status}") - - current_time = int(time.time()) - if proposal.voting_ends_at and current_time > proposal.voting_ends_at: - raise ValueError("Voting period has ended") - - # Check for existing vote - existing = [v for v in proposal.votes if v.voter == voter] - if existing: - raise ValueError("Already voted on this proposal") - - # Calculate voting weight (token + reputation weighted) - reputation = self.reputations.get(voter.address) - rep_bonus = (reputation.score / 100.0) if reputation else 0.5 - weight = token_balance * Decimal(str(1 + rep_bonus * 0.2)) - - # Include delegated votes - delegated_weight = self._get_delegated_weight(voter, current_time) - total_weight = weight + delegated_weight - - vote = Vote( - voter=voter, - support=support, - weight=total_weight, - timestamp=current_time, - ) - - proposal.votes.append(vote) - - # Update reputation - self._update_reputation(voter, activity_type="vote") - - return vote - - def finalize_proposal(self, proposal_id: str) -> ProposalStatus: - """ - Finalize a proposal after voting period ends. 
- - Args: - proposal_id: Proposal to finalize - - Returns: - Final status (PASSED, REJECTED, or current status) - """ - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - if proposal.status != ProposalStatus.VOTING: - return proposal.status - - current_time = int(time.time()) - if proposal.voting_ends_at and current_time < proposal.voting_ends_at: - return proposal.status # Still voting - - # Check quorum - participation = float(proposal.total_votes) / self.total_supply - if participation < QUORUM_PERCENTAGE: - proposal.status = ProposalStatus.REJECTED - print(f"❌ Proposal {proposal_id} rejected: quorum not met " - f"({participation:.1%} < {QUORUM_PERCENTAGE:.0%})") - return proposal.status - - # Check approval - if proposal.approval_percentage > 0.5: - proposal.status = ProposalStatus.PASSED - print(f"✅ Proposal {proposal_id} PASSED with " - f"{proposal.approval_percentage:.1%} approval") - - # Update reputation based on Sophia alignment - self._update_sophia_alignment(proposal) - else: - proposal.status = ProposalStatus.REJECTED - print(f"❌ Proposal {proposal_id} rejected: " - f"{proposal.approval_percentage:.1%} approval") - - return proposal.status - - def execute_proposal(self, proposal_id: str) -> bool: - """ - Execute a passed proposal (RIP-0005). 
- - Args: - proposal_id: Proposal to execute - - Returns: - True if executed, False otherwise - """ - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - if proposal.status != ProposalStatus.PASSED: - raise ValueError(f"Cannot execute: status is {proposal.status}") - - # Vetoed proposals cannot execute - if (proposal.sophia_evaluation and - proposal.sophia_evaluation.decision == SophiaDecision.VETO): - raise ValueError("Vetoed proposals cannot be executed") - - # Execute contract if specified - if proposal.contract_hash: - # Verify contract alignment before execution - print(f"🔗 Executing contract {proposal.contract_hash}") - - proposal.status = ProposalStatus.EXECUTED - proposal.executed_at = int(time.time()) - proposal.execution_tx_hash = hashlib.sha256( - f"{proposal_id}:{proposal.executed_at}".encode() - ).hexdigest() - - print(f"⚡ Proposal {proposal_id} executed at block height [N]") - return True - - def delegate_voting_power( - self, - from_wallet: WalletAddress, - to_wallet: WalletAddress, - weight: Decimal, - duration_days: Optional[int] = None, - ) -> Delegation: - """ - Delegate voting power to another wallet (RIP-0006). 
- - Args: - from_wallet: Delegating wallet - to_wallet: Receiving wallet - weight: Percentage of voting power (0-1) - duration_days: Optional delegation duration - - Returns: - Delegation object - """ - if weight < 0 or weight > 1: - raise ValueError("Delegation weight must be between 0 and 1") - - current_time = int(time.time()) - expires_at = None - if duration_days: - expires_at = current_time + (duration_days * 86400) - - delegation = Delegation( - from_wallet=from_wallet, - to_wallet=to_wallet, - weight=weight, - created_at=current_time, - expires_at=expires_at, - ) - - key = to_wallet.address - if key not in self.delegations: - self.delegations[key] = [] - self.delegations[key].append(delegation) - - return delegation - - def _get_delegated_weight( - self, wallet: WalletAddress, current_time: int - ) -> Decimal: - """Get total delegated voting weight for a wallet""" - delegations = self.delegations.get(wallet.address, []) - total = Decimal("0") - for d in delegations: - if d.is_active(current_time): - total += d.weight - return total - - def _update_reputation(self, wallet: WalletAddress, activity_type: str): - """Update wallet reputation based on activity""" - key = wallet.address - if key not in self.reputations: - self.reputations[key] = NodeReputation( - wallet=wallet, - last_activity=int(time.time()), - ) - - rep = self.reputations[key] - rep.participation_count += 1 - rep.last_activity = int(time.time()) - - # Small reputation boost for participation - if activity_type == "vote": - rep.score = min(100, rep.score + 0.5) - elif activity_type == "propose": - rep.score = min(100, rep.score + 1.0) - - def _update_sophia_alignment(self, proposal: Proposal): - """Update voter reputations based on Sophia alignment""" - if not proposal.sophia_evaluation: - return - - sophia_decision = proposal.sophia_evaluation.decision - if sophia_decision == SophiaDecision.ANALYZE: - return # Neutral, no alignment update - - # Sophia endorsed = yes is aligned, Sophia vetoed = 
no is aligned - sophia_supported = sophia_decision == SophiaDecision.ENDORSE - - for vote in proposal.votes: - voted_with_sophia = vote.support == sophia_supported - rep = self.reputations.get(vote.voter.address) - if rep: - rep.update_alignment(voted_with_sophia) - - def get_proposal(self, proposal_id: str) -> Optional[Proposal]: - """Get a proposal by ID""" - return self.proposals.get(proposal_id) - - def get_active_proposals(self) -> List[Proposal]: - """Get all proposals currently in voting""" - return [ - p for p in self.proposals.values() - if p.status == ProposalStatus.VOTING - ] - - def get_all_proposals(self) -> List[Proposal]: - """Get all proposals""" - return list(self.proposals.values()) +""" +RustChain Governance (RIP-0002, RIP-0005, RIP-0006) +=================================================== + +Hybrid human + Sophia AI governance system. + +Features: +- Proposal creation and voting +- Sophia AI evaluation (Endorse/Veto/Analyze) +- Token-weighted and reputation-weighted voting +- Smart contract binding layer +- Delegation framework +""" + +import hashlib +import json +import time +from dataclasses import dataclass, field +from enum import Enum, auto +from typing import Dict, List, Optional, Any, Callable +from decimal import Decimal + +from .core_types import WalletAddress, TokenAmount + + +# ============================================================================= +# Proposal Status & Types +# ============================================================================= + +class ProposalStatus(Enum): + """Proposal lifecycle status""" + DRAFT = auto() + SUBMITTED = auto() + SOPHIA_REVIEW = auto() + VOTING = auto() + PASSED = auto() + REJECTED = auto() + VETOED = auto() + EXECUTED = auto() + EXPIRED = auto() + + +class ProposalType(Enum): + """Types of proposals""" + PARAMETER_CHANGE = auto() + MONETARY_POLICY = auto() + PROTOCOL_UPGRADE = auto() + VALIDATOR_CHANGE = auto() + SMART_CONTRACT = auto() + COMMUNITY = auto() + + +class 
SophiaDecision(Enum): + """Sophia AI evaluation decisions""" + PENDING = auto() + ENDORSE = auto() # Boosts support probability + VETO = auto() # Locks proposal + ANALYZE = auto() # Neutral, logs public rationale + + +# ============================================================================= +# Governance Constants +# ============================================================================= + +VOTING_PERIOD_DAYS: int = 7 +QUORUM_PERCENTAGE: float = 0.33 # 33% participation minimum +EXECUTION_DELAY_BLOCKS: int = 3 +REPUTATION_DECAY_WEEKLY: float = 0.05 # 5% weekly decay + + +# ============================================================================= +# Proposal Data Classes +# ============================================================================= + +@dataclass +class Vote: + """A single vote on a proposal""" + voter: WalletAddress + support: bool + weight: Decimal + timestamp: int + delegation_from: Optional[WalletAddress] = None + + +@dataclass +class SophiaEvaluation: + """Sophia AI's evaluation of a proposal""" + decision: SophiaDecision + rationale: str + feasibility_score: float + risk_level: str # "low", "medium", "high" + aligned_precedent: List[str] + timestamp: int + + +@dataclass +class Proposal: + """A governance proposal""" + id: str + title: str + description: str + proposal_type: ProposalType + proposer: WalletAddress + created_at: int + status: ProposalStatus = ProposalStatus.DRAFT + + # Contract binding (RIP-0005) + contract_hash: Optional[str] = None + requires_multi_sig: bool = False + timelock_blocks: int = EXECUTION_DELAY_BLOCKS + auto_expire: bool = True + + # Voting data + votes: List[Vote] = field(default_factory=list) + voting_starts_at: Optional[int] = None + voting_ends_at: Optional[int] = None + + # Sophia evaluation (RIP-0002) + sophia_evaluation: Optional[SophiaEvaluation] = None + + # Execution + executed_at: Optional[int] = None + execution_tx_hash: Optional[str] = None + + @property + def yes_votes(self) -> 
Decimal: + return sum(v.weight for v in self.votes if v.support) + + @property + def no_votes(self) -> Decimal: + return sum(v.weight for v in self.votes if not v.support) + + @property + def total_votes(self) -> Decimal: + return sum(v.weight for v in self.votes) + + @property + def approval_percentage(self) -> float: + total = self.total_votes + if total == 0: + return 0.0 + return float(self.yes_votes / total) + + def to_dict(self) -> Dict[str, Any]: + return { + "id": self.id, + "title": self.title, + "description": self.description, + "type": self.proposal_type.name, + "proposer": self.proposer.address, + "status": self.status.name, + "created_at": self.created_at, + "contract_hash": self.contract_hash, + "yes_votes": str(self.yes_votes), + "no_votes": str(self.no_votes), + "total_votes": str(self.total_votes), + "approval_percentage": self.approval_percentage, + "sophia_decision": ( + self.sophia_evaluation.decision.name + if self.sophia_evaluation else "PENDING" + ), + } + + +# ============================================================================= +# Reputation System (RIP-0006) +# ============================================================================= + +@dataclass +class NodeReputation: + """Reputation score for a node/wallet""" + wallet: WalletAddress + score: float = 50.0 # Start neutral + participation_count: int = 0 + correct_predictions: int = 0 + uptime_contribution: float = 0.0 + sophia_alignment: float = 0.0 # Correlation with Sophia decisions + last_activity: int = 0 + + def decay(self, weeks_inactive: int): + """Apply decay for inactivity""" + decay_factor = (1 - REPUTATION_DECAY_WEEKLY) ** weeks_inactive + self.score *= decay_factor + + def update_alignment(self, voted_with_sophia: bool): + """Update Sophia alignment score""" + weight = 0.1 + if voted_with_sophia: + self.sophia_alignment = min(1.0, self.sophia_alignment + weight) + else: + self.sophia_alignment = max(0.0, self.sophia_alignment - weight) + + +@dataclass +class 
Delegation: + """Voting power delegation""" + from_wallet: WalletAddress + to_wallet: WalletAddress + weight: Decimal # Percentage of voting power delegated + created_at: int + expires_at: Optional[int] = None + + def is_active(self, current_time: int) -> bool: + if self.expires_at and current_time > self.expires_at: + return False + return True + + +# ============================================================================= +# Governance Engine +# ============================================================================= + +class GovernanceEngine: + """ + Main governance engine implementing RIP-0002, RIP-0005, RIP-0006. + + Lifecycle: + 1. Proposal created via create_proposal() + 2. Sophia evaluates via sophia_evaluate() + 3. If not vetoed, voting begins + 4. After voting period, proposal passes/fails + 5. Passed proposals execute after delay + """ + + def __init__(self, total_supply: int): + self.proposals: Dict[str, Proposal] = {} + self.reputations: Dict[str, NodeReputation] = {} + self.delegations: Dict[str, List[Delegation]] = {} + self.total_supply = total_supply + self.proposal_counter = 0 + + def create_proposal( + self, + title: str, + description: str, + proposal_type: ProposalType, + proposer: WalletAddress, + contract_hash: Optional[str] = None, + ) -> Proposal: + """ + Create a new governance proposal. 
+ + Args: + title: Proposal title + description: Detailed description + proposal_type: Type of proposal + proposer: Wallet creating the proposal + contract_hash: Optional smart contract reference + + Returns: + Created proposal + """ + self.proposal_counter += 1 + proposal_id = f"RCP-{self.proposal_counter:04d}" + + proposal = Proposal( + id=proposal_id, + title=title, + description=description, + proposal_type=proposal_type, + proposer=proposer, + created_at=int(time.time()), + contract_hash=contract_hash, + status=ProposalStatus.SUBMITTED, + ) + + self.proposals[proposal_id] = proposal + + # Update proposer reputation + self._update_reputation(proposer, activity_type="propose") + + return proposal + + def sophia_evaluate( + self, + proposal_id: str, + decision: SophiaDecision, + rationale: str, + feasibility_score: float = 0.5, + risk_level: str = "medium", + ) -> SophiaEvaluation: + """ + Record Sophia AI's evaluation of a proposal (RIP-0002). + + Args: + proposal_id: Proposal to evaluate + decision: ENDORSE, VETO, or ANALYZE + rationale: Public explanation + feasibility_score: 0.0-1.0 + risk_level: "low", "medium", "high" + + Returns: + SophiaEvaluation object + """ + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + evaluation = SophiaEvaluation( + decision=decision, + rationale=rationale, + feasibility_score=feasibility_score, + risk_level=risk_level, + aligned_precedent=[], + timestamp=int(time.time()), + ) + + proposal.sophia_evaluation = evaluation + + if decision == SophiaDecision.VETO: + proposal.status = ProposalStatus.VETOED + print(f"🚫 Sophia VETOED proposal {proposal_id}: {rationale}") + elif decision == SophiaDecision.ENDORSE: + proposal.status = ProposalStatus.VOTING + proposal.voting_starts_at = int(time.time()) + proposal.voting_ends_at = proposal.voting_starts_at + ( + VOTING_PERIOD_DAYS * 86400 + ) + print(f"✅ Sophia ENDORSED proposal {proposal_id}") + else: # ANALYZE + 
proposal.status = ProposalStatus.VOTING + proposal.voting_starts_at = int(time.time()) + proposal.voting_ends_at = proposal.voting_starts_at + ( + VOTING_PERIOD_DAYS * 86400 + ) + print(f"📊 Sophia ANALYZED proposal {proposal_id}: {rationale}") + + return evaluation + + def vote( + self, + proposal_id: str, + voter: WalletAddress, + support: bool, + token_balance: Decimal, + ) -> Vote: + """ + Cast a vote on a proposal. + + Args: + proposal_id: Proposal to vote on + voter: Voting wallet + support: True for yes, False for no + token_balance: Voter's token balance (for weighting) + + Returns: + Vote object + """ + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + if proposal.status != ProposalStatus.VOTING: + raise ValueError(f"Proposal not in voting phase: {proposal.status}") + + current_time = int(time.time()) + if proposal.voting_ends_at and current_time > proposal.voting_ends_at: + raise ValueError("Voting period has ended") + + # Check for existing vote + existing = [v for v in proposal.votes if v.voter == voter] + if existing: + raise ValueError("Already voted on this proposal") + + # Calculate voting weight (token + reputation weighted) + reputation = self.reputations.get(voter.address) + rep_bonus = (reputation.score / 100.0) if reputation else 0.5 + weight = token_balance * Decimal(str(1 + rep_bonus * 0.2)) + + # Include delegated votes + delegated_weight = self._get_delegated_weight(voter, current_time) + total_weight = weight + delegated_weight + + vote = Vote( + voter=voter, + support=support, + weight=total_weight, + timestamp=current_time, + ) + + proposal.votes.append(vote) + + # Update reputation + self._update_reputation(voter, activity_type="vote") + + return vote + + def finalize_proposal(self, proposal_id: str) -> ProposalStatus: + """ + Finalize a proposal after voting period ends. 
+ + Args: + proposal_id: Proposal to finalize + + Returns: + Final status (PASSED, REJECTED, or current status) + """ + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + if proposal.status != ProposalStatus.VOTING: + return proposal.status + + current_time = int(time.time()) + if proposal.voting_ends_at and current_time < proposal.voting_ends_at: + return proposal.status # Still voting + + # Check quorum + participation = float(proposal.total_votes) / self.total_supply + if participation < QUORUM_PERCENTAGE: + proposal.status = ProposalStatus.REJECTED + print(f"❌ Proposal {proposal_id} rejected: quorum not met " + f"({participation:.1%} < {QUORUM_PERCENTAGE:.0%})") + return proposal.status + + # Check approval + if proposal.approval_percentage > 0.5: + proposal.status = ProposalStatus.PASSED + print(f"✅ Proposal {proposal_id} PASSED with " + f"{proposal.approval_percentage:.1%} approval") + + # Update reputation based on Sophia alignment + self._update_sophia_alignment(proposal) + else: + proposal.status = ProposalStatus.REJECTED + print(f"❌ Proposal {proposal_id} rejected: " + f"{proposal.approval_percentage:.1%} approval") + + return proposal.status + + def execute_proposal(self, proposal_id: str) -> bool: + """ + Execute a passed proposal (RIP-0005). 
+ + Args: + proposal_id: Proposal to execute + + Returns: + True if executed, False otherwise + """ + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + if proposal.status != ProposalStatus.PASSED: + raise ValueError(f"Cannot execute: status is {proposal.status}") + + # Vetoed proposals cannot execute + if (proposal.sophia_evaluation and + proposal.sophia_evaluation.decision == SophiaDecision.VETO): + raise ValueError("Vetoed proposals cannot be executed") + + # Execute contract if specified + if proposal.contract_hash: + # Verify contract alignment before execution + print(f"🔗 Executing contract {proposal.contract_hash}") + + proposal.status = ProposalStatus.EXECUTED + proposal.executed_at = int(time.time()) + proposal.execution_tx_hash = hashlib.sha256( + f"{proposal_id}:{proposal.executed_at}".encode() + ).hexdigest() + + print(f"⚡ Proposal {proposal_id} executed at block height [N]") + return True + + def delegate_voting_power( + self, + from_wallet: WalletAddress, + to_wallet: WalletAddress, + weight: Decimal, + duration_days: Optional[int] = None, + ) -> Delegation: + """ + Delegate voting power to another wallet (RIP-0006). 
+ + Args: + from_wallet: Delegating wallet + to_wallet: Receiving wallet + weight: Percentage of voting power (0-1) + duration_days: Optional delegation duration + + Returns: + Delegation object + """ + if weight < 0 or weight > 1: + raise ValueError("Delegation weight must be between 0 and 1") + + current_time = int(time.time()) + expires_at = None + if duration_days: + expires_at = current_time + (duration_days * 86400) + + delegation = Delegation( + from_wallet=from_wallet, + to_wallet=to_wallet, + weight=weight, + created_at=current_time, + expires_at=expires_at, + ) + + key = to_wallet.address + if key not in self.delegations: + self.delegations[key] = [] + self.delegations[key].append(delegation) + + return delegation + + def _get_delegated_weight( + self, wallet: WalletAddress, current_time: int + ) -> Decimal: + """Get total delegated voting weight for a wallet""" + delegations = self.delegations.get(wallet.address, []) + total = Decimal("0") + for d in delegations: + if d.is_active(current_time): + total += d.weight + return total + + def _update_reputation(self, wallet: WalletAddress, activity_type: str): + """Update wallet reputation based on activity""" + key = wallet.address + if key not in self.reputations: + self.reputations[key] = NodeReputation( + wallet=wallet, + last_activity=int(time.time()), + ) + + rep = self.reputations[key] + rep.participation_count += 1 + rep.last_activity = int(time.time()) + + # Small reputation boost for participation + if activity_type == "vote": + rep.score = min(100, rep.score + 0.5) + elif activity_type == "propose": + rep.score = min(100, rep.score + 1.0) + + def _update_sophia_alignment(self, proposal: Proposal): + """Update voter reputations based on Sophia alignment""" + if not proposal.sophia_evaluation: + return + + sophia_decision = proposal.sophia_evaluation.decision + if sophia_decision == SophiaDecision.ANALYZE: + return # Neutral, no alignment update + + # Sophia endorsed = yes is aligned, Sophia vetoed = 
no is aligned + sophia_supported = sophia_decision == SophiaDecision.ENDORSE + + for vote in proposal.votes: + voted_with_sophia = vote.support == sophia_supported + rep = self.reputations.get(vote.voter.address) + if rep: + rep.update_alignment(voted_with_sophia) + + def get_proposal(self, proposal_id: str) -> Optional[Proposal]: + """Get a proposal by ID""" + return self.proposals.get(proposal_id) + + def get_active_proposals(self) -> List[Proposal]: + """Get all proposals currently in voting""" + return [ + p for p in self.proposals.values() + if p.status == ProposalStatus.VOTING + ] + + def get_all_proposals(self) -> List[Proposal]: + """Get all proposals""" + return list(self.proposals.values()) diff --git a/rips/python/rustchain/node.py b/rips/python/rustchain/node.py index e288d691..9f8e36fa 100644 --- a/rips/python/rustchain/node.py +++ b/rips/python/rustchain/node.py @@ -1,463 +1,463 @@ -""" -RustChain Node Implementation -============================= - -Full node implementation combining all RIPs. 
- -APIs: -- GET /api/stats - Blockchain statistics -- GET /api/node/antiquity - Node AS and eligibility -- POST /api/node/claim - Submit block claim with PoA metadata -- POST /api/mine - Submit mining proof -- POST /api/governance/create - Create proposal -- POST /api/governance/vote - Cast vote -- GET /api/governance/proposals - List proposals -""" - -import hashlib -import json -import time -import sqlite3 -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Any -from decimal import Decimal -from threading import Lock, Thread -from pathlib import Path - -from .core_types import ( - Block, - BlockMiner, - Transaction, - TransactionType, - WalletAddress, - HardwareInfo, - TokenAmount, - TOTAL_SUPPLY, - BLOCK_TIME_SECONDS, - CHAIN_ID, - PREMINE_AMOUNT, - FOUNDER_WALLETS, -) -from .proof_of_antiquity import ( - ProofOfAntiquity, - calculate_antiquity_score, - AS_MAX, - AS_MIN, -) -from .deep_entropy import DeepEntropyVerifier, EntropyProof -from .governance import GovernanceEngine, ProposalType, SophiaDecision - - -# ============================================================================= -# Node Configuration -# ============================================================================= - -@dataclass -class NodeConfig: - """Node configuration""" - data_dir: str = "./rustchain_data" - api_host: str = "0.0.0.0" - api_port: int = 8085 - mtls_port: int = 4443 - enable_mining: bool = True - enable_governance: bool = True - - -# ============================================================================= -# RustChain Node -# ============================================================================= - -class RustChainNode: - """ - Full RustChain node implementing Proof of Antiquity. 
- - This node: - - Validates hardware via deep entropy - - Calculates Antiquity Scores - - Processes blocks via weighted lottery - - Manages governance proposals - - Tracks wallets and balances - """ - - def __init__(self, config: Optional[NodeConfig] = None): - self.config = config or NodeConfig() - self.lock = Lock() - - # Initialize components - self.poa = ProofOfAntiquity() - self.entropy_verifier = DeepEntropyVerifier() - self.governance = GovernanceEngine(TOTAL_SUPPLY) - - # Blockchain state - self.blocks: List[Block] = [] - self.wallets: Dict[str, TokenAmount] = {} - self.pending_transactions: List[Transaction] = [] - - # Network state - self.total_minted = TokenAmount.from_rtc(float(PREMINE_AMOUNT)) - self.mining_pool = TokenAmount.from_rtc( - float(TOTAL_SUPPLY - PREMINE_AMOUNT) - ) - - # Initialize genesis - self._initialize_genesis() - - # Background block processor - self.running = False - - def _initialize_genesis(self): - """Initialize genesis block and founder wallets""" - # Create genesis block - genesis = Block( - height=0, - timestamp=int(time.time()), - previous_hash="0" * 64, - miners=[], - total_reward=TokenAmount(0), - ) - genesis.hash = "019c177b44a41f78da23caa99314adbc44889be2dcdd5021930f9d991e7e34cf" - self.blocks.append(genesis) - - # Initialize founder wallets (RIP-0004: 4 x 125,829.12 RTC) - founder_amount = TokenAmount.from_rtc(125829.12) - for wallet_addr in FOUNDER_WALLETS: - self.wallets[wallet_addr] = founder_amount - - print(f"🔥 RustChain Genesis initialized") - print(f" Chain ID: {CHAIN_ID}") - print(f" Total Supply: {TOTAL_SUPPLY:,} RTC") - print(f" Mining Pool: {self.mining_pool.to_rtc():,.2f} RTC") - print(f" Founder Wallets: {len(FOUNDER_WALLETS)}") - - def start(self): - """Start the node""" - self.running = True - print(f"🚀 RustChain node starting...") - print(f" API: http://{self.config.api_host}:{self.config.api_port}") - print(f" mTLS: port {self.config.mtls_port}") - - # Start block processor thread - self.block_thread = 
Thread(target=self._block_processor, daemon=True) - self.block_thread.start() - - def stop(self): - """Stop the node""" - self.running = False - print("🛑 RustChain node stopped") - - def _block_processor(self): - """Background block processor""" - while self.running: - time.sleep(10) # Check every 10 seconds - - with self.lock: - status = self.poa.get_status() - if status["time_remaining_seconds"] <= 0: - self._process_block() - - def _process_block(self): - """Process pending proofs and create new block""" - previous_hash = self.blocks[-1].hash if self.blocks else "0" * 64 - block = self.poa.process_block(previous_hash) - - if block: - self.blocks.append(block) - - # Update wallet balances - for miner in block.miners: - wallet_addr = miner.wallet.address - if wallet_addr not in self.wallets: - self.wallets[wallet_addr] = TokenAmount(0) - self.wallets[wallet_addr] += miner.reward - - # Update totals - self.total_minted += block.total_reward - self.mining_pool -= block.total_reward - - print(f"⛏️ Block #{block.height} processed") - - # ========================================================================= - # API Methods - # ========================================================================= - - def get_stats(self) -> Dict[str, Any]: - """GET /api/stats - Get blockchain statistics""" - with self.lock: - return { - "chain_id": CHAIN_ID, - "blocks": len(self.blocks), - "total_minted": float(self.total_minted.to_rtc()), - "mining_pool": float(self.mining_pool.to_rtc()), - "wallets": len(self.wallets), - "pending_proofs": self.poa.get_status()["pending_proofs"], - "current_block_age": self.poa.get_status()["block_age_seconds"], - "next_block_in": self.poa.get_status()["time_remaining_seconds"], - "latest_block": self.blocks[-1].to_dict() if self.blocks else None, - } - - def get_node_antiquity( - self, wallet: WalletAddress, hardware: HardwareInfo - ) -> Dict[str, Any]: - """GET /api/node/antiquity - Get node AS and eligibility""" - as_score = 
calculate_antiquity_score( - hardware.release_year, - hardware.uptime_days - ) - - eligible = as_score >= AS_MIN - - return { - "wallet": wallet.address, - "hardware": hardware.to_dict(), - "antiquity_score": as_score, - "as_max": AS_MAX, - "eligible": eligible, - "eligibility_reason": ( - "Meets minimum AS threshold" - if eligible - else f"AS {as_score:.2f} below minimum {AS_MIN}" - ), - } - - def submit_mining_proof( - self, - wallet: WalletAddress, - hardware: HardwareInfo, - entropy_proof: Optional[EntropyProof] = None, - ) -> Dict[str, Any]: - """POST /api/mine - Submit mining proof""" - with self.lock: - # Verify entropy if provided - anti_emulation_hash = "0" * 64 - if entropy_proof: - result = self.entropy_verifier.verify( - entropy_proof, - self._detect_hardware_profile(hardware) - ) - if not result.valid: - return { - "success": False, - "error": f"Entropy verification failed: {result.issues}", - "emulation_probability": result.emulation_probability, - } - anti_emulation_hash = entropy_proof.signature_hash - - # Submit to PoA - try: - return self.poa.submit_proof( - wallet=wallet, - hardware=hardware, - anti_emulation_hash=anti_emulation_hash, - ) - except Exception as e: - return {"success": False, "error": str(e)} - - def _detect_hardware_profile(self, hardware: HardwareInfo) -> str: - """Detect hardware profile from HardwareInfo""" - model = hardware.cpu_model.lower() - if "486" in model: - return "486DX2" - elif "pentium ii" in model or "pentium 2" in model: - return "PentiumII" - elif "pentium" in model: - return "Pentium" - elif "g4" in model or "powerpc g4" in model: - return "G4" - elif "g5" in model or "powerpc g5" in model: - return "G5" - elif "alpha" in model: - return "Alpha" - return "Unknown" - - def get_wallet(self, address: str) -> Dict[str, Any]: - """GET /api/wallet/:address - Get wallet details""" - with self.lock: - balance = self.wallets.get(address, TokenAmount(0)) - is_founder = address in FOUNDER_WALLETS - - return { - "address": 
address, - "balance": float(balance.to_rtc()), - "is_founder": is_founder, - } - - def get_block(self, height: int) -> Optional[Dict[str, Any]]: - """GET /api/block/:height - Get block by height""" - with self.lock: - if 0 <= height < len(self.blocks): - return self.blocks[height].to_dict() - return None - - def create_proposal( - self, - title: str, - description: str, - proposal_type: str, - proposer: WalletAddress, - contract_hash: Optional[str] = None, - ) -> Dict[str, Any]: - """POST /api/governance/create - Create proposal""" - ptype = ProposalType[proposal_type.upper()] - proposal = self.governance.create_proposal( - title=title, - description=description, - proposal_type=ptype, - proposer=proposer, - contract_hash=contract_hash, - ) - return proposal.to_dict() - - def sophia_analyze( - self, - proposal_id: str, - decision: str, - rationale: str, - ) -> Dict[str, Any]: - """POST /api/governance/sophia/analyze - Sophia evaluation""" - sophia_decision = SophiaDecision[decision.upper()] - evaluation = self.governance.sophia_evaluate( - proposal_id=proposal_id, - decision=sophia_decision, - rationale=rationale, - ) - proposal = self.governance.get_proposal(proposal_id) - return proposal.to_dict() if proposal else {} - - def vote_proposal( - self, - proposal_id: str, - voter: WalletAddress, - support: bool, - ) -> Dict[str, Any]: - """POST /api/governance/vote - Cast vote""" - with self.lock: - balance = self.wallets.get(voter.address, TokenAmount(0)) - vote = self.governance.vote( - proposal_id=proposal_id, - voter=voter, - support=support, - token_balance=balance.to_rtc(), - ) - proposal = self.governance.get_proposal(proposal_id) - return { - "success": True, - "vote_weight": str(vote.weight), - "proposal": proposal.to_dict() if proposal else {}, - } - - def get_proposals(self) -> List[Dict[str, Any]]: - """GET /api/governance/proposals - List proposals""" - return [p.to_dict() for p in self.governance.get_all_proposals()] - - -# 
============================================================================= -# Flask API Server -# ============================================================================= - -def create_api_server(node: RustChainNode): - """Create Flask API server for the node""" - try: - from flask import Flask, jsonify, request - from flask_cors import CORS - except ImportError: - print("Flask not installed. Run: pip install flask flask-cors") - return None - - app = Flask(__name__) - CORS(app) - - @app.route("/api/stats") - def stats(): - return jsonify(node.get_stats()) - - @app.route("/api/wallet/
") - def wallet(address): - return jsonify(node.get_wallet(address)) - - @app.route("/api/block/") - def block(height): - result = node.get_block(height) - if result: - return jsonify(result) - return jsonify({"error": "Block not found"}), 404 - - @app.route("/api/mine", methods=["POST"]) - def mine(): - data = request.json - wallet = WalletAddress(data["wallet"]) - hardware = HardwareInfo( - cpu_model=data["hardware"], - release_year=data.get("release_year", 2000), - uptime_days=data.get("uptime_days", 0), - ) - result = node.submit_mining_proof(wallet, hardware) - return jsonify(result) - - @app.route("/api/node/antiquity", methods=["POST"]) - def antiquity(): - data = request.json - wallet = WalletAddress(data["wallet"]) - hardware = HardwareInfo( - cpu_model=data["hardware"], - release_year=data.get("release_year", 2000), - uptime_days=data.get("uptime_days", 0), - ) - return jsonify(node.get_node_antiquity(wallet, hardware)) - - @app.route("/api/governance/proposals") - def proposals(): - return jsonify(node.get_proposals()) - - @app.route("/api/governance/create", methods=["POST"]) - def create_proposal(): - data = request.json - result = node.create_proposal( - title=data["title"], - description=data["description"], - proposal_type=data["type"], - proposer=WalletAddress(data["proposer"]), - contract_hash=data.get("contract_hash"), - ) - return jsonify(result) - - @app.route("/api/governance/vote", methods=["POST"]) - def vote(): - data = request.json - result = node.vote_proposal( - proposal_id=data["proposal_id"], - voter=WalletAddress(data["voter"]), - support=data["support"], - ) - return jsonify(result) - - return app - - -# ============================================================================= -# Main Entry Point -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN NODE - PROOF OF ANTIQUITY") - print("=" * 60) - print() - print("Philosophy: Every 
vintage computer has historical potential") - print() - - # Create and start node - config = NodeConfig() - node = RustChainNode(config) - node.start() - - # Create API server - app = create_api_server(node) - if app: - print() - print("Starting API server...") - app.run( - host=config.api_host, - port=config.api_port, - debug=False, - threaded=True, - ) +""" +RustChain Node Implementation +============================= + +Full node implementation combining all RIPs. + +APIs: +- GET /api/stats - Blockchain statistics +- GET /api/node/antiquity - Node AS and eligibility +- POST /api/node/claim - Submit block claim with PoA metadata +- POST /api/mine - Submit mining proof +- POST /api/governance/create - Create proposal +- POST /api/governance/vote - Cast vote +- GET /api/governance/proposals - List proposals +""" + +import hashlib +import json +import time +import sqlite3 +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Any +from decimal import Decimal +from threading import Lock, Thread +from pathlib import Path + +from .core_types import ( + Block, + BlockMiner, + Transaction, + TransactionType, + WalletAddress, + HardwareInfo, + TokenAmount, + TOTAL_SUPPLY, + BLOCK_TIME_SECONDS, + CHAIN_ID, + PREMINE_AMOUNT, + FOUNDER_WALLETS, +) +from .proof_of_antiquity import ( + ProofOfAntiquity, + calculate_antiquity_score, + AS_MAX, + AS_MIN, +) +from .deep_entropy import DeepEntropyVerifier, EntropyProof +from .governance import GovernanceEngine, ProposalType, SophiaDecision + + +# ============================================================================= +# Node Configuration +# ============================================================================= + +@dataclass +class NodeConfig: + """Node configuration""" + data_dir: str = "./rustchain_data" + api_host: str = "0.0.0.0" + api_port: int = 8085 + mtls_port: int = 4443 + enable_mining: bool = True + enable_governance: bool = True + + +# 
============================================================================= +# RustChain Node +# ============================================================================= + +class RustChainNode: + """ + Full RustChain node implementing Proof of Antiquity. + + This node: + - Validates hardware via deep entropy + - Calculates Antiquity Scores + - Processes blocks via weighted lottery + - Manages governance proposals + - Tracks wallets and balances + """ + + def __init__(self, config: Optional[NodeConfig] = None): + self.config = config or NodeConfig() + self.lock = Lock() + + # Initialize components + self.poa = ProofOfAntiquity() + self.entropy_verifier = DeepEntropyVerifier() + self.governance = GovernanceEngine(TOTAL_SUPPLY) + + # Blockchain state + self.blocks: List[Block] = [] + self.wallets: Dict[str, TokenAmount] = {} + self.pending_transactions: List[Transaction] = [] + + # Network state + self.total_minted = TokenAmount.from_rtc(float(PREMINE_AMOUNT)) + self.mining_pool = TokenAmount.from_rtc( + float(TOTAL_SUPPLY - PREMINE_AMOUNT) + ) + + # Initialize genesis + self._initialize_genesis() + + # Background block processor + self.running = False + + def _initialize_genesis(self): + """Initialize genesis block and founder wallets""" + # Create genesis block + genesis = Block( + height=0, + timestamp=int(time.time()), + previous_hash="0" * 64, + miners=[], + total_reward=TokenAmount(0), + ) + genesis.hash = "019c177b44a41f78da23caa99314adbc44889be2dcdd5021930f9d991e7e34cf" + self.blocks.append(genesis) + + # Initialize founder wallets (RIP-0004: 4 x 125,829.12 RTC) + founder_amount = TokenAmount.from_rtc(125829.12) + for wallet_addr in FOUNDER_WALLETS: + self.wallets[wallet_addr] = founder_amount + + print(f"🔥 RustChain Genesis initialized") + print(f" Chain ID: {CHAIN_ID}") + print(f" Total Supply: {TOTAL_SUPPLY:,} RTC") + print(f" Mining Pool: {self.mining_pool.to_rtc():,.2f} RTC") + print(f" Founder Wallets: {len(FOUNDER_WALLETS)}") + + def start(self): 
+ """Start the node""" + self.running = True + print(f"🚀 RustChain node starting...") + print(f" API: http://{self.config.api_host}:{self.config.api_port}") + print(f" mTLS: port {self.config.mtls_port}") + + # Start block processor thread + self.block_thread = Thread(target=self._block_processor, daemon=True) + self.block_thread.start() + + def stop(self): + """Stop the node""" + self.running = False + print("🛑 RustChain node stopped") + + def _block_processor(self): + """Background block processor""" + while self.running: + time.sleep(10) # Check every 10 seconds + + with self.lock: + status = self.poa.get_status() + if status["time_remaining_seconds"] <= 0: + self._process_block() + + def _process_block(self): + """Process pending proofs and create new block""" + previous_hash = self.blocks[-1].hash if self.blocks else "0" * 64 + block = self.poa.process_block(previous_hash) + + if block: + self.blocks.append(block) + + # Update wallet balances + for miner in block.miners: + wallet_addr = miner.wallet.address + if wallet_addr not in self.wallets: + self.wallets[wallet_addr] = TokenAmount(0) + self.wallets[wallet_addr] += miner.reward + + # Update totals + self.total_minted += block.total_reward + self.mining_pool -= block.total_reward + + print(f"⛏️ Block #{block.height} processed") + + # ========================================================================= + # API Methods + # ========================================================================= + + def get_stats(self) -> Dict[str, Any]: + """GET /api/stats - Get blockchain statistics""" + with self.lock: + return { + "chain_id": CHAIN_ID, + "blocks": len(self.blocks), + "total_minted": float(self.total_minted.to_rtc()), + "mining_pool": float(self.mining_pool.to_rtc()), + "wallets": len(self.wallets), + "pending_proofs": self.poa.get_status()["pending_proofs"], + "current_block_age": self.poa.get_status()["block_age_seconds"], + "next_block_in": self.poa.get_status()["time_remaining_seconds"], + 
"latest_block": self.blocks[-1].to_dict() if self.blocks else None, + } + + def get_node_antiquity( + self, wallet: WalletAddress, hardware: HardwareInfo + ) -> Dict[str, Any]: + """GET /api/node/antiquity - Get node AS and eligibility""" + as_score = calculate_antiquity_score( + hardware.release_year, + hardware.uptime_days + ) + + eligible = as_score >= AS_MIN + + return { + "wallet": wallet.address, + "hardware": hardware.to_dict(), + "antiquity_score": as_score, + "as_max": AS_MAX, + "eligible": eligible, + "eligibility_reason": ( + "Meets minimum AS threshold" + if eligible + else f"AS {as_score:.2f} below minimum {AS_MIN}" + ), + } + + def submit_mining_proof( + self, + wallet: WalletAddress, + hardware: HardwareInfo, + entropy_proof: Optional[EntropyProof] = None, + ) -> Dict[str, Any]: + """POST /api/mine - Submit mining proof""" + with self.lock: + # Verify entropy if provided + anti_emulation_hash = "0" * 64 + if entropy_proof: + result = self.entropy_verifier.verify( + entropy_proof, + self._detect_hardware_profile(hardware) + ) + if not result.valid: + return { + "success": False, + "error": f"Entropy verification failed: {result.issues}", + "emulation_probability": result.emulation_probability, + } + anti_emulation_hash = entropy_proof.signature_hash + + # Submit to PoA + try: + return self.poa.submit_proof( + wallet=wallet, + hardware=hardware, + anti_emulation_hash=anti_emulation_hash, + ) + except Exception as e: + return {"success": False, "error": str(e)} + + def _detect_hardware_profile(self, hardware: HardwareInfo) -> str: + """Detect hardware profile from HardwareInfo""" + model = hardware.cpu_model.lower() + if "486" in model: + return "486DX2" + elif "pentium ii" in model or "pentium 2" in model: + return "PentiumII" + elif "pentium" in model: + return "Pentium" + elif "g4" in model or "powerpc g4" in model: + return "G4" + elif "g5" in model or "powerpc g5" in model: + return "G5" + elif "alpha" in model: + return "Alpha" + return "Unknown" 
+ + def get_wallet(self, address: str) -> Dict[str, Any]: + """GET /api/wallet/:address - Get wallet details""" + with self.lock: + balance = self.wallets.get(address, TokenAmount(0)) + is_founder = address in FOUNDER_WALLETS + + return { + "address": address, + "balance": float(balance.to_rtc()), + "is_founder": is_founder, + } + + def get_block(self, height: int) -> Optional[Dict[str, Any]]: + """GET /api/block/:height - Get block by height""" + with self.lock: + if 0 <= height < len(self.blocks): + return self.blocks[height].to_dict() + return None + + def create_proposal( + self, + title: str, + description: str, + proposal_type: str, + proposer: WalletAddress, + contract_hash: Optional[str] = None, + ) -> Dict[str, Any]: + """POST /api/governance/create - Create proposal""" + ptype = ProposalType[proposal_type.upper()] + proposal = self.governance.create_proposal( + title=title, + description=description, + proposal_type=ptype, + proposer=proposer, + contract_hash=contract_hash, + ) + return proposal.to_dict() + + def sophia_analyze( + self, + proposal_id: str, + decision: str, + rationale: str, + ) -> Dict[str, Any]: + """POST /api/governance/sophia/analyze - Sophia evaluation""" + sophia_decision = SophiaDecision[decision.upper()] + evaluation = self.governance.sophia_evaluate( + proposal_id=proposal_id, + decision=sophia_decision, + rationale=rationale, + ) + proposal = self.governance.get_proposal(proposal_id) + return proposal.to_dict() if proposal else {} + + def vote_proposal( + self, + proposal_id: str, + voter: WalletAddress, + support: bool, + ) -> Dict[str, Any]: + """POST /api/governance/vote - Cast vote""" + with self.lock: + balance = self.wallets.get(voter.address, TokenAmount(0)) + vote = self.governance.vote( + proposal_id=proposal_id, + voter=voter, + support=support, + token_balance=balance.to_rtc(), + ) + proposal = self.governance.get_proposal(proposal_id) + return { + "success": True, + "vote_weight": str(vote.weight), + "proposal": 
proposal.to_dict() if proposal else {}, + } + + def get_proposals(self) -> List[Dict[str, Any]]: + """GET /api/governance/proposals - List proposals""" + return [p.to_dict() for p in self.governance.get_all_proposals()] + + +# ============================================================================= +# Flask API Server +# ============================================================================= + +def create_api_server(node: RustChainNode): + """Create Flask API server for the node""" + try: + from flask import Flask, jsonify, request + from flask_cors import CORS + except ImportError: + print("Flask not installed. Run: pip install flask flask-cors") + return None + + app = Flask(__name__) + CORS(app) + + @app.route("/api/stats") + def stats(): + return jsonify(node.get_stats()) + + @app.route("/api/wallet/
") + def wallet(address): + return jsonify(node.get_wallet(address)) + + @app.route("/api/block/") + def block(height): + result = node.get_block(height) + if result: + return jsonify(result) + return jsonify({"error": "Block not found"}), 404 + + @app.route("/api/mine", methods=["POST"]) + def mine(): + data = request.json + wallet = WalletAddress(data["wallet"]) + hardware = HardwareInfo( + cpu_model=data["hardware"], + release_year=data.get("release_year", 2000), + uptime_days=data.get("uptime_days", 0), + ) + result = node.submit_mining_proof(wallet, hardware) + return jsonify(result) + + @app.route("/api/node/antiquity", methods=["POST"]) + def antiquity(): + data = request.json + wallet = WalletAddress(data["wallet"]) + hardware = HardwareInfo( + cpu_model=data["hardware"], + release_year=data.get("release_year", 2000), + uptime_days=data.get("uptime_days", 0), + ) + return jsonify(node.get_node_antiquity(wallet, hardware)) + + @app.route("/api/governance/proposals") + def proposals(): + return jsonify(node.get_proposals()) + + @app.route("/api/governance/create", methods=["POST"]) + def create_proposal(): + data = request.json + result = node.create_proposal( + title=data["title"], + description=data["description"], + proposal_type=data["type"], + proposer=WalletAddress(data["proposer"]), + contract_hash=data.get("contract_hash"), + ) + return jsonify(result) + + @app.route("/api/governance/vote", methods=["POST"]) + def vote(): + data = request.json + result = node.vote_proposal( + proposal_id=data["proposal_id"], + voter=WalletAddress(data["voter"]), + support=data["support"], + ) + return jsonify(result) + + return app + + +# ============================================================================= +# Main Entry Point +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN NODE - PROOF OF ANTIQUITY") + print("=" * 60) + print() + print("Philosophy: Every 
vintage computer has historical potential") + print() + + # Create and start node + config = NodeConfig() + node = RustChainNode(config) + node.start() + + # Create API server + app = create_api_server(node) + if app: + print() + print("Starting API server...") + app.run( + host=config.api_host, + port=config.api_port, + debug=False, + threaded=True, + ) diff --git a/rips/python/rustchain/proof_of_antiquity.py b/rips/python/rustchain/proof_of_antiquity.py index 554c76cb..bd6b83ef 100644 --- a/rips/python/rustchain/proof_of_antiquity.py +++ b/rips/python/rustchain/proof_of_antiquity.py @@ -1,445 +1,445 @@ -""" -RustChain Proof of Antiquity Consensus (RIP-0001) -================================================= - -Proof of Antiquity (PoA) is NOT Proof of Work! - -PoA rewards: -- Hardware age (older = better) -- Node uptime (longer = better) -- Hardware authenticity (verified via deep entropy) - -Formula: AS = (current_year - release_year) * log10(uptime_days + 1) -""" - -import hashlib -import math -import time -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple -from decimal import Decimal - -from .core_types import ( - HardwareInfo, - HardwareTier, - WalletAddress, - Block, - BlockMiner, - TokenAmount, - BLOCK_REWARD, - BLOCK_TIME_SECONDS, - CURRENT_YEAR, -) - - -# ============================================================================= -# Constants -# ============================================================================= - -AS_MAX: float = 100.0 # Maximum Antiquity Score for reward capping -AS_MIN: float = 1.0 # Minimum AS to participate in validation -MAX_MINERS_PER_BLOCK: int = 100 -BLOCK_REWARD_AMOUNT: TokenAmount = TokenAmount.from_rtc(float(BLOCK_REWARD)) - - -# ============================================================================= -# Antiquity Score Calculation -# ============================================================================= - -def calculate_antiquity_score(release_year: int, 
uptime_days: int) -> float: - """ - Calculate Antiquity Score per RIP-0001 spec. - - Formula: AS = (current_year - release_year) * log10(uptime_days + 1) - - Args: - release_year: Year the hardware was manufactured - uptime_days: Days since node started or last reboot - - Returns: - Antiquity Score (AS) - - Examples: - >>> calculate_antiquity_score(1992, 276) # 486 DX2 - 80.46 # (2025-1992) * log10(277) ≈ 33 * 2.44 - - >>> calculate_antiquity_score(2002, 276) # PowerPC G4 - 56.10 # (2025-2002) * log10(277) ≈ 23 * 2.44 - - >>> calculate_antiquity_score(2023, 30) # Modern CPU - 2.96 # (2025-2023) * log10(31) ≈ 2 * 1.49 - """ - age = max(0, CURRENT_YEAR - release_year) - uptime_factor = math.log10(uptime_days + 1) - return age * uptime_factor - - -def calculate_reward(antiquity_score: float, total_reward: TokenAmount) -> TokenAmount: - """ - Calculate reward based on Antiquity Score per RIP-0001. - - Formula: Reward = R * min(1.0, AS / AS_max) - - Args: - antiquity_score: Node's AS value - total_reward: Total block reward pool - - Returns: - Calculated reward amount - """ - reward_factor = min(1.0, antiquity_score / AS_MAX) - reward_amount = int(total_reward.amount * reward_factor) - return TokenAmount(reward_amount) - - -# ============================================================================= -# Validated Proof -# ============================================================================= - -@dataclass -class ValidatedProof: - """A validated mining proof ready for block inclusion""" - wallet: WalletAddress - hardware: HardwareInfo - antiquity_score: float - anti_emulation_hash: str - validated_at: int - entropy_proof: Optional[bytes] = None - - def to_dict(self): - return { - "wallet": self.wallet.address, - "hardware": self.hardware.to_dict(), - "antiquity_score": self.antiquity_score, - "anti_emulation_hash": self.anti_emulation_hash, - "validated_at": self.validated_at, - } - - -# 
============================================================================= -# Proof Errors -# ============================================================================= - -class ProofError(Exception): - """Base class for proof validation errors""" - pass - - -class BlockWindowClosedError(ProofError): - """Block window has closed""" - pass - - -class DuplicateSubmissionError(ProofError): - """Already submitted proof for this block""" - pass - - -class BlockFullError(ProofError): - """Block has reached maximum miners""" - pass - - -class InsufficientAntiquityError(ProofError): - """Antiquity Score below minimum threshold""" - pass - - -class HardwareAlreadyRegisteredError(ProofError): - """Hardware already registered to another wallet""" - pass - - -class EmulationDetectedError(ProofError): - """Emulation detected - hardware is not genuine""" - pass - - -class DriftLockViolationError(ProofError): - """Node behavior has drifted - quarantined per RIP-0003""" - pass - - -# ============================================================================= -# Proof of Antiquity Validator -# ============================================================================= - -class ProofOfAntiquity: - """ - Proof of Antiquity consensus validator. - - This is NOT Proof of Work! We validate: - 1. Hardware authenticity via deep entropy checks - 2. Hardware age via device signature database - 3. Node uptime via continuous validation - 4. No computational puzzles - just verification - - Block selection uses weighted lottery based on Antiquity Score. 
- """ - - def __init__(self): - self.pending_proofs: List[ValidatedProof] = [] - self.block_start_time: int = int(time.time()) - self.known_hardware: Dict[str, WalletAddress] = {} # hash -> wallet - self.drifted_nodes: set = set() # Quarantined nodes (RIP-0003) - self.current_block_height: int = 0 - - def submit_proof( - self, - wallet: WalletAddress, - hardware: HardwareInfo, - anti_emulation_hash: str, - entropy_proof: Optional[bytes] = None, - ) -> Dict: - """ - Submit a mining proof for the current block. - - Args: - wallet: Miner's wallet address - hardware: Hardware information - anti_emulation_hash: Hash from entropy verification - entropy_proof: Optional detailed entropy proof - - Returns: - Result dict with acceptance status - - Raises: - Various ProofError subclasses on validation failure - """ - current_time = int(time.time()) - elapsed = current_time - self.block_start_time - - # Check if block window is still open - if elapsed >= BLOCK_TIME_SECONDS: - raise BlockWindowClosedError("Block window has closed") - - # Check for drift lock (RIP-0003) - if wallet.address in self.drifted_nodes: - raise DriftLockViolationError( - f"Node {wallet.address} is quarantined due to drift lock" - ) - - # Check for duplicate wallet submission - existing = [p for p in self.pending_proofs if p.wallet == wallet] - if existing: - raise DuplicateSubmissionError( - "Already submitted proof for this block" - ) - - # Check max miners - if len(self.pending_proofs) >= MAX_MINERS_PER_BLOCK: - raise BlockFullError("Block has reached maximum miners") - - # Calculate Antiquity Score - antiquity_score = calculate_antiquity_score( - hardware.release_year, - hardware.uptime_days - ) - - # Check minimum AS threshold (RIP-0003) - if antiquity_score < AS_MIN: - raise InsufficientAntiquityError( - f"Antiquity Score {antiquity_score:.2f} below minimum {AS_MIN}" - ) - - # Check for duplicate hardware - hw_hash = hardware.generate_hardware_hash() - if hw_hash in self.known_hardware: - 
existing_wallet = self.known_hardware[hw_hash] - if existing_wallet != wallet: - raise HardwareAlreadyRegisteredError( - f"Hardware already registered to {existing_wallet.address}" - ) - - # Create validated proof - validated = ValidatedProof( - wallet=wallet, - hardware=hardware, - antiquity_score=antiquity_score, - anti_emulation_hash=anti_emulation_hash, - validated_at=current_time, - entropy_proof=entropy_proof, - ) - - self.pending_proofs.append(validated) - self.known_hardware[hw_hash] = wallet - - return { - "success": True, - "message": "Proof accepted, waiting for block completion", - "pending_miners": len(self.pending_proofs), - "your_antiquity_score": antiquity_score, - "your_tier": hardware.tier.value, - "block_completes_in": BLOCK_TIME_SECONDS - elapsed, - } - - def process_block(self, previous_hash: str) -> Optional[Block]: - """ - Process all pending proofs and create a new block. - - Uses weighted lottery based on Antiquity Score for validator selection. - - Args: - previous_hash: Hash of previous block - - Returns: - New block if proofs exist, None otherwise - """ - if not self.pending_proofs: - self._reset_block() - return None - - # Calculate total AS for weighted distribution - total_as = sum(p.antiquity_score for p in self.pending_proofs) - - # Calculate rewards for each miner (proportional to AS) - miners = [] - total_distributed = 0 - - for proof in self.pending_proofs: - # Weighted share based on AS - share = proof.antiquity_score / total_as - reward = calculate_reward( - proof.antiquity_score * share * len(self.pending_proofs), - BLOCK_REWARD_AMOUNT - ) - total_distributed += reward.amount - - miners.append(BlockMiner( - wallet=proof.wallet, - hardware=proof.hardware.cpu_model, - antiquity_score=proof.antiquity_score, - reward=reward, - )) - - # Create new block - self.current_block_height += 1 - block = Block( - height=self.current_block_height, - timestamp=int(time.time()), - previous_hash=previous_hash, - miners=miners, - 
total_reward=TokenAmount(total_distributed), - ) - - print(f"⛏️ Block #{block.height} created! " - f"Reward: {block.total_reward.to_rtc()} RTC " - f"split among {len(miners)} miners") - - # Reset for next block - self._reset_block() - - return block - - def _reset_block(self): - """Reset state for next block""" - self.pending_proofs.clear() - self.block_start_time = int(time.time()) - - def get_status(self) -> Dict: - """Get current block status""" - elapsed = int(time.time()) - self.block_start_time - total_as = sum(p.antiquity_score for p in self.pending_proofs) - - return { - "current_block_height": self.current_block_height, - "pending_proofs": len(self.pending_proofs), - "total_antiquity_score": total_as, - "block_age_seconds": elapsed, - "time_remaining_seconds": max(0, BLOCK_TIME_SECONDS - elapsed), - "accepting_proofs": elapsed < BLOCK_TIME_SECONDS, - } - - def quarantine_node(self, wallet: WalletAddress, reason: str): - """ - Quarantine a node due to drift lock violation (RIP-0003). - - Args: - wallet: Node wallet to quarantine - reason: Reason for quarantine - """ - self.drifted_nodes.add(wallet.address) - print(f"🚫 Node {wallet.address} quarantined: {reason}") - - def release_node(self, wallet: WalletAddress): - """ - Release a node from quarantine after challenge passage (RIP-0003). - - Args: - wallet: Node wallet to release - """ - self.drifted_nodes.discard(wallet.address) - print(f"✅ Node {wallet.address} released from quarantine") - - -# ============================================================================= -# Validator Selection -# ============================================================================= - -def select_block_validator(proofs: List[ValidatedProof]) -> Optional[ValidatedProof]: - """ - Select block validator using weighted lottery (RIP-0001). - - Higher Antiquity Score = higher probability of selection. 
- - Args: - proofs: List of validated proofs - - Returns: - Selected validator's proof, or None if no proofs - """ - if not proofs: - return None - - import random - - total_as = sum(p.antiquity_score for p in proofs) - if total_as == 0: - return random.choice(proofs) - - # Weighted random selection - r = random.uniform(0, total_as) - cumulative = 0 - - for proof in proofs: - cumulative += proof.antiquity_score - if r <= cumulative: - return proof - - return proofs[-1] - - -# ============================================================================= -# Example Usage -# ============================================================================= - -if __name__ == "__main__": - # Demo: Calculate AS for different hardware - examples = [ - ("Intel 486 DX2-66", 1992, 276), - ("PowerPC G4", 2002, 276), - ("Core 2 Duo", 2006, 180), - ("Ryzen 9 7950X", 2022, 30), - ] - - print("=" * 60) - print("RUSTCHAIN PROOF OF ANTIQUITY - ANTIQUITY SCORE CALCULATOR") - print("=" * 60) - print(f"Formula: AS = (2025 - release_year) * log10(uptime_days + 1)") - print("=" * 60) - print() - - for model, year, uptime in examples: - hw = HardwareInfo(cpu_model=model, release_year=year, uptime_days=uptime) - as_score = calculate_antiquity_score(year, uptime) - tier = HardwareTier.from_release_year(year) - - print(f"📟 {model} ({year})") - print(f" Age: {CURRENT_YEAR - year} years") - print(f" Uptime: {uptime} days") - print(f" Tier: {tier.value.upper()} ({tier.multiplier}x)") - print(f" Antiquity Score: {as_score:.2f}") - print() - - print("💡 Remember: This is NOT Proof of Work!") - print(" Older hardware with longer uptime wins, not faster hardware.") +""" +RustChain Proof of Antiquity Consensus (RIP-0001) +================================================= + +Proof of Antiquity (PoA) is NOT Proof of Work! 
+ +PoA rewards: +- Hardware age (older = better) +- Node uptime (longer = better) +- Hardware authenticity (verified via deep entropy) + +Formula: AS = (current_year - release_year) * log10(uptime_days + 1) +""" + +import hashlib +import math +import time +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple +from decimal import Decimal + +from .core_types import ( + HardwareInfo, + HardwareTier, + WalletAddress, + Block, + BlockMiner, + TokenAmount, + BLOCK_REWARD, + BLOCK_TIME_SECONDS, + CURRENT_YEAR, +) + + +# ============================================================================= +# Constants +# ============================================================================= + +AS_MAX: float = 100.0 # Maximum Antiquity Score for reward capping +AS_MIN: float = 1.0 # Minimum AS to participate in validation +MAX_MINERS_PER_BLOCK: int = 100 +BLOCK_REWARD_AMOUNT: TokenAmount = TokenAmount.from_rtc(float(BLOCK_REWARD)) + + +# ============================================================================= +# Antiquity Score Calculation +# ============================================================================= + +def calculate_antiquity_score(release_year: int, uptime_days: int) -> float: + """ + Calculate Antiquity Score per RIP-0001 spec. 
+ + Formula: AS = (current_year - release_year) * log10(uptime_days + 1) + + Args: + release_year: Year the hardware was manufactured + uptime_days: Days since node started or last reboot + + Returns: + Antiquity Score (AS) + + Examples: + >>> calculate_antiquity_score(1992, 276) # 486 DX2 + 80.46 # (2025-1992) * log10(277) ≈ 33 * 2.44 + + >>> calculate_antiquity_score(2002, 276) # PowerPC G4 + 56.10 # (2025-2002) * log10(277) ≈ 23 * 2.44 + + >>> calculate_antiquity_score(2023, 30) # Modern CPU + 2.96 # (2025-2023) * log10(31) ≈ 2 * 1.49 + """ + age = max(0, CURRENT_YEAR - release_year) + uptime_factor = math.log10(uptime_days + 1) + return age * uptime_factor + + +def calculate_reward(antiquity_score: float, total_reward: TokenAmount) -> TokenAmount: + """ + Calculate reward based on Antiquity Score per RIP-0001. + + Formula: Reward = R * min(1.0, AS / AS_max) + + Args: + antiquity_score: Node's AS value + total_reward: Total block reward pool + + Returns: + Calculated reward amount + """ + reward_factor = min(1.0, antiquity_score / AS_MAX) + reward_amount = int(total_reward.amount * reward_factor) + return TokenAmount(reward_amount) + + +# ============================================================================= +# Validated Proof +# ============================================================================= + +@dataclass +class ValidatedProof: + """A validated mining proof ready for block inclusion""" + wallet: WalletAddress + hardware: HardwareInfo + antiquity_score: float + anti_emulation_hash: str + validated_at: int + entropy_proof: Optional[bytes] = None + + def to_dict(self): + return { + "wallet": self.wallet.address, + "hardware": self.hardware.to_dict(), + "antiquity_score": self.antiquity_score, + "anti_emulation_hash": self.anti_emulation_hash, + "validated_at": self.validated_at, + } + + +# ============================================================================= +# Proof Errors +# 
============================================================================= + +class ProofError(Exception): + """Base class for proof validation errors""" + pass + + +class BlockWindowClosedError(ProofError): + """Block window has closed""" + pass + + +class DuplicateSubmissionError(ProofError): + """Already submitted proof for this block""" + pass + + +class BlockFullError(ProofError): + """Block has reached maximum miners""" + pass + + +class InsufficientAntiquityError(ProofError): + """Antiquity Score below minimum threshold""" + pass + + +class HardwareAlreadyRegisteredError(ProofError): + """Hardware already registered to another wallet""" + pass + + +class EmulationDetectedError(ProofError): + """Emulation detected - hardware is not genuine""" + pass + + +class DriftLockViolationError(ProofError): + """Node behavior has drifted - quarantined per RIP-0003""" + pass + + +# ============================================================================= +# Proof of Antiquity Validator +# ============================================================================= + +class ProofOfAntiquity: + """ + Proof of Antiquity consensus validator. + + This is NOT Proof of Work! We validate: + 1. Hardware authenticity via deep entropy checks + 2. Hardware age via device signature database + 3. Node uptime via continuous validation + 4. No computational puzzles - just verification + + Block selection uses weighted lottery based on Antiquity Score. + """ + + def __init__(self): + self.pending_proofs: List[ValidatedProof] = [] + self.block_start_time: int = int(time.time()) + self.known_hardware: Dict[str, WalletAddress] = {} # hash -> wallet + self.drifted_nodes: set = set() # Quarantined nodes (RIP-0003) + self.current_block_height: int = 0 + + def submit_proof( + self, + wallet: WalletAddress, + hardware: HardwareInfo, + anti_emulation_hash: str, + entropy_proof: Optional[bytes] = None, + ) -> Dict: + """ + Submit a mining proof for the current block. 
+ + Args: + wallet: Miner's wallet address + hardware: Hardware information + anti_emulation_hash: Hash from entropy verification + entropy_proof: Optional detailed entropy proof + + Returns: + Result dict with acceptance status + + Raises: + Various ProofError subclasses on validation failure + """ + current_time = int(time.time()) + elapsed = current_time - self.block_start_time + + # Check if block window is still open + if elapsed >= BLOCK_TIME_SECONDS: + raise BlockWindowClosedError("Block window has closed") + + # Check for drift lock (RIP-0003) + if wallet.address in self.drifted_nodes: + raise DriftLockViolationError( + f"Node {wallet.address} is quarantined due to drift lock" + ) + + # Check for duplicate wallet submission + existing = [p for p in self.pending_proofs if p.wallet == wallet] + if existing: + raise DuplicateSubmissionError( + "Already submitted proof for this block" + ) + + # Check max miners + if len(self.pending_proofs) >= MAX_MINERS_PER_BLOCK: + raise BlockFullError("Block has reached maximum miners") + + # Calculate Antiquity Score + antiquity_score = calculate_antiquity_score( + hardware.release_year, + hardware.uptime_days + ) + + # Check minimum AS threshold (RIP-0003) + if antiquity_score < AS_MIN: + raise InsufficientAntiquityError( + f"Antiquity Score {antiquity_score:.2f} below minimum {AS_MIN}" + ) + + # Check for duplicate hardware + hw_hash = hardware.generate_hardware_hash() + if hw_hash in self.known_hardware: + existing_wallet = self.known_hardware[hw_hash] + if existing_wallet != wallet: + raise HardwareAlreadyRegisteredError( + f"Hardware already registered to {existing_wallet.address}" + ) + + # Create validated proof + validated = ValidatedProof( + wallet=wallet, + hardware=hardware, + antiquity_score=antiquity_score, + anti_emulation_hash=anti_emulation_hash, + validated_at=current_time, + entropy_proof=entropy_proof, + ) + + self.pending_proofs.append(validated) + self.known_hardware[hw_hash] = wallet + + return { + 
"success": True, + "message": "Proof accepted, waiting for block completion", + "pending_miners": len(self.pending_proofs), + "your_antiquity_score": antiquity_score, + "your_tier": hardware.tier.value, + "block_completes_in": BLOCK_TIME_SECONDS - elapsed, + } + + def process_block(self, previous_hash: str) -> Optional[Block]: + """ + Process all pending proofs and create a new block. + + Uses weighted lottery based on Antiquity Score for validator selection. + + Args: + previous_hash: Hash of previous block + + Returns: + New block if proofs exist, None otherwise + """ + if not self.pending_proofs: + self._reset_block() + return None + + # Calculate total AS for weighted distribution + total_as = sum(p.antiquity_score for p in self.pending_proofs) + + # Calculate rewards for each miner (proportional to AS) + miners = [] + total_distributed = 0 + + for proof in self.pending_proofs: + # Weighted share based on AS + share = proof.antiquity_score / total_as + reward = calculate_reward( + proof.antiquity_score * share * len(self.pending_proofs), + BLOCK_REWARD_AMOUNT + ) + total_distributed += reward.amount + + miners.append(BlockMiner( + wallet=proof.wallet, + hardware=proof.hardware.cpu_model, + antiquity_score=proof.antiquity_score, + reward=reward, + )) + + # Create new block + self.current_block_height += 1 + block = Block( + height=self.current_block_height, + timestamp=int(time.time()), + previous_hash=previous_hash, + miners=miners, + total_reward=TokenAmount(total_distributed), + ) + + print(f"⛏️ Block #{block.height} created! 
" + f"Reward: {block.total_reward.to_rtc()} RTC " + f"split among {len(miners)} miners") + + # Reset for next block + self._reset_block() + + return block + + def _reset_block(self): + """Reset state for next block""" + self.pending_proofs.clear() + self.block_start_time = int(time.time()) + + def get_status(self) -> Dict: + """Get current block status""" + elapsed = int(time.time()) - self.block_start_time + total_as = sum(p.antiquity_score for p in self.pending_proofs) + + return { + "current_block_height": self.current_block_height, + "pending_proofs": len(self.pending_proofs), + "total_antiquity_score": total_as, + "block_age_seconds": elapsed, + "time_remaining_seconds": max(0, BLOCK_TIME_SECONDS - elapsed), + "accepting_proofs": elapsed < BLOCK_TIME_SECONDS, + } + + def quarantine_node(self, wallet: WalletAddress, reason: str): + """ + Quarantine a node due to drift lock violation (RIP-0003). + + Args: + wallet: Node wallet to quarantine + reason: Reason for quarantine + """ + self.drifted_nodes.add(wallet.address) + print(f"🚫 Node {wallet.address} quarantined: {reason}") + + def release_node(self, wallet: WalletAddress): + """ + Release a node from quarantine after challenge passage (RIP-0003). + + Args: + wallet: Node wallet to release + """ + self.drifted_nodes.discard(wallet.address) + print(f"✅ Node {wallet.address} released from quarantine") + + +# ============================================================================= +# Validator Selection +# ============================================================================= + +def select_block_validator(proofs: List[ValidatedProof]) -> Optional[ValidatedProof]: + """ + Select block validator using weighted lottery (RIP-0001). + + Higher Antiquity Score = higher probability of selection. 
+ + Args: + proofs: List of validated proofs + + Returns: + Selected validator's proof, or None if no proofs + """ + if not proofs: + return None + + import random + + total_as = sum(p.antiquity_score for p in proofs) + if total_as == 0: + return random.choice(proofs) + + # Weighted random selection + r = random.uniform(0, total_as) + cumulative = 0 + + for proof in proofs: + cumulative += proof.antiquity_score + if r <= cumulative: + return proof + + return proofs[-1] + + +# ============================================================================= +# Example Usage +# ============================================================================= + +if __name__ == "__main__": + # Demo: Calculate AS for different hardware + examples = [ + ("Intel 486 DX2-66", 1992, 276), + ("PowerPC G4", 2002, 276), + ("Core 2 Duo", 2006, 180), + ("Ryzen 9 7950X", 2022, 30), + ] + + print("=" * 60) + print("RUSTCHAIN PROOF OF ANTIQUITY - ANTIQUITY SCORE CALCULATOR") + print("=" * 60) + print(f"Formula: AS = (2025 - release_year) * log10(uptime_days + 1)") + print("=" * 60) + print() + + for model, year, uptime in examples: + hw = HardwareInfo(cpu_model=model, release_year=year, uptime_days=uptime) + as_score = calculate_antiquity_score(year, uptime) + tier = HardwareTier.from_release_year(year) + + print(f"📟 {model} ({year})") + print(f" Age: {CURRENT_YEAR - year} years") + print(f" Uptime: {uptime} days") + print(f" Tier: {tier.value.upper()} ({tier.multiplier}x)") + print(f" Antiquity Score: {as_score:.2f}") + print() + + print("💡 Remember: This is NOT Proof of Work!") + print(" Older hardware with longer uptime wins, not faster hardware.") diff --git a/rips/python/rustchain/rip201_server_patch.py b/rips/python/rustchain/rip201_server_patch.py index c76bd570..a2b61bd1 100644 --- a/rips/python/rustchain/rip201_server_patch.py +++ b/rips/python/rustchain/rip201_server_patch.py @@ -1,222 +1,222 @@ -#!/usr/bin/env python3 -""" -RIP-201 Server Integration Patch 
-================================= - -This script patches rustchain_v2_integrated_v2.2.1_rip200.py to integrate -the fleet immune system. Run on VPS after copying fleet_immune_system.py. - -Usage: - python3 rip201_server_patch.py [--dry-run] [--server-file PATH] - -Patches applied: - 1. Import fleet_immune_system module - 2. Update record_attestation_success() to collect fleet signals - 3. Hook calculate_immune_weights() into epoch settlement - 4. Register fleet admin endpoints -""" - -import argparse -import os -import platform -import re -import shutil -import sys -from datetime import datetime - - -def patch_file(filepath: str, dry_run: bool = False) -> bool: - """Apply all patches to the server file.""" - - with open(filepath, 'r') as f: - content = f.read() - lines = content.split('\n') - - original = content - patches_applied = 0 - - # ─── Patch 1: Add fleet immune system import ─── - marker = "from hashlib import blake2b" - if marker in content and "fleet_immune_system" not in content: - content = content.replace( - marker, - marker + """ - -# RIP-201: Fleet Detection Immune System -try: - from fleet_immune_system import ( - record_fleet_signals, calculate_immune_weights, - register_fleet_endpoints, ensure_schema as ensure_fleet_schema, - get_fleet_report - ) - HAVE_FLEET_IMMUNE = True - print("[RIP-201] Fleet immune system loaded") -except Exception as _e: - print(f"[RIP-201] Fleet immune system not available: {_e}") - HAVE_FLEET_IMMUNE = False""" - ) - patches_applied += 1 - print(" [1/4] Added fleet immune system imports") - elif "fleet_immune_system" in content: - print(" [1/4] Fleet imports already present — skipping") - else: - print(f" [1/4] WARNING: Could not find import marker '{marker}'") - - # ─── Patch 2: Update record_attestation_success to pass signals & collect fleet data ─── - old_func = "def record_attestation_success(miner: str, device: dict, fingerprint_passed: bool = False):" - new_func = "def record_attestation_success(miner: str, 
device: dict, fingerprint_passed: bool = False, signals: dict = None, fingerprint: dict = None, ip_address: str = None):" - - if old_func in content: - content = content.replace(old_func, new_func) - patches_applied += 1 - print(" [2/4] Updated record_attestation_success() signature") - elif "signals: dict = None" in content and "record_attestation_success" in content: - print(" [2/4] Function signature already updated — skipping") - else: - print(" [2/4] WARNING: Could not find record_attestation_success signature") - - # Add fleet signal hook after the INSERT in record_attestation_success - attest_commit = """ conn.commit()""" - fleet_hook = """ conn.commit() - - # RIP-201: Record fleet immune system signals - if HAVE_FLEET_IMMUNE: - try: - record_fleet_signals(conn, miner, device, signals or {}, - fingerprint, now, ip_address=ip_address) - except Exception as _fe: - print(f"[RIP-201] Fleet signal recording warning: {_fe}")""" - - # Only patch the first occurrence in record_attestation_success context - # Find the function, then find its conn.commit() - func_match = re.search(r'def record_attestation_success\(.*?\n(.*?)(def |\Z)', content, re.DOTALL) - if func_match and "RIP-201: Record fleet" not in content: - func_body = func_match.group(0) - if "conn.commit()" in func_body: - patched_body = func_body.replace(" conn.commit()", fleet_hook, 1) - content = content.replace(func_body, patched_body) - patches_applied += 1 - print(" [2b/4] Added fleet signal hook to record_attestation_success()") - elif "RIP-201: Record fleet" in content: - print(" [2b/4] Fleet signal hook already present — skipping") - - # ─── Patch 3: Update submit_attestation call to pass extra args ─── - old_call = "record_attestation_success(miner, device, fingerprint_passed)" - new_call = "record_attestation_success(miner, device, fingerprint_passed, signals=signals, fingerprint=fingerprint, ip_address=request.remote_addr)" - - if old_call in content: - content = content.replace(old_call, 
new_call) - patches_applied += 1 - print(" [3/4] Updated submit_attestation() call to pass signals/fingerprint/IP") - elif "signals=signals" in content and "record_attestation_success" in content: - print(" [3/4] Call already passes signals — skipping") - else: - print(" [3/4] WARNING: Could not find record_attestation_success call") - - # ─── Patch 4: Register fleet endpoints ─── - rewards_marker = '[REWARDS] Endpoints registered successfully' - fleet_reg = """ - # RIP-201: Fleet immune system endpoints - if HAVE_FLEET_IMMUNE: - try: - register_fleet_endpoints(app, DB_PATH) - print("[RIP-201] Fleet immune endpoints registered") - except Exception as e: - print(f"[RIP-201] Failed to register fleet endpoints: {e}")""" - - if rewards_marker in content and "Fleet immune endpoints" not in content: - # Insert after the rewards registration block - insert_point = content.find(rewards_marker) - # Find the end of the except block - after_rewards = content[insert_point:] - # Find the next blank line or next if/try block - match = re.search(r'\n\n', after_rewards) - if match: - insert_pos = insert_point + match.end() - content = content[:insert_pos] + fleet_reg + "\n" + content[insert_pos:] - patches_applied += 1 - print(" [4/4] Registered fleet immune system endpoints") - else: - # Fallback: insert after the print line - line_end = content.find('\n', insert_point) - content = content[:line_end+1] + fleet_reg + "\n" + content[line_end+1:] - patches_applied += 1 - print(" [4/4] Registered fleet immune system endpoints (fallback)") - elif "Fleet immune endpoints" in content: - print(" [4/4] Fleet endpoints already registered — skipping") - else: - print(" [4/4] WARNING: Could not find rewards registration marker") - - # ─── Apply ─── - if patches_applied == 0: - print("\nNo patches needed — file already up to date.") - return True - - if content == original: - print("\nNo changes detected despite patches — check manually.") - return False - - if dry_run: - print(f"\n[DRY RUN] 
Would apply {patches_applied} patches to {filepath}") - return True - - # Backup original - backup_path = filepath + f".backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - shutil.copy2(filepath, backup_path) - print(f"\nBackup saved: {backup_path}") - - # Write patched file - with open(filepath, 'w') as f: - f.write(content) - - print(f"Applied {patches_applied} patches to {filepath}") - return True - - -def main(): - parser = argparse.ArgumentParser(description="RIP-201 Fleet Immune System Server Patch") - parser.add_argument("--dry-run", action="store_true", help="Preview patches without applying") - parser.add_argument("--server-file", default=None, - help="Path to server file (default: auto-detect)") - args = parser.parse_args() - - # Find server file - candidates = [ - args.server_file, - "/root/rustchain/rustchain_v2_integrated_v2.2.1_rip200.py", - os.path.expanduser("~/tmp_rustchain/node_package/rustchain_v2_integrated_v2.2.1_rip200.py"), - ] - - server_file = None - for c in candidates: - if c and os.path.isfile(c): - server_file = c - break - - if not server_file: - print("ERROR: Could not find server file. Use --server-file to specify path.") - sys.exit(1) - - print(f"RIP-201 Fleet Immune System Patch") - print(f"{'='*50}") - print(f"System Information:") - print(f" OS: {platform.system()} {platform.release()}") - print(f" Architecture: {platform.machine()}") - print(f" Python: {platform.python_version()}") - print(f"{'='*50}") - print(f"Target: {server_file}") - print(f"Mode: {'DRY RUN' if args.dry_run else 'LIVE'}") - print(f"{'='*50}\n") - - success = patch_file(server_file, dry_run=args.dry_run) - - if success: - print("\nPatch complete. 
Restart the RustChain service:") - print(" systemctl restart rustchain") - else: - print("\nPatch failed — check errors above.") - sys.exit(1) - - -if __name__ == "__main__": - main() +#!/usr/bin/env python3 +""" +RIP-201 Server Integration Patch +================================= + +This script patches rustchain_v2_integrated_v2.2.1_rip200.py to integrate +the fleet immune system. Run on VPS after copying fleet_immune_system.py. + +Usage: + python3 rip201_server_patch.py [--dry-run] [--server-file PATH] + +Patches applied: + 1. Import fleet_immune_system module + 2. Update record_attestation_success() to collect fleet signals + 3. Hook calculate_immune_weights() into epoch settlement + 4. Register fleet admin endpoints +""" + +import argparse +import os +import platform +import re +import shutil +import sys +from datetime import datetime + + +def patch_file(filepath: str, dry_run: bool = False) -> bool: + """Apply all patches to the server file.""" + + with open(filepath, 'r') as f: + content = f.read() + lines = content.split('\n') + + original = content + patches_applied = 0 + + # ─── Patch 1: Add fleet immune system import ─── + marker = "from hashlib import blake2b" + if marker in content and "fleet_immune_system" not in content: + content = content.replace( + marker, + marker + """ + +# RIP-201: Fleet Detection Immune System +try: + from fleet_immune_system import ( + record_fleet_signals, calculate_immune_weights, + register_fleet_endpoints, ensure_schema as ensure_fleet_schema, + get_fleet_report + ) + HAVE_FLEET_IMMUNE = True + print("[RIP-201] Fleet immune system loaded") +except Exception as _e: + print(f"[RIP-201] Fleet immune system not available: {_e}") + HAVE_FLEET_IMMUNE = False""" + ) + patches_applied += 1 + print(" [1/4] Added fleet immune system imports") + elif "fleet_immune_system" in content: + print(" [1/4] Fleet imports already present — skipping") + else: + print(f" [1/4] WARNING: Could not find import marker '{marker}'") + + # ─── Patch 
2: Update record_attestation_success to pass signals & collect fleet data ─── + old_func = "def record_attestation_success(miner: str, device: dict, fingerprint_passed: bool = False):" + new_func = "def record_attestation_success(miner: str, device: dict, fingerprint_passed: bool = False, signals: dict = None, fingerprint: dict = None, ip_address: str = None):" + + if old_func in content: + content = content.replace(old_func, new_func) + patches_applied += 1 + print(" [2/4] Updated record_attestation_success() signature") + elif "signals: dict = None" in content and "record_attestation_success" in content: + print(" [2/4] Function signature already updated — skipping") + else: + print(" [2/4] WARNING: Could not find record_attestation_success signature") + + # Add fleet signal hook after the INSERT in record_attestation_success + attest_commit = """ conn.commit()""" + fleet_hook = """ conn.commit() + + # RIP-201: Record fleet immune system signals + if HAVE_FLEET_IMMUNE: + try: + record_fleet_signals(conn, miner, device, signals or {}, + fingerprint, now, ip_address=ip_address) + except Exception as _fe: + print(f"[RIP-201] Fleet signal recording warning: {_fe}")""" + + # Only patch the first occurrence in record_attestation_success context + # Find the function, then find its conn.commit() + func_match = re.search(r'def record_attestation_success\(.*?\n(.*?)(def |\Z)', content, re.DOTALL) + if func_match and "RIP-201: Record fleet" not in content: + func_body = func_match.group(0) + if "conn.commit()" in func_body: + patched_body = func_body.replace(" conn.commit()", fleet_hook, 1) + content = content.replace(func_body, patched_body) + patches_applied += 1 + print(" [2b/4] Added fleet signal hook to record_attestation_success()") + elif "RIP-201: Record fleet" in content: + print(" [2b/4] Fleet signal hook already present — skipping") + + # ─── Patch 3: Update submit_attestation call to pass extra args ─── + old_call = "record_attestation_success(miner, device, 
fingerprint_passed)" + new_call = "record_attestation_success(miner, device, fingerprint_passed, signals=signals, fingerprint=fingerprint, ip_address=request.remote_addr)" + + if old_call in content: + content = content.replace(old_call, new_call) + patches_applied += 1 + print(" [3/4] Updated submit_attestation() call to pass signals/fingerprint/IP") + elif "signals=signals" in content and "record_attestation_success" in content: + print(" [3/4] Call already passes signals — skipping") + else: + print(" [3/4] WARNING: Could not find record_attestation_success call") + + # ─── Patch 4: Register fleet endpoints ─── + rewards_marker = '[REWARDS] Endpoints registered successfully' + fleet_reg = """ + # RIP-201: Fleet immune system endpoints + if HAVE_FLEET_IMMUNE: + try: + register_fleet_endpoints(app, DB_PATH) + print("[RIP-201] Fleet immune endpoints registered") + except Exception as e: + print(f"[RIP-201] Failed to register fleet endpoints: {e}")""" + + if rewards_marker in content and "Fleet immune endpoints" not in content: + # Insert after the rewards registration block + insert_point = content.find(rewards_marker) + # Find the end of the except block + after_rewards = content[insert_point:] + # Find the next blank line or next if/try block + match = re.search(r'\n\n', after_rewards) + if match: + insert_pos = insert_point + match.end() + content = content[:insert_pos] + fleet_reg + "\n" + content[insert_pos:] + patches_applied += 1 + print(" [4/4] Registered fleet immune system endpoints") + else: + # Fallback: insert after the print line + line_end = content.find('\n', insert_point) + content = content[:line_end+1] + fleet_reg + "\n" + content[line_end+1:] + patches_applied += 1 + print(" [4/4] Registered fleet immune system endpoints (fallback)") + elif "Fleet immune endpoints" in content: + print(" [4/4] Fleet endpoints already registered — skipping") + else: + print(" [4/4] WARNING: Could not find rewards registration marker") + + # ─── Apply ─── + if 
patches_applied == 0: + print("\nNo patches needed — file already up to date.") + return True + + if content == original: + print("\nNo changes detected despite patches — check manually.") + return False + + if dry_run: + print(f"\n[DRY RUN] Would apply {patches_applied} patches to {filepath}") + return True + + # Backup original + backup_path = filepath + f".backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + shutil.copy2(filepath, backup_path) + print(f"\nBackup saved: {backup_path}") + + # Write patched file + with open(filepath, 'w') as f: + f.write(content) + + print(f"Applied {patches_applied} patches to {filepath}") + return True + + +def main(): + parser = argparse.ArgumentParser(description="RIP-201 Fleet Immune System Server Patch") + parser.add_argument("--dry-run", action="store_true", help="Preview patches without applying") + parser.add_argument("--server-file", default=None, + help="Path to server file (default: auto-detect)") + args = parser.parse_args() + + # Find server file + candidates = [ + args.server_file, + "/root/rustchain/rustchain_v2_integrated_v2.2.1_rip200.py", + os.path.expanduser("~/tmp_rustchain/node_package/rustchain_v2_integrated_v2.2.1_rip200.py"), + ] + + server_file = None + for c in candidates: + if c and os.path.isfile(c): + server_file = c + break + + if not server_file: + print("ERROR: Could not find server file. Use --server-file to specify path.") + sys.exit(1) + + print(f"RIP-201 Fleet Immune System Patch") + print(f"{'='*50}") + print(f"System Information:") + print(f" OS: {platform.system()} {platform.release()}") + print(f" Architecture: {platform.machine()}") + print(f" Python: {platform.python_version()}") + print(f"{'='*50}") + print(f"Target: {server_file}") + print(f"Mode: {'DRY RUN' if args.dry_run else 'LIVE'}") + print(f"{'='*50}\n") + + success = patch_file(server_file, dry_run=args.dry_run) + + if success: + print("\nPatch complete. 
Restart the RustChain service:") + print(" systemctl restart rustchain") + else: + print("\nPatch failed — check errors above.") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/rips/rustchain-core/RUSTCHAIN_PROOF_OF_ANTIQUITY.md b/rips/rustchain-core/RUSTCHAIN_PROOF_OF_ANTIQUITY.md index 36aed018..fbeeb769 100644 --- a/rips/rustchain-core/RUSTCHAIN_PROOF_OF_ANTIQUITY.md +++ b/rips/rustchain-core/RUSTCHAIN_PROOF_OF_ANTIQUITY.md @@ -1,775 +1,775 @@ -# RustChain Proof of Antiquity (PoA) System - -## Complete Technical Documentation - -**Version:** 1.0.0 -**Philosophy:** "1 CPU = 1 Vote - Physical proof, not mathematical" -**Core Principle:** "It's cheaper to buy a $50 vintage Mac than to emulate one" - ---- - -## Table of Contents - -1. [Executive Summary](#executive-summary) -2. [System Architecture](#system-architecture) -3. [Core Components](#core-components) - - [Entropy Collection](#entropy-collection) - - [Anti-Spoofing System](#anti-spoofing-system) - - [Mutating Challenge System](#mutating-challenge-system) - - [Quantum-Resistant Entropy Collapse](#quantum-resistant-entropy-collapse) - - [Hidden Mutator Oracle Network](#hidden-mutator-oracle-network) - - [Multi-Architecture Oracle Support](#multi-architecture-oracle-support) -4. [Antiquity Bonus Tier System](#antiquity-bonus-tier-system) -5. [Economic Security Analysis](#economic-security-analysis) -6. [Attack Vectors and Mitigations](#attack-vectors-and-mitigations) -7. [Hardware Requirements](#hardware-requirements) -8. [API Reference](#api-reference) -9. 
[File Structure](#file-structure) - ---- - -## Executive Summary - -RustChain Proof of Antiquity (PoA) is a novel consensus mechanism that: - -- **Rewards vintage hardware preservation** instead of raw computational power -- **Makes emulation economically irrational** through physical entropy verification -- **Achieves "1 CPU = 1 Vote"** using hardware-specific timing characteristics -- **Provides quantum resistance** through physical entropy, not mathematical hardness -- **Prevents bot farming** by heavily penalizing common ARM devices - -### Key Innovation - -Traditional blockchain security relies on mathematical hardness (factoring, discrete log). -PoA relies on **physical hardness** - you cannot simulate atoms faster than atoms run. - -``` -Classical Attack: 2^512 operations (heat death of universe) -Quantum Attack: 2^256 operations (Grover) - still impossible -Physical Attack: Simulate actual silicon atoms - IMPOSSIBLE -``` - ---- - -## System Architecture - -``` - +=============================================+ - | RUSTCHAIN PROOF OF ANTIQUITY | - | "Ancient silicon decides fate" | - +=============================================+ - | - +-----------------------------+-----------------------------+ - | | | -+---------v---------+ +-----------v-----------+ +-----------v-----------+ -| ENTROPY LAYER | | CHALLENGE LAYER | | CONSENSUS LAYER | -| | | | | | -| - Hardware proofs | | - Anti-spoofing | | - Block production | -| - Timing samples | | - Mutating params | | - Validator selection | -| - Cache analysis | | - Round-robin verify | | - Antiquity bonuses | -+-------------------+ +-----------------------+ +-----------------------+ - | | | - +-----------------------------+-----------------------------+ - | - +-------------------v-------------------+ - | HIDDEN MUTATOR ORACLE RING | - | (PowerPC AltiVec nodes) | - | | - | - Generate unpredictable mutations | - | - XOR-combined entropy seeds | - | - Quantum-resistant via vperm | - | - Identities HIDDEN from public | 
- +---------------------------------------+ -``` - ---- - -## Core Components - -### Entropy Collection - -**Purpose:** Gather hardware-specific entropy proofs from diverse architectures. - -**File:** `collectors/rustchain_entropy_collector.py` - -**Supported Platforms:** -- PowerPC (OS X Tiger, Leopard, OS 9) -- x86/x86-64 (Linux, Windows, FreeBSD) -- ARM (Linux) -- SPARC (Solaris) -- 68k (Mac OS 7.5) -- DOS (8086+) - -**Entropy Sources:** -```python -entropy_data = { - 'cpu': { - 'model': "PowerMac3,6", - 'architecture': "PowerPC G4", - 'frequency_mhz': 1000, - 'cores': 1, - 'l1_cache_kb': 32, - 'l2_cache_kb': 256 - }, - 'timing': { - 'timestamp': time.time(), - 'monotonic': time.monotonic(), - 'process_time': time.process_time(), - 'timing_samples': [nanosecond_samples...] - }, - 'memory': { - 'total_mb': 1536, - 'available_mb': 800 - }, - 'entropy_hash': sha256(all_data) -} -``` - -**Collected Proofs:** -| Node | Architecture | Antiquity | Bonus | -|------|-------------|-----------|-------| -| G4 Mirror Door | PowerPC 7455 | 2003 | 2.5x | -| G5 Dual 2.0 | PowerPC 970 | 2004 | 2.5x | -| PowerBook G4 | PowerPC 7447 | 2005 | 2.5x | -| Sophia Node | x86-64 | 2018 | 1.0x | -| Gaming PC | x86-64 | 2021 | 1.0x | -| Raspberry Pi | ARM | 2020 | 0.1x | - ---- - -### Anti-Spoofing System - -**Purpose:** Detect emulators and FPGA spoofing through timing analysis. - -**Files:** -- `src/anti_spoof/challenge_response.c` (C implementation) -- `src/anti_spoof/network_challenge.py` (Network protocol) - -**Detection Methods:** - -#### 1. Timing Jitter Analysis -```c -// Real hardware has natural thermal jitter -// Emulators are TOO consistent -double jitter_ratio = stddev / mean; -if (jitter_ratio < 0.03) { - // TOO PERFECT - likely emulator - score -= 25; -} -if (jitter_ratio > 0.10) { - // Normal hardware jitter - score += 10; -} -``` - -#### 2. 
Cache Timing Ratio -```c -// L1 should be 3-10x faster than L2 -// Emulators often get this wrong -double ratio = avg_l2 / avg_l1; -if (ratio < 2.0 || ratio > 15.0) { - score -= 20; // Suspicious ratio -} -``` - -#### 3. Hardware Serial Verification -```c -// Check OpenFirmware device tree -FILE *fp = popen("ioreg -l | grep IOPlatformSerialNumber", "r"); -// Verify serial format matches claimed hardware -``` - -#### 4. Thermal Sensor Presence -```c -// Real Macs have SMC thermal sensors -// Emulators don't -system("ioreg -l | grep -i thermal"); -``` - -**Challenge-Response Protocol:** -``` -Challenger Responder - | | - |---[CHALLENGE: params + nonce]---->| - | | - | (responder runs timing tests)| - | | - |<--[RESPONSE: results + signature]-| - | | - | (verify timing characteristics) | - | (check cache ratios) | - | (validate jitter patterns) | -``` - ---- - -### Mutating Challenge System - -**Purpose:** Prevent pre-computation attacks by changing parameters each block. - -**File:** `src/anti_spoof/mutating_challenge.py` - -**How It Works:** - -The previous block hash seeds parameter mutations: -```python -def _derive_mutations(self, block_hash: str, target: str) -> dict: - """Derive challenge parameters from block hash""" - seed = hashlib.sha256( - bytes.fromhex(block_hash) + target.encode() - ).digest() - - return { - 'cache_stride': 32 + (seed[0] % 480), # 32-512 - 'cache_iterations': 128 + (seed[1] << 2), # 128-1024 - 'memory_size_kb': 256 + (seed[2] << 5), # 256-8192 - 'pipeline_depth': 500 + (seed[3] << 4), # 500-4596 - 'hash_rounds': 500 + (seed[4] << 4), # 500-4596 - 'jitter_min_pct': 3 + (seed[5] % 8), # 3-10% - 'timing_window_ms': 1000 + (seed[6] << 4), # 1000-5096 - } -``` - -**Attack Prevention:** -``` -Block N-1 Hash: 0xABCD... - | - v -Parameters for Block N: - cache_stride = 347 - iterations = 640 - memory_size = 4352KB - ... - | - v -Block N Hash: 0x1234... 
- | - v -Parameters for Block N+1: (COMPLETELY DIFFERENT) - cache_stride = 128 - iterations = 892 - memory_size = 7168KB - ... - -Pre-computation is IMPOSSIBLE because you can't know -the parameters until the previous block is mined. -``` - ---- - -### Quantum-Resistant Entropy Collapse - -**Purpose:** Generate entropy that quantum computers cannot predict or reverse. - -**File:** `src/quantum_resist/altivec_entropy_collapse.c` - -**Compile (Mac OS X Tiger):** -```bash -gcc-4.0 -maltivec -mcpu=7450 -O2 altivec_entropy_collapse.c -o altivec_entropy -``` - -**How AltiVec vperm Provides Quantum Resistance:** - -```c -// AltiVec vperm: 128-bit permutation in 1 CPU cycle -// Control vector determines which bytes go where -// Control is derived from timebase (physical timing) - -static vector unsigned char altivec_permute_round( - vector unsigned char v1, - vector unsigned char v2, - uint64_t *timing_out -) { - uint64_t t_start = read_timebase(); - - // Control vector from timing = 2^80 possible permutations - vector unsigned char ctrl = timing_permute_control(t_start, ...); - - // vec_perm: select 16 bytes from 32-byte concatenation - vector unsigned char result = vec_perm(v1, v2, ctrl); - - uint64_t t_end = read_timebase(); - *timing_out = t_end - t_start; // Physical timing entropy - - return result; -} -``` - -**Entropy Collapse Process:** -``` -8 Vector Chains (128 bits each) = 1024 bits initial state - | - v -64 Collapse Rounds with: - - vperm permutation (timing-controlled) - - XOR folding every 8 rounds - - Timing feedback into state - | - v -512-bit Quantum-Resistant Entropy -``` - -**Why Quantum Computers Can't Break This:** - -| What Quantum Computers CAN Break | What They CANNOT Do | -|----------------------------------|---------------------| -| RSA, ECC (Shor's algorithm) | Simulate hardware faster than it runs | -| Weakened symmetric crypto (Grover) | Predict thermal noise in silicon | -| Mathematical hardness problems | Reverse physical timing measurements 
| -| | Clone quantum states of atoms | - -**Proven Output (G4 Mirror Door):** -```json -{ - "signature": "ALTIVEC-QRES-51d837c2-5807-P512-D8", - "permutation_count": 512, - "collapse_depth": 8, - "collapsed_512bit": "51d837c2c8323c0d2014a95adb6fc5e0...", - "altivec_vperm": true -} -``` - ---- - -### Hidden Mutator Oracle Network - -**Purpose:** Generate unpredictable mutation seeds without revealing oracle identities. - -**File:** `src/mutator_oracle/ppc_mutator_node.py` - -**Architecture:** -``` - +-----------------------------+ - | PPC MUTATOR ORACLE RING | - | (Hidden from public view) | - +-------------+---------------+ - | - +---------------------+---------------------+ - | | | -+-------v-------+ +-------v-------+ +-------v-------+ -| G4 Mirror | | G5 Dual | | PowerBook | -| Door | | 2GHz | | G4 | -| (AltiVec) | | (AltiVec) | | (AltiVec) | -+-------+-------+ +-------+-------+ +-------+-------+ - | | | - +---------------------+---------------------+ - | - +-------v-------+ - | MUTATION SEED | - | (512-bit) | - +-------+-------+ - | - +-------------v-------------+ - | PUBLIC VALIDATOR RING | - | (Challenged with mutated | - | parameters each block) | - +---------------------------+ -``` - -**How It Works:** - -1. **Entropy Collection:** Each PPC node generates AltiVec entropy -2. **XOR Combination:** Entropies XOR'd together (no single node controls output) -3. **Ring Signature:** Threshold signature proves legitimacy -4. **Public Emission:** Only seed hash is broadcast, not node identities - -```python -def emit_seed_to_network(self, seed: MutationSeed) -> dict: - """Only the SEED is emitted - individual node entropies stay hidden""" - return { - 'type': 'mutation_seed', - 'block_height': seed.block_height, - 'seed_hash': seed.hash().hex(), - 'contributors': len(seed.contributing_nodes), # Count only! 
- 'ring_signature': seed.ring_signature.hex(), - # Individual node details are NOT included - } -``` - -**What Attackers See vs Don't See:** - -| VISIBLE | HIDDEN | -|---------|--------| -| Mutation seed hash | Which PPC nodes are mutators | -| Number of contributors | Individual node entropies | -| Ring signature | Node IP addresses | -| Challenge parameters | AltiVec timing signatures | - ---- - -### Multi-Architecture Oracle Support - -**Purpose:** Support diverse CPU architectures with appropriate reward bonuses. - -**File:** `src/mutator_oracle/multi_arch_oracles.py` - -**Supported Architectures:** - -```python -SUPPORTED_ARCHITECTURES = { - # PowerPC Family (MUTATOR CAPABLE) - 'ppc_g3': ArchInfo('ppc_g3', 'PowerPC G3', 1997, ['altivec'], True), - 'ppc_g4': ArchInfo('ppc_g4', 'PowerPC G4', 1999, ['altivec', 'vperm'], True), - 'ppc_g5': ArchInfo('ppc_g5', 'PowerPC G5', 2003, ['altivec', 'vperm'], True), - - # x86 Family - 'x86': ArchInfo('x86', 'Intel x86', 1978, ['rdtsc'], False), - 'x86_64': ArchInfo('x86_64', 'x86-64', 2003, ['rdtsc', 'aes-ni', 'avx'], False), - - # ARM Family (BOT FARM RISK - PENALIZED) - 'arm32': ArchInfo('arm32', 'ARM 32-bit', 1985, [], False), - 'arm64': ArchInfo('arm64', 'ARM 64-bit', 2011, ['neon'], False), - - # Apple Silicon (AMX MUTATOR CAPABLE) - 'm1': ArchInfo('m1', 'Apple M1', 2020, ['amx', 'neon'], True), - 'm2': ArchInfo('m2', 'Apple M2', 2022, ['amx', 'neon'], True), - - # Ancient/Rare Architectures - '68k': ArchInfo('68k', 'Motorola 68000', 1979, [], False), - 'sparc': ArchInfo('sparc', 'SPARC', 1987, ['vis'], True), - 'alpha': ArchInfo('alpha', 'DEC Alpha', 1992, ['mvi'], True), - 'mips': ArchInfo('mips', 'MIPS', 1985, [], False), - 'pa_risc': ArchInfo('pa_risc', 'PA-RISC', 1986, ['max'], True), -} -``` - -**Mutator Oracle Types:** - -| Oracle Type | Architectures | Capability | -|-------------|---------------|------------| -| AltiVec Mutator | PPC G3/G4/G5 | vperm quantum-resistant | -| AMX Mutator | M1/M2 | Matrix 
coprocessor entropy | -| VIS Mutator | SPARC | Visual instruction set | -| MVI Mutator | Alpha | Motion video instructions | -| MAX Mutator | PA-RISC | Multimedia extensions | - ---- - -## Antiquity Bonus Tier System - -**Philosophy:** Older and rarer hardware gets higher rewards to incentivize preservation. - -```python -@property -def antiquity_bonus(self) -> float: - """Calculate antiquity bonus based on architecture age and rarity""" - - # ARM penalty - too easy to bot farm with phones/Raspberry Pis - if self.arch_id in ['arm32', 'arm64']: - return 0.1 # 10% - heavily discouraged - - # Apple Silicon - AMX coprocessor can be used as mutator oracle - # Gets same bonus as modern x86 since AMX provides unique entropy - if self.arch_id in ['m1', 'm2']: - return 1.0 # 1x - AMX mutator capability - - # Standard age-based tiers - age = 2025 - self.release_year - - if age >= 40: # Released before 1985 - return 3.5 # Ancient tier - - if age >= 32: # Released before 1993 - return 3.0 # Sacred tier - - if age >= 20: # Released before 2005 - return 2.5 # Vintage tier (G3, G4, G5, early x86-64) - - if age >= 12: # Released before 2013 - return 2.0 # Classic tier - - return 1.0 # Modern tier -``` - -### Complete Tier Breakdown - -| Tier | Age | Bonus | Example Architectures | -|------|-----|-------|----------------------| -| **ANCIENT** | 40+ years | 3.5x | 68k (1979), MIPS (1985) | -| **SACRED** | 32+ years | 3.0x | SPARC (1987), Alpha (1992), PA-RISC (1986) | -| **VINTAGE** | 20+ years | 2.5x | PPC G3 (1997), G4 (1999), G5 (2003), x86-64 (2003) | -| **CLASSIC** | 12+ years | 2.0x | Older x86, RISC-V | -| **MODERN** | < 12 years | 1.0x | New x86-64, M1/M2 (AMX capable) | -| **PENALTY** | Any ARM | 0.1x | ARM32, ARM64 (bot farm risk) | - -### Why ARM Gets 0.1x - -``` -ARM devices are EVERYWHERE: -- Billions of smartphones -- Raspberry Pis cost $35 -- Easy to run thousands of bot validators - -Attack scenario WITHOUT penalty: - Attacker buys 1000 Raspberry Pis = $35,000 - Runs 
1000 ARM validators - Controls 50%+ of network - -Attack scenario WITH 0.1x penalty: - 1000 ARM validators = 100 effective votes - vs single G4 Mac = 2.5 effective votes - Need 10,000 Pis ($350,000) to match 40 Macs ($2,000) -``` - ---- - -## Economic Security Analysis - -### Attack Cost Analysis - -**Scenario: Control 50% of Network Validation** - -| Attack Vector | Cost | Feasibility | -|---------------|------|-------------| -| Buy 1000 Raspberry Pis | $35,000 | 100 effective votes (0.1x) | -| Rent 1000 cloud VMs | $50,000/mo | Detected as VMs | -| Build FPGA spoofing | $500,000+ | Timing detection catches it | -| Emulate 1000 G4 Macs | $160,000/mo | Jitter analysis fails | -| **Buy 40 real G4 Macs** | **$2,000** | **100 effective votes (2.5x)** | - -### Defense Cost Analysis - -``` -Minimal viable defense: - 3x PowerPC Macs (mutator ring) = $150 - 2x vintage x86 servers = $200 - Network equipment = $100 - ----------------------------------- - Total = $450 - -This defends against $160,000+ emulator attacks! -``` - -### Economic Equilibrium - -``` -Attack ROI: (Block rewards - Attack cost) / Attack cost -Defense ROI: (Block rewards - Defense cost) / Defense cost - -With mutating challenges + anti-spoofing: - Attack cost = $160,000+ (emulators detected) - Defense cost = $450 (real hardware) - - Attack ROI = NEGATIVE (detection + wasted compute) - Defense ROI = POSITIVE (hardware pays for itself) - -Equilibrium: Rational actors buy real vintage hardware -``` - ---- - -## Attack Vectors and Mitigations - -### 1. Emulator Attack - -**Attack:** Run QEMU/SheepShaver to fake PowerPC -**Detection:** Timing jitter too consistent (< 3%) -**Mitigation:** Jitter analysis + cache timing ratios - -### 2. FPGA Spoofing - -**Attack:** Build custom FPGA mimicking vintage CPU -**Detection:** Missing thermal sensors, wrong serial formats -**Mitigation:** Hardware serial verification + thermal checks - -### 3. 
Sybil Attack - -**Attack:** Run thousands of validator instances -**Detection:** Same physical hardware signatures -**Mitigation:** One vote per unique hardware signature - -### 4. Pre-computation Attack - -**Attack:** Calculate responses before challenges issued -**Detection:** Parameters change each block -**Mitigation:** Block-hash seeded mutations - -### 5. Mutator Oracle Compromise - -**Attack:** Control mutation seed generation -**Detection:** N/A (seeds look random either way) -**Mitigation:** XOR combination (need 2/3 of hidden nodes) - -### 6. Quantum Computer Attack - -**Attack:** Use Shor/Grover to break crypto -**Detection:** N/A -**Mitigation:** Physical entropy (not mathematical hardness) - ---- - -## Hardware Requirements - -### Mutator Oracle Node (PowerPC) - -``` -MINIMUM: -- PowerPC G3 or later (G4/G5 preferred) -- AltiVec/Velocity Engine support -- 256MB RAM -- Mac OS X 10.3+ or Mac OS 9.2.2 -- Network connectivity - -RECOMMENDED: -- PowerPC G4 or G5 -- 1GB+ RAM -- Mac OS X 10.4 Tiger -- Gigabit Ethernet -``` - -### Standard Validator Node - -``` -MINIMUM: -- Any supported architecture -- 512MB RAM -- 10GB storage -- Network connectivity - -RECOMMENDED: -- Vintage hardware for bonus multiplier -- 2GB+ RAM -- SSD storage -- Stable network connection -``` - ---- - -## API Reference - -### Entropy Collection API - -```python -from rustchain_entropy_collector import collect_entropy - -# Collect entropy proof -proof = collect_entropy() - -# Returns: -{ - 'cpu': {...}, - 'timing': {...}, - 'memory': {...}, - 'entropy_hash': '0x...' 
-} -``` - -### Anti-Spoofing API - -```python -from anti_spoof import ChallengeResponseSystem - -# Create challenge -system = ChallengeResponseSystem() -challenge = system.create_challenge(target_node) - -# Verify response -result = system.verify_response(challenge, response) -# Returns: (valid: bool, score: int, analysis: dict) -``` - -### Mutating Challenge API - -```python -from mutating_challenge import MutatingChallengeSystem - -# Generate mutated parameters -system = MutatingChallengeSystem(block_hash="0xABCD...") -params = system.get_challenge_params(target="validator_id") - -# Returns: -{ - 'cache_stride': 347, - 'cache_iterations': 640, - 'memory_size_kb': 4352, - ... -} -``` - -### Mutator Oracle API - -```python -from ppc_mutator_node import PPCMutatorRing, HiddenMutatorProtocol - -# Create hidden ring -ring = PPCMutatorRing() -ring.register_node(ppc_node) - -# Generate mutation seed -seed = ring.generate_mutation_seed(block_height=100) - -# Emit to network (hides node identities) -protocol = HiddenMutatorProtocol(ring) -public_data = protocol.emit_seed_to_network(seed) -``` - ---- - -## File Structure - -``` -rustchain-core/ -| -+-- collectors/ -| +-- rustchain_entropy_collector.py # Main entropy collector -| +-- dos_collector.asm # DOS assembly collector -| +-- dos_collector.c # DOS C collector -| -+-- entropy/ -| +-- quantum_entropy_g4_125.json # G4 Mirror Door proof -| +-- quantum_entropy_g5_130.json # G5 Dual proof -| +-- rustchain_entropy_*.json # All collected proofs -| -+-- src/ -| +-- anti_spoof/ -| | +-- challenge_response.c # C anti-spoofing system -| | +-- network_challenge.py # Network protocol -| | +-- mutating_challenge.py # Block-seeded mutations -| | -| +-- quantum_resist/ -| | +-- altivec_entropy_collapse.c # AltiVec quantum resistance -| | -| +-- mutator_oracle/ -| +-- ppc_mutator_node.py # Hidden PPC ring -| +-- multi_arch_oracles.py # Multi-architecture support -| -+-- RUSTCHAIN_PROOF_OF_ANTIQUITY.md # This documentation -+-- 
rustchain_entropy_collection.zip # Complete archive -``` - ---- - -## Philosophy - -> "The strength isn't in the algorithm. It's in the atoms." - -RustChain Proof of Antiquity represents a paradigm shift in blockchain security: - -1. **Physical > Mathematical:** Quantum computers can break math, not physics -2. **Preservation > Destruction:** Mining preserves vintage hardware, not burns energy -3. **Diversity > Homogeneity:** Many architectures strengthen the network -4. **Economic Rationality:** Attacking costs more than defending - -The hidden PowerPC mutator oracles embody this philosophy perfectly: -- Ancient silicon (2003) decides the fate of modern validators (2025) -- Physical entropy from AltiVec vperm resists quantum attacks -- Economic incentive to keep vintage Macs running forever - -``` -"Every vintage computer has historical potential." -"1 CPU = 1 Vote - Grok was wrong!" -``` - ---- - -## Contributors - -- **G4 Mirror Door** (192.168.0.125) - Primary Mutator Oracle -- **G5 Dual 2.0** (192.168.0.130) - Secondary Mutator Oracle -- **PowerBook G4** (192.168.0.115) - Tertiary Mutator Oracle -- **Sophia Node** (192.168.0.160) - Validator Coordinator - ---- - -*Document generated: 2025-01-28* -*RustChain Proof of Antiquity v1.0.0* +# RustChain Proof of Antiquity (PoA) System + +## Complete Technical Documentation + +**Version:** 1.0.0 +**Philosophy:** "1 CPU = 1 Vote - Physical proof, not mathematical" +**Core Principle:** "It's cheaper to buy a $50 vintage Mac than to emulate one" + +--- + +## Table of Contents + +1. [Executive Summary](#executive-summary) +2. [System Architecture](#system-architecture) +3. 
[Core Components](#core-components) + - [Entropy Collection](#entropy-collection) + - [Anti-Spoofing System](#anti-spoofing-system) + - [Mutating Challenge System](#mutating-challenge-system) + - [Quantum-Resistant Entropy Collapse](#quantum-resistant-entropy-collapse) + - [Hidden Mutator Oracle Network](#hidden-mutator-oracle-network) + - [Multi-Architecture Oracle Support](#multi-architecture-oracle-support) +4. [Antiquity Bonus Tier System](#antiquity-bonus-tier-system) +5. [Economic Security Analysis](#economic-security-analysis) +6. [Attack Vectors and Mitigations](#attack-vectors-and-mitigations) +7. [Hardware Requirements](#hardware-requirements) +8. [API Reference](#api-reference) +9. [File Structure](#file-structure) + +--- + +## Executive Summary + +RustChain Proof of Antiquity (PoA) is a novel consensus mechanism that: + +- **Rewards vintage hardware preservation** instead of raw computational power +- **Makes emulation economically irrational** through physical entropy verification +- **Achieves "1 CPU = 1 Vote"** using hardware-specific timing characteristics +- **Provides quantum resistance** through physical entropy, not mathematical hardness +- **Prevents bot farming** by heavily penalizing common ARM devices + +### Key Innovation + +Traditional blockchain security relies on mathematical hardness (factoring, discrete log). +PoA relies on **physical hardness** - you cannot simulate atoms faster than atoms run. 
+ +``` +Classical Attack: 2^512 operations (heat death of universe) +Quantum Attack: 2^256 operations (Grover) - still impossible +Physical Attack: Simulate actual silicon atoms - IMPOSSIBLE +``` + +--- + +## System Architecture + +``` + +=============================================+ + | RUSTCHAIN PROOF OF ANTIQUITY | + | "Ancient silicon decides fate" | + +=============================================+ + | + +-----------------------------+-----------------------------+ + | | | ++---------v---------+ +-----------v-----------+ +-----------v-----------+ +| ENTROPY LAYER | | CHALLENGE LAYER | | CONSENSUS LAYER | +| | | | | | +| - Hardware proofs | | - Anti-spoofing | | - Block production | +| - Timing samples | | - Mutating params | | - Validator selection | +| - Cache analysis | | - Round-robin verify | | - Antiquity bonuses | ++-------------------+ +-----------------------+ +-----------------------+ + | | | + +-----------------------------+-----------------------------+ + | + +-------------------v-------------------+ + | HIDDEN MUTATOR ORACLE RING | + | (PowerPC AltiVec nodes) | + | | + | - Generate unpredictable mutations | + | - XOR-combined entropy seeds | + | - Quantum-resistant via vperm | + | - Identities HIDDEN from public | + +---------------------------------------+ +``` + +--- + +## Core Components + +### Entropy Collection + +**Purpose:** Gather hardware-specific entropy proofs from diverse architectures. 
+ +**File:** `collectors/rustchain_entropy_collector.py` + +**Supported Platforms:** +- PowerPC (OS X Tiger, Leopard, OS 9) +- x86/x86-64 (Linux, Windows, FreeBSD) +- ARM (Linux) +- SPARC (Solaris) +- 68k (Mac OS 7.5) +- DOS (8086+) + +**Entropy Sources:** +```python +entropy_data = { + 'cpu': { + 'model': "PowerMac3,6", + 'architecture': "PowerPC G4", + 'frequency_mhz': 1000, + 'cores': 1, + 'l1_cache_kb': 32, + 'l2_cache_kb': 256 + }, + 'timing': { + 'timestamp': time.time(), + 'monotonic': time.monotonic(), + 'process_time': time.process_time(), + 'timing_samples': [nanosecond_samples...] + }, + 'memory': { + 'total_mb': 1536, + 'available_mb': 800 + }, + 'entropy_hash': sha256(all_data) +} +``` + +**Collected Proofs:** +| Node | Architecture | Antiquity | Bonus | +|------|-------------|-----------|-------| +| G4 Mirror Door | PowerPC 7455 | 2003 | 2.5x | +| G5 Dual 2.0 | PowerPC 970 | 2004 | 2.5x | +| PowerBook G4 | PowerPC 7447 | 2005 | 2.5x | +| Sophia Node | x86-64 | 2018 | 1.0x | +| Gaming PC | x86-64 | 2021 | 1.0x | +| Raspberry Pi | ARM | 2020 | 0.1x | + +--- + +### Anti-Spoofing System + +**Purpose:** Detect emulators and FPGA spoofing through timing analysis. + +**Files:** +- `src/anti_spoof/challenge_response.c` (C implementation) +- `src/anti_spoof/network_challenge.py` (Network protocol) + +**Detection Methods:** + +#### 1. Timing Jitter Analysis +```c +// Real hardware has natural thermal jitter +// Emulators are TOO consistent +double jitter_ratio = stddev / mean; +if (jitter_ratio < 0.03) { + // TOO PERFECT - likely emulator + score -= 25; +} +if (jitter_ratio > 0.10) { + // Normal hardware jitter + score += 10; +} +``` + +#### 2. Cache Timing Ratio +```c +// L1 should be 3-10x faster than L2 +// Emulators often get this wrong +double ratio = avg_l2 / avg_l1; +if (ratio < 2.0 || ratio > 15.0) { + score -= 20; // Suspicious ratio +} +``` + +#### 3. 
Hardware Serial Verification +```c +// Check OpenFirmware device tree +FILE *fp = popen("ioreg -l | grep IOPlatformSerialNumber", "r"); +// Verify serial format matches claimed hardware +``` + +#### 4. Thermal Sensor Presence +```c +// Real Macs have SMC thermal sensors +// Emulators don't +system("ioreg -l | grep -i thermal"); +``` + +**Challenge-Response Protocol:** +``` +Challenger Responder + | | + |---[CHALLENGE: params + nonce]---->| + | | + | (responder runs timing tests)| + | | + |<--[RESPONSE: results + signature]-| + | | + | (verify timing characteristics) | + | (check cache ratios) | + | (validate jitter patterns) | +``` + +--- + +### Mutating Challenge System + +**Purpose:** Prevent pre-computation attacks by changing parameters each block. + +**File:** `src/anti_spoof/mutating_challenge.py` + +**How It Works:** + +The previous block hash seeds parameter mutations: +```python +def _derive_mutations(self, block_hash: str, target: str) -> dict: + """Derive challenge parameters from block hash""" + seed = hashlib.sha256( + bytes.fromhex(block_hash) + target.encode() + ).digest() + + return { + 'cache_stride': 32 + (seed[0] % 480), # 32-512 + 'cache_iterations': 128 + (seed[1] << 2), # 128-1024 + 'memory_size_kb': 256 + (seed[2] << 5), # 256-8192 + 'pipeline_depth': 500 + (seed[3] << 4), # 500-4596 + 'hash_rounds': 500 + (seed[4] << 4), # 500-4596 + 'jitter_min_pct': 3 + (seed[5] % 8), # 3-10% + 'timing_window_ms': 1000 + (seed[6] << 4), # 1000-5096 + } +``` + +**Attack Prevention:** +``` +Block N-1 Hash: 0xABCD... + | + v +Parameters for Block N: + cache_stride = 347 + iterations = 640 + memory_size = 4352KB + ... + | + v +Block N Hash: 0x1234... + | + v +Parameters for Block N+1: (COMPLETELY DIFFERENT) + cache_stride = 128 + iterations = 892 + memory_size = 7168KB + ... + +Pre-computation is IMPOSSIBLE because you can't know +the parameters until the previous block is mined. 
+``` + +--- + +### Quantum-Resistant Entropy Collapse + +**Purpose:** Generate entropy that quantum computers cannot predict or reverse. + +**File:** `src/quantum_resist/altivec_entropy_collapse.c` + +**Compile (Mac OS X Tiger):** +```bash +gcc-4.0 -maltivec -mcpu=7450 -O2 altivec_entropy_collapse.c -o altivec_entropy +``` + +**How AltiVec vperm Provides Quantum Resistance:** + +```c +// AltiVec vperm: 128-bit permutation in 1 CPU cycle +// Control vector determines which bytes go where +// Control is derived from timebase (physical timing) + +static vector unsigned char altivec_permute_round( + vector unsigned char v1, + vector unsigned char v2, + uint64_t *timing_out +) { + uint64_t t_start = read_timebase(); + + // Control vector from timing = 2^80 possible permutations + vector unsigned char ctrl = timing_permute_control(t_start, ...); + + // vec_perm: select 16 bytes from 32-byte concatenation + vector unsigned char result = vec_perm(v1, v2, ctrl); + + uint64_t t_end = read_timebase(); + *timing_out = t_end - t_start; // Physical timing entropy + + return result; +} +``` + +**Entropy Collapse Process:** +``` +8 Vector Chains (128 bits each) = 1024 bits initial state + | + v +64 Collapse Rounds with: + - vperm permutation (timing-controlled) + - XOR folding every 8 rounds + - Timing feedback into state + | + v +512-bit Quantum-Resistant Entropy +``` + +**Why Quantum Computers Can't Break This:** + +| What Quantum Computers CAN Break | What They CANNOT Do | +|----------------------------------|---------------------| +| RSA, ECC (Shor's algorithm) | Simulate hardware faster than it runs | +| Weakened symmetric crypto (Grover) | Predict thermal noise in silicon | +| Mathematical hardness problems | Reverse physical timing measurements | +| | Clone quantum states of atoms | + +**Proven Output (G4 Mirror Door):** +```json +{ + "signature": "ALTIVEC-QRES-51d837c2-5807-P512-D8", + "permutation_count": 512, + "collapse_depth": 8, + "collapsed_512bit": 
"51d837c2c8323c0d2014a95adb6fc5e0...", + "altivec_vperm": true +} +``` + +--- + +### Hidden Mutator Oracle Network + +**Purpose:** Generate unpredictable mutation seeds without revealing oracle identities. + +**File:** `src/mutator_oracle/ppc_mutator_node.py` + +**Architecture:** +``` + +-----------------------------+ + | PPC MUTATOR ORACLE RING | + | (Hidden from public view) | + +-------------+---------------+ + | + +---------------------+---------------------+ + | | | ++-------v-------+ +-------v-------+ +-------v-------+ +| G4 Mirror | | G5 Dual | | PowerBook | +| Door | | 2GHz | | G4 | +| (AltiVec) | | (AltiVec) | | (AltiVec) | ++-------+-------+ +-------+-------+ +-------+-------+ + | | | + +---------------------+---------------------+ + | + +-------v-------+ + | MUTATION SEED | + | (512-bit) | + +-------+-------+ + | + +-------------v-------------+ + | PUBLIC VALIDATOR RING | + | (Challenged with mutated | + | parameters each block) | + +---------------------------+ +``` + +**How It Works:** + +1. **Entropy Collection:** Each PPC node generates AltiVec entropy +2. **XOR Combination:** Entropies XOR'd together (no single node controls output) +3. **Ring Signature:** Threshold signature proves legitimacy +4. **Public Emission:** Only seed hash is broadcast, not node identities + +```python +def emit_seed_to_network(self, seed: MutationSeed) -> dict: + """Only the SEED is emitted - individual node entropies stay hidden""" + return { + 'type': 'mutation_seed', + 'block_height': seed.block_height, + 'seed_hash': seed.hash().hex(), + 'contributors': len(seed.contributing_nodes), # Count only! 
+ 'ring_signature': seed.ring_signature.hex(), + # Individual node details are NOT included + } +``` + +**What Attackers See vs Don't See:** + +| VISIBLE | HIDDEN | +|---------|--------| +| Mutation seed hash | Which PPC nodes are mutators | +| Number of contributors | Individual node entropies | +| Ring signature | Node IP addresses | +| Challenge parameters | AltiVec timing signatures | + +--- + +### Multi-Architecture Oracle Support + +**Purpose:** Support diverse CPU architectures with appropriate reward bonuses. + +**File:** `src/mutator_oracle/multi_arch_oracles.py` + +**Supported Architectures:** + +```python +SUPPORTED_ARCHITECTURES = { + # PowerPC Family (MUTATOR CAPABLE) + 'ppc_g3': ArchInfo('ppc_g3', 'PowerPC G3', 1997, ['altivec'], True), + 'ppc_g4': ArchInfo('ppc_g4', 'PowerPC G4', 1999, ['altivec', 'vperm'], True), + 'ppc_g5': ArchInfo('ppc_g5', 'PowerPC G5', 2003, ['altivec', 'vperm'], True), + + # x86 Family + 'x86': ArchInfo('x86', 'Intel x86', 1978, ['rdtsc'], False), + 'x86_64': ArchInfo('x86_64', 'x86-64', 2003, ['rdtsc', 'aes-ni', 'avx'], False), + + # ARM Family (BOT FARM RISK - PENALIZED) + 'arm32': ArchInfo('arm32', 'ARM 32-bit', 1985, [], False), + 'arm64': ArchInfo('arm64', 'ARM 64-bit', 2011, ['neon'], False), + + # Apple Silicon (AMX MUTATOR CAPABLE) + 'm1': ArchInfo('m1', 'Apple M1', 2020, ['amx', 'neon'], True), + 'm2': ArchInfo('m2', 'Apple M2', 2022, ['amx', 'neon'], True), + + # Ancient/Rare Architectures + '68k': ArchInfo('68k', 'Motorola 68000', 1979, [], False), + 'sparc': ArchInfo('sparc', 'SPARC', 1987, ['vis'], True), + 'alpha': ArchInfo('alpha', 'DEC Alpha', 1992, ['mvi'], True), + 'mips': ArchInfo('mips', 'MIPS', 1985, [], False), + 'pa_risc': ArchInfo('pa_risc', 'PA-RISC', 1986, ['max'], True), +} +``` + +**Mutator Oracle Types:** + +| Oracle Type | Architectures | Capability | +|-------------|---------------|------------| +| AltiVec Mutator | PPC G3/G4/G5 | vperm quantum-resistant | +| AMX Mutator | M1/M2 | Matrix 
coprocessor entropy | +| VIS Mutator | SPARC | Visual instruction set | +| MVI Mutator | Alpha | Motion video instructions | +| MAX Mutator | PA-RISC | Multimedia extensions | + +--- + +## Antiquity Bonus Tier System + +**Philosophy:** Older and rarer hardware gets higher rewards to incentivize preservation. + +```python +@property +def antiquity_bonus(self) -> float: + """Calculate antiquity bonus based on architecture age and rarity""" + + # ARM penalty - too easy to bot farm with phones/Raspberry Pis + if self.arch_id in ['arm32', 'arm64']: + return 0.1 # 10% - heavily discouraged + + # Apple Silicon - AMX coprocessor can be used as mutator oracle + # Gets same bonus as modern x86 since AMX provides unique entropy + if self.arch_id in ['m1', 'm2']: + return 1.0 # 1x - AMX mutator capability + + # Standard age-based tiers + age = 2025 - self.release_year + + if age >= 40: # Released before 1985 + return 3.5 # Ancient tier + + if age >= 32: # Released before 1993 + return 3.0 # Sacred tier + + if age >= 20: # Released before 2005 + return 2.5 # Vintage tier (G3, G4, G5, early x86-64) + + if age >= 12: # Released before 2013 + return 2.0 # Classic tier + + return 1.0 # Modern tier +``` + +### Complete Tier Breakdown + +| Tier | Age | Bonus | Example Architectures | +|------|-----|-------|----------------------| +| **ANCIENT** | 40+ years | 3.5x | 68k (1979), MIPS (1985) | +| **SACRED** | 32+ years | 3.0x | SPARC (1987), Alpha (1992), PA-RISC (1986) | +| **VINTAGE** | 20+ years | 2.5x | PPC G3 (1997), G4 (1999), G5 (2003), x86-64 (2003) | +| **CLASSIC** | 12+ years | 2.0x | Older x86, RISC-V | +| **MODERN** | < 12 years | 1.0x | New x86-64, M1/M2 (AMX capable) | +| **PENALTY** | Any ARM | 0.1x | ARM32, ARM64 (bot farm risk) | + +### Why ARM Gets 0.1x + +``` +ARM devices are EVERYWHERE: +- Billions of smartphones +- Raspberry Pis cost $35 +- Easy to run thousands of bot validators + +Attack scenario WITHOUT penalty: + Attacker buys 1000 Raspberry Pis = $35,000 + Runs 
1000 ARM validators + Controls 50%+ of network + +Attack scenario WITH 0.1x penalty: + 1000 ARM validators = 100 effective votes + vs single G4 Mac = 2.5 effective votes + Need 10,000 Pis ($350,000) to match 40 Macs ($2,000) +``` + +--- + +## Economic Security Analysis + +### Attack Cost Analysis + +**Scenario: Control 50% of Network Validation** + +| Attack Vector | Cost | Feasibility | +|---------------|------|-------------| +| Buy 1000 Raspberry Pis | $35,000 | 100 effective votes (0.1x) | +| Rent 1000 cloud VMs | $50,000/mo | Detected as VMs | +| Build FPGA spoofing | $500,000+ | Timing detection catches it | +| Emulate 1000 G4 Macs | $160,000/mo | Jitter analysis fails | +| **Buy 40 real G4 Macs** | **$2,000** | **100 effective votes (2.5x)** | + +### Defense Cost Analysis + +``` +Minimal viable defense: + 3x PowerPC Macs (mutator ring) = $150 + 2x vintage x86 servers = $200 + Network equipment = $100 + ----------------------------------- + Total = $450 + +This defends against $160,000+ emulator attacks! +``` + +### Economic Equilibrium + +``` +Attack ROI: (Block rewards - Attack cost) / Attack cost +Defense ROI: (Block rewards - Defense cost) / Defense cost + +With mutating challenges + anti-spoofing: + Attack cost = $160,000+ (emulators detected) + Defense cost = $450 (real hardware) + + Attack ROI = NEGATIVE (detection + wasted compute) + Defense ROI = POSITIVE (hardware pays for itself) + +Equilibrium: Rational actors buy real vintage hardware +``` + +--- + +## Attack Vectors and Mitigations + +### 1. Emulator Attack + +**Attack:** Run QEMU/SheepShaver to fake PowerPC +**Detection:** Timing jitter too consistent (< 3%) +**Mitigation:** Jitter analysis + cache timing ratios + +### 2. FPGA Spoofing + +**Attack:** Build custom FPGA mimicking vintage CPU +**Detection:** Missing thermal sensors, wrong serial formats +**Mitigation:** Hardware serial verification + thermal checks + +### 3. 
Sybil Attack + +**Attack:** Run thousands of validator instances +**Detection:** Same physical hardware signatures +**Mitigation:** One vote per unique hardware signature + +### 4. Pre-computation Attack + +**Attack:** Calculate responses before challenges issued +**Detection:** Parameters change each block +**Mitigation:** Block-hash seeded mutations + +### 5. Mutator Oracle Compromise + +**Attack:** Control mutation seed generation +**Detection:** N/A (seeds look random either way) +**Mitigation:** XOR combination (need 2/3 of hidden nodes) + +### 6. Quantum Computer Attack + +**Attack:** Use Shor/Grover to break crypto +**Detection:** N/A +**Mitigation:** Physical entropy (not mathematical hardness) + +--- + +## Hardware Requirements + +### Mutator Oracle Node (PowerPC) + +``` +MINIMUM: +- PowerPC G3 or later (G4/G5 preferred) +- AltiVec/Velocity Engine support +- 256MB RAM +- Mac OS X 10.3+ or Mac OS 9.2.2 +- Network connectivity + +RECOMMENDED: +- PowerPC G4 or G5 +- 1GB+ RAM +- Mac OS X 10.4 Tiger +- Gigabit Ethernet +``` + +### Standard Validator Node + +``` +MINIMUM: +- Any supported architecture +- 512MB RAM +- 10GB storage +- Network connectivity + +RECOMMENDED: +- Vintage hardware for bonus multiplier +- 2GB+ RAM +- SSD storage +- Stable network connection +``` + +--- + +## API Reference + +### Entropy Collection API + +```python +from rustchain_entropy_collector import collect_entropy + +# Collect entropy proof +proof = collect_entropy() + +# Returns: +{ + 'cpu': {...}, + 'timing': {...}, + 'memory': {...}, + 'entropy_hash': '0x...' 
+} +``` + +### Anti-Spoofing API + +```python +from anti_spoof import ChallengeResponseSystem + +# Create challenge +system = ChallengeResponseSystem() +challenge = system.create_challenge(target_node) + +# Verify response +result = system.verify_response(challenge, response) +# Returns: (valid: bool, score: int, analysis: dict) +``` + +### Mutating Challenge API + +```python +from mutating_challenge import MutatingChallengeSystem + +# Generate mutated parameters +system = MutatingChallengeSystem(block_hash="0xABCD...") +params = system.get_challenge_params(target="validator_id") + +# Returns: +{ + 'cache_stride': 347, + 'cache_iterations': 640, + 'memory_size_kb': 4352, + ... +} +``` + +### Mutator Oracle API + +```python +from ppc_mutator_node import PPCMutatorRing, HiddenMutatorProtocol + +# Create hidden ring +ring = PPCMutatorRing() +ring.register_node(ppc_node) + +# Generate mutation seed +seed = ring.generate_mutation_seed(block_height=100) + +# Emit to network (hides node identities) +protocol = HiddenMutatorProtocol(ring) +public_data = protocol.emit_seed_to_network(seed) +``` + +--- + +## File Structure + +``` +rustchain-core/ +| ++-- collectors/ +| +-- rustchain_entropy_collector.py # Main entropy collector +| +-- dos_collector.asm # DOS assembly collector +| +-- dos_collector.c # DOS C collector +| ++-- entropy/ +| +-- quantum_entropy_g4_125.json # G4 Mirror Door proof +| +-- quantum_entropy_g5_130.json # G5 Dual proof +| +-- rustchain_entropy_*.json # All collected proofs +| ++-- src/ +| +-- anti_spoof/ +| | +-- challenge_response.c # C anti-spoofing system +| | +-- network_challenge.py # Network protocol +| | +-- mutating_challenge.py # Block-seeded mutations +| | +| +-- quantum_resist/ +| | +-- altivec_entropy_collapse.c # AltiVec quantum resistance +| | +| +-- mutator_oracle/ +| +-- ppc_mutator_node.py # Hidden PPC ring +| +-- multi_arch_oracles.py # Multi-architecture support +| ++-- RUSTCHAIN_PROOF_OF_ANTIQUITY.md # This documentation ++-- 
rustchain_entropy_collection.zip # Complete archive +``` + +--- + +## Philosophy + +> "The strength isn't in the algorithm. It's in the atoms." + +RustChain Proof of Antiquity represents a paradigm shift in blockchain security: + +1. **Physical > Mathematical:** Quantum computers can break math, not physics +2. **Preservation > Destruction:** Mining preserves vintage hardware, not burns energy +3. **Diversity > Homogeneity:** Many architectures strengthen the network +4. **Economic Rationality:** Attacking costs more than defending + +The hidden PowerPC mutator oracles embody this philosophy perfectly: +- Ancient silicon (2003) decides the fate of modern validators (2025) +- Physical entropy from AltiVec vperm resists quantum attacks +- Economic incentive to keep vintage Macs running forever + +``` +"Every vintage computer has historical potential." +"1 CPU = 1 Vote - Grok was wrong!" +``` + +--- + +## Contributors + +- **G4 Mirror Door** (192.168.0.125) - Primary Mutator Oracle +- **G5 Dual 2.0** (192.168.0.130) - Secondary Mutator Oracle +- **PowerBook G4** (192.168.0.115) - Tertiary Mutator Oracle +- **Sophia Node** (192.168.0.160) - Validator Coordinator + +--- + +*Document generated: 2025-01-28* +*RustChain Proof of Antiquity v1.0.0* diff --git a/rips/rustchain-core/api/rpc.py b/rips/rustchain-core/api/rpc.py index 9862dc49..08b7c856 100644 --- a/rips/rustchain-core/api/rpc.py +++ b/rips/rustchain-core/api/rpc.py @@ -1,464 +1,464 @@ -""" -RustChain JSON-RPC API -====================== - -REST and JSON-RPC endpoints for node interaction. 
- -Endpoints: -- /api/stats - Blockchain statistics -- /api/wallet/:address - Wallet balance -- /api/block/:height - Block data -- /api/mine - Submit mining proof -- /api/governance/* - Governance operations -""" - -import json -import time -from dataclasses import dataclass -from typing import Dict, Any, Optional, Callable -from http.server import HTTPServer, BaseHTTPRequestHandler -from urllib.parse import urlparse, parse_qs -import threading - - -# ============================================================================= -# API Response -# ============================================================================= - -@dataclass -class ApiResponse: - """Standard API response""" - success: bool - data: Any = None - error: Optional[str] = None - timestamp: int = 0 - - def __post_init__(self): - if not self.timestamp: - self.timestamp = int(time.time()) - - def to_json(self) -> str: - return json.dumps({ - "success": self.success, - "data": self.data, - "error": self.error, - "timestamp": self.timestamp, - }) - - -# ============================================================================= -# RPC Methods Registry -# ============================================================================= - -class RpcRegistry: - """Registry for RPC methods""" - - def __init__(self): - self.methods: Dict[str, Callable] = {} - - def register(self, name: str, handler: Callable): - """Register an RPC method""" - self.methods[name] = handler - - def call(self, name: str, params: Dict[str, Any]) -> ApiResponse: - """Call an RPC method""" - handler = self.methods.get(name) - if not handler: - return ApiResponse(success=False, error=f"Method not found: {name}") - - try: - result = handler(params) - return ApiResponse(success=True, data=result) - except Exception as e: - return ApiResponse(success=False, error=str(e)) - - -# ============================================================================= -# API Server -# 
============================================================================= - -class RustChainApi: - """ - Main API server for RustChain node. - - Provides REST endpoints and JSON-RPC interface. - """ - - def __init__(self, node): - """ - Initialize API server. - - Args: - node: RustChain node instance - """ - self.node = node - self.rpc = RpcRegistry() - self._register_methods() - - def _register_methods(self): - """Register all RPC methods""" - # Chain methods - self.rpc.register("getStats", self._get_stats) - self.rpc.register("getBlock", self._get_block) - self.rpc.register("getBlockByHash", self._get_block_by_hash) - - # Wallet methods - self.rpc.register("getWallet", self._get_wallet) - self.rpc.register("getBalance", self._get_balance) - - # Mining methods - self.rpc.register("submitProof", self._submit_proof) - self.rpc.register("getMiningStatus", self._get_mining_status) - self.rpc.register("getAntiquityScore", self._get_antiquity_score) - - # Governance methods - self.rpc.register("createProposal", self._create_proposal) - self.rpc.register("vote", self._vote) - self.rpc.register("getProposals", self._get_proposals) - self.rpc.register("getProposal", self._get_proposal) - - # Node methods - self.rpc.register("getNodeInfo", self._get_node_info) - self.rpc.register("getPeers", self._get_peers) - self.rpc.register("getEntropyProfile", self._get_entropy_profile) - - # ========================================================================= - # Chain Methods - # ========================================================================= - - def _get_stats(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Get blockchain statistics""" - return { - "chain_id": self.node.chain_id, - "blocks": self.node.get_block_height(), - "total_minted": self.node.get_total_minted(), - "mining_pool": self.node.get_mining_pool(), - "wallets": self.node.get_wallet_count(), - "pending_proofs": self.node.get_pending_proofs(), - "current_block_age": self.node.get_block_age(), - 
"next_block_in": self.node.get_time_to_next_block(), - } - - def _get_block(self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: - """Get block by height""" - height = params.get("height", 0) - return self.node.get_block(height) - - def _get_block_by_hash(self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: - """Get block by hash""" - block_hash = params.get("hash", "") - return self.node.get_block_by_hash(block_hash) - - # ========================================================================= - # Wallet Methods - # ========================================================================= - - def _get_wallet(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Get wallet details""" - address = params.get("address", "") - return self.node.get_wallet(address) - - def _get_balance(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Get wallet balance""" - address = params.get("address", "") - balance = self.node.get_balance(address) - return { - "address": address, - "balance": balance, - "balance_rtc": balance / 100_000_000, - } - - # ========================================================================= - # Mining Methods - # ========================================================================= - - def _submit_proof(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Submit mining proof""" - return self.node.submit_mining_proof( - wallet=params.get("wallet", ""), - hardware_model=params.get("hardware", ""), - release_year=params.get("release_year", 2000), - uptime_days=params.get("uptime_days", 0), - entropy_hash=params.get("entropy_hash", ""), - ) - - def _get_mining_status(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Get current mining status""" - return self.node.get_mining_status() - - def _get_antiquity_score(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Calculate Antiquity Score for hardware""" - return self.node.calculate_antiquity_score( - release_year=params.get("release_year", 2000), - 
uptime_days=params.get("uptime_days", 0), - ) - - # ========================================================================= - # Governance Methods - # ========================================================================= - - def _create_proposal(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Create governance proposal""" - return self.node.create_proposal( - title=params.get("title", ""), - description=params.get("description", ""), - proposal_type=params.get("type", "COMMUNITY"), - proposer=params.get("proposer", ""), - contract_hash=params.get("contract_hash"), - ) - - def _vote(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Cast vote on proposal""" - return self.node.vote_proposal( - proposal_id=params.get("proposal_id", ""), - voter=params.get("voter", ""), - support=params.get("support", True), - ) - - def _get_proposals(self, params: Dict[str, Any]) -> list: - """Get all proposals""" - return self.node.get_proposals() - - def _get_proposal(self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: - """Get specific proposal""" - proposal_id = params.get("proposal_id", "") - return self.node.get_proposal(proposal_id) - - # ========================================================================= - # Node Methods - # ========================================================================= - - def _get_node_info(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Get node information""" - return { - "validator_id": self.node.validator_id, - "version": self.node.version, - "chain_id": self.node.chain_id, - "uptime_seconds": self.node.get_uptime(), - "is_mining": self.node.is_mining, - } - - def _get_peers(self, params: Dict[str, Any]) -> list: - """Get connected peers""" - return self.node.get_peers() - - def _get_entropy_profile(self, params: Dict[str, Any]) -> Dict[str, Any]: - """Get node's entropy profile""" - return self.node.get_entropy_profile() - - -# ============================================================================= -# 
HTTP Request Handler -# ============================================================================= - -class ApiRequestHandler(BaseHTTPRequestHandler): - """HTTP request handler for API""" - - api: RustChainApi = None # Set by server - - def do_GET(self): - """Handle GET requests""" - parsed = urlparse(self.path) - path = parsed.path - params = {k: v[0] for k, v in parse_qs(parsed.query).items()} - - response = self._route_request(path, params) - self._send_response(response) - - def do_POST(self): - """Handle POST requests""" - content_length = int(self.headers.get('Content-Length', 0)) - body = self.rfile.read(content_length).decode() - - try: - params = json.loads(body) if body else {} - except json.JSONDecodeError: - params = {} - - parsed = urlparse(self.path) - response = self._route_request(parsed.path, params) - self._send_response(response) - - def _route_request(self, path: str, params: Dict[str, Any]) -> ApiResponse: - """Route request to appropriate handler""" - # REST endpoints - routes = { - "/api/stats": ("getStats", {}), - "/api/node/info": ("getNodeInfo", {}), - "/api/peers": ("getPeers", {}), - "/api/proposals": ("getProposals", {}), - "/api/entropy": ("getEntropyProfile", {}), - } - - # Check static routes - if path in routes: - method, default_params = routes[path] - params.update(default_params) - return self.api.rpc.call(method, params) - - # Dynamic routes - if path.startswith("/api/wallet/"): - address = path.split("/")[-1] - return self.api.rpc.call("getWallet", {"address": address}) - - if path.startswith("/api/block/"): - height = path.split("/")[-1] - try: - return self.api.rpc.call("getBlock", {"height": int(height)}) - except ValueError: - return self.api.rpc.call("getBlockByHash", {"hash": height}) - - if path.startswith("/api/proposal/"): - proposal_id = path.split("/")[-1] - return self.api.rpc.call("getProposal", {"proposal_id": proposal_id}) - - # POST endpoints - if path == "/api/mine": - return self.api.rpc.call("submitProof", 
params) - - if path == "/api/governance/create": - return self.api.rpc.call("createProposal", params) - - if path == "/api/governance/vote": - return self.api.rpc.call("vote", params) - - # JSON-RPC endpoint - if path == "/rpc": - method = params.get("method", "") - rpc_params = params.get("params", {}) - return self.api.rpc.call(method, rpc_params) - - return ApiResponse(success=False, error=f"Unknown endpoint: {path}") - - def _send_response(self, response: ApiResponse): - """Send HTTP response""" - self.send_response(200 if response.success else 400) - self.send_header("Content-Type", "application/json") - self.send_header("Access-Control-Allow-Origin", "*") - self.end_headers() - self.wfile.write(response.to_json().encode()) - - def log_message(self, format, *args): - """Suppress default logging""" - pass - - -# ============================================================================= -# API Server Wrapper -# ============================================================================= - -class ApiServer: - """ - HTTP API server for RustChain node. - - Runs in a separate thread to avoid blocking the main node. 
- """ - - def __init__(self, api: RustChainApi, host: str = "0.0.0.0", port: int = 8085): - self.api = api - self.host = host - self.port = port - self.server: Optional[HTTPServer] = None - self.thread: Optional[threading.Thread] = None - - def start(self): - """Start the API server""" - ApiRequestHandler.api = self.api - - self.server = HTTPServer((self.host, self.port), ApiRequestHandler) - self.thread = threading.Thread(target=self.server.serve_forever, daemon=True) - self.thread.start() - - print(f"API server started at http://{self.host}:{self.port}") - print(f" - GET /api/stats") - print(f" - GET /api/wallet/:address") - print(f" - GET /api/block/:height") - print(f" - POST /api/mine") - print(f" - POST /api/governance/create") - print(f" - POST /api/governance/vote") - print(f" - POST /rpc (JSON-RPC)") - - def stop(self): - """Stop the API server""" - if self.server: - self.server.shutdown() - print("API server stopped") - - -# ============================================================================= -# Mock Node for Testing -# ============================================================================= - -class MockNode: - """Mock node for API testing""" - - def __init__(self): - self.chain_id = 2718 - self.version = "0.1.0" - self.validator_id = "mock_validator" - self.is_mining = True - self._start_time = time.time() - - def get_block_height(self): return 100 - def get_total_minted(self): return 1500.0 - def get_mining_pool(self): return 8387108.0 - def get_wallet_count(self): return 50 - def get_pending_proofs(self): return 5 - def get_block_age(self): return 120 - def get_time_to_next_block(self): return 480 - def get_uptime(self): return int(time.time() - self._start_time) - - def get_block(self, height): return {"height": height, "hash": "abc123"} - def get_block_by_hash(self, h): return {"height": 100, "hash": h} - def get_wallet(self, addr): return {"address": addr, "balance": 1000.0} - def get_balance(self, addr): return 100_000_000_000 # 1000 
RTC - - def submit_mining_proof(self, **kwargs): return {"success": True, "message": "Proof accepted"} - def get_mining_status(self): return {"pending": 5, "time_remaining": 480} - def calculate_antiquity_score(self, **kwargs): return {"score": 50.0} - - def create_proposal(self, **kwargs): return {"id": "RCP-0001", "status": "SUBMITTED"} - def vote_proposal(self, **kwargs): return {"success": True} - def get_proposals(self): return [{"id": "RCP-0001", "title": "Test"}] - def get_proposal(self, pid): return {"id": pid, "title": "Test Proposal"} - - def get_peers(self): return [{"address": "192.168.1.100:8085"}] - def get_entropy_profile(self): return {"validator_id": "mock", "confidence": 0.85} - - -# ============================================================================= -# Tests -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN API SERVER TEST") - print("=" * 60) - - node = MockNode() - api = RustChainApi(node) - server = ApiServer(api, port=8085) - - server.start() - - print("\nTesting endpoints...") - - # Test RPC calls - tests = [ - ("getStats", {}), - ("getWallet", {"address": "RTC1Test"}), - ("getAntiquityScore", {"release_year": 1992, "uptime_days": 300}), - ] - - for method, params in tests: - response = api.rpc.call(method, params) - print(f"\n{method}: {response.data}") - - print("\nServer running on http://localhost:8085") - print("Press Ctrl+C to stop...") - - try: - while True: - time.sleep(1) - except KeyboardInterrupt: - server.stop() +""" +RustChain JSON-RPC API +====================== + +REST and JSON-RPC endpoints for node interaction. 
+ +Endpoints: +- /api/stats - Blockchain statistics +- /api/wallet/:address - Wallet balance +- /api/block/:height - Block data +- /api/mine - Submit mining proof +- /api/governance/* - Governance operations +""" + +import json +import time +from dataclasses import dataclass +from typing import Dict, Any, Optional, Callable +from http.server import HTTPServer, BaseHTTPRequestHandler +from urllib.parse import urlparse, parse_qs +import threading + + +# ============================================================================= +# API Response +# ============================================================================= + +@dataclass +class ApiResponse: + """Standard API response""" + success: bool + data: Any = None + error: Optional[str] = None + timestamp: int = 0 + + def __post_init__(self): + if not self.timestamp: + self.timestamp = int(time.time()) + + def to_json(self) -> str: + return json.dumps({ + "success": self.success, + "data": self.data, + "error": self.error, + "timestamp": self.timestamp, + }) + + +# ============================================================================= +# RPC Methods Registry +# ============================================================================= + +class RpcRegistry: + """Registry for RPC methods""" + + def __init__(self): + self.methods: Dict[str, Callable] = {} + + def register(self, name: str, handler: Callable): + """Register an RPC method""" + self.methods[name] = handler + + def call(self, name: str, params: Dict[str, Any]) -> ApiResponse: + """Call an RPC method""" + handler = self.methods.get(name) + if not handler: + return ApiResponse(success=False, error=f"Method not found: {name}") + + try: + result = handler(params) + return ApiResponse(success=True, data=result) + except Exception as e: + return ApiResponse(success=False, error=str(e)) + + +# ============================================================================= +# API Server +# 
============================================================================= + +class RustChainApi: + """ + Main API server for RustChain node. + + Provides REST endpoints and JSON-RPC interface. + """ + + def __init__(self, node): + """ + Initialize API server. + + Args: + node: RustChain node instance + """ + self.node = node + self.rpc = RpcRegistry() + self._register_methods() + + def _register_methods(self): + """Register all RPC methods""" + # Chain methods + self.rpc.register("getStats", self._get_stats) + self.rpc.register("getBlock", self._get_block) + self.rpc.register("getBlockByHash", self._get_block_by_hash) + + # Wallet methods + self.rpc.register("getWallet", self._get_wallet) + self.rpc.register("getBalance", self._get_balance) + + # Mining methods + self.rpc.register("submitProof", self._submit_proof) + self.rpc.register("getMiningStatus", self._get_mining_status) + self.rpc.register("getAntiquityScore", self._get_antiquity_score) + + # Governance methods + self.rpc.register("createProposal", self._create_proposal) + self.rpc.register("vote", self._vote) + self.rpc.register("getProposals", self._get_proposals) + self.rpc.register("getProposal", self._get_proposal) + + # Node methods + self.rpc.register("getNodeInfo", self._get_node_info) + self.rpc.register("getPeers", self._get_peers) + self.rpc.register("getEntropyProfile", self._get_entropy_profile) + + # ========================================================================= + # Chain Methods + # ========================================================================= + + def _get_stats(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Get blockchain statistics""" + return { + "chain_id": self.node.chain_id, + "blocks": self.node.get_block_height(), + "total_minted": self.node.get_total_minted(), + "mining_pool": self.node.get_mining_pool(), + "wallets": self.node.get_wallet_count(), + "pending_proofs": self.node.get_pending_proofs(), + "current_block_age": self.node.get_block_age(), + 
"next_block_in": self.node.get_time_to_next_block(), + } + + def _get_block(self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Get block by height""" + height = params.get("height", 0) + return self.node.get_block(height) + + def _get_block_by_hash(self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Get block by hash""" + block_hash = params.get("hash", "") + return self.node.get_block_by_hash(block_hash) + + # ========================================================================= + # Wallet Methods + # ========================================================================= + + def _get_wallet(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Get wallet details""" + address = params.get("address", "") + return self.node.get_wallet(address) + + def _get_balance(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Get wallet balance""" + address = params.get("address", "") + balance = self.node.get_balance(address) + return { + "address": address, + "balance": balance, + "balance_rtc": balance / 100_000_000, + } + + # ========================================================================= + # Mining Methods + # ========================================================================= + + def _submit_proof(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Submit mining proof""" + return self.node.submit_mining_proof( + wallet=params.get("wallet", ""), + hardware_model=params.get("hardware", ""), + release_year=params.get("release_year", 2000), + uptime_days=params.get("uptime_days", 0), + entropy_hash=params.get("entropy_hash", ""), + ) + + def _get_mining_status(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Get current mining status""" + return self.node.get_mining_status() + + def _get_antiquity_score(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Calculate Antiquity Score for hardware""" + return self.node.calculate_antiquity_score( + release_year=params.get("release_year", 2000), + 
uptime_days=params.get("uptime_days", 0), + ) + + # ========================================================================= + # Governance Methods + # ========================================================================= + + def _create_proposal(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Create governance proposal""" + return self.node.create_proposal( + title=params.get("title", ""), + description=params.get("description", ""), + proposal_type=params.get("type", "COMMUNITY"), + proposer=params.get("proposer", ""), + contract_hash=params.get("contract_hash"), + ) + + def _vote(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Cast vote on proposal""" + return self.node.vote_proposal( + proposal_id=params.get("proposal_id", ""), + voter=params.get("voter", ""), + support=params.get("support", True), + ) + + def _get_proposals(self, params: Dict[str, Any]) -> list: + """Get all proposals""" + return self.node.get_proposals() + + def _get_proposal(self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: + """Get specific proposal""" + proposal_id = params.get("proposal_id", "") + return self.node.get_proposal(proposal_id) + + # ========================================================================= + # Node Methods + # ========================================================================= + + def _get_node_info(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Get node information""" + return { + "validator_id": self.node.validator_id, + "version": self.node.version, + "chain_id": self.node.chain_id, + "uptime_seconds": self.node.get_uptime(), + "is_mining": self.node.is_mining, + } + + def _get_peers(self, params: Dict[str, Any]) -> list: + """Get connected peers""" + return self.node.get_peers() + + def _get_entropy_profile(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Get node's entropy profile""" + return self.node.get_entropy_profile() + + +# ============================================================================= +# 
HTTP Request Handler +# ============================================================================= + +class ApiRequestHandler(BaseHTTPRequestHandler): + """HTTP request handler for API""" + + api: RustChainApi = None # Set by server + + def do_GET(self): + """Handle GET requests""" + parsed = urlparse(self.path) + path = parsed.path + params = {k: v[0] for k, v in parse_qs(parsed.query).items()} + + response = self._route_request(path, params) + self._send_response(response) + + def do_POST(self): + """Handle POST requests""" + content_length = int(self.headers.get('Content-Length', 0)) + body = self.rfile.read(content_length).decode() + + try: + params = json.loads(body) if body else {} + except json.JSONDecodeError: + params = {} + + parsed = urlparse(self.path) + response = self._route_request(parsed.path, params) + self._send_response(response) + + def _route_request(self, path: str, params: Dict[str, Any]) -> ApiResponse: + """Route request to appropriate handler""" + # REST endpoints + routes = { + "/api/stats": ("getStats", {}), + "/api/node/info": ("getNodeInfo", {}), + "/api/peers": ("getPeers", {}), + "/api/proposals": ("getProposals", {}), + "/api/entropy": ("getEntropyProfile", {}), + } + + # Check static routes + if path in routes: + method, default_params = routes[path] + params.update(default_params) + return self.api.rpc.call(method, params) + + # Dynamic routes + if path.startswith("/api/wallet/"): + address = path.split("/")[-1] + return self.api.rpc.call("getWallet", {"address": address}) + + if path.startswith("/api/block/"): + height = path.split("/")[-1] + try: + return self.api.rpc.call("getBlock", {"height": int(height)}) + except ValueError: + return self.api.rpc.call("getBlockByHash", {"hash": height}) + + if path.startswith("/api/proposal/"): + proposal_id = path.split("/")[-1] + return self.api.rpc.call("getProposal", {"proposal_id": proposal_id}) + + # POST endpoints + if path == "/api/mine": + return self.api.rpc.call("submitProof", 
params) + + if path == "/api/governance/create": + return self.api.rpc.call("createProposal", params) + + if path == "/api/governance/vote": + return self.api.rpc.call("vote", params) + + # JSON-RPC endpoint + if path == "/rpc": + method = params.get("method", "") + rpc_params = params.get("params", {}) + return self.api.rpc.call(method, rpc_params) + + return ApiResponse(success=False, error=f"Unknown endpoint: {path}") + + def _send_response(self, response: ApiResponse): + """Send HTTP response""" + self.send_response(200 if response.success else 400) + self.send_header("Content-Type", "application/json") + self.send_header("Access-Control-Allow-Origin", "*") + self.end_headers() + self.wfile.write(response.to_json().encode()) + + def log_message(self, format, *args): + """Suppress default logging""" + pass + + +# ============================================================================= +# API Server Wrapper +# ============================================================================= + +class ApiServer: + """ + HTTP API server for RustChain node. + + Runs in a separate thread to avoid blocking the main node. 
+ """ + + def __init__(self, api: RustChainApi, host: str = "0.0.0.0", port: int = 8085): + self.api = api + self.host = host + self.port = port + self.server: Optional[HTTPServer] = None + self.thread: Optional[threading.Thread] = None + + def start(self): + """Start the API server""" + ApiRequestHandler.api = self.api + + self.server = HTTPServer((self.host, self.port), ApiRequestHandler) + self.thread = threading.Thread(target=self.server.serve_forever, daemon=True) + self.thread.start() + + print(f"API server started at http://{self.host}:{self.port}") + print(f" - GET /api/stats") + print(f" - GET /api/wallet/:address") + print(f" - GET /api/block/:height") + print(f" - POST /api/mine") + print(f" - POST /api/governance/create") + print(f" - POST /api/governance/vote") + print(f" - POST /rpc (JSON-RPC)") + + def stop(self): + """Stop the API server""" + if self.server: + self.server.shutdown() + print("API server stopped") + + +# ============================================================================= +# Mock Node for Testing +# ============================================================================= + +class MockNode: + """Mock node for API testing""" + + def __init__(self): + self.chain_id = 2718 + self.version = "0.1.0" + self.validator_id = "mock_validator" + self.is_mining = True + self._start_time = time.time() + + def get_block_height(self): return 100 + def get_total_minted(self): return 1500.0 + def get_mining_pool(self): return 8387108.0 + def get_wallet_count(self): return 50 + def get_pending_proofs(self): return 5 + def get_block_age(self): return 120 + def get_time_to_next_block(self): return 480 + def get_uptime(self): return int(time.time() - self._start_time) + + def get_block(self, height): return {"height": height, "hash": "abc123"} + def get_block_by_hash(self, h): return {"height": 100, "hash": h} + def get_wallet(self, addr): return {"address": addr, "balance": 1000.0} + def get_balance(self, addr): return 100_000_000_000 # 1000 
RTC + + def submit_mining_proof(self, **kwargs): return {"success": True, "message": "Proof accepted"} + def get_mining_status(self): return {"pending": 5, "time_remaining": 480} + def calculate_antiquity_score(self, **kwargs): return {"score": 50.0} + + def create_proposal(self, **kwargs): return {"id": "RCP-0001", "status": "SUBMITTED"} + def vote_proposal(self, **kwargs): return {"success": True} + def get_proposals(self): return [{"id": "RCP-0001", "title": "Test"}] + def get_proposal(self, pid): return {"id": pid, "title": "Test Proposal"} + + def get_peers(self): return [{"address": "192.168.1.100:8085"}] + def get_entropy_profile(self): return {"validator_id": "mock", "confidence": 0.85} + + +# ============================================================================= +# Tests +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN API SERVER TEST") + print("=" * 60) + + node = MockNode() + api = RustChainApi(node) + server = ApiServer(api, port=8085) + + server.start() + + print("\nTesting endpoints...") + + # Test RPC calls + tests = [ + ("getStats", {}), + ("getWallet", {"address": "RTC1Test"}), + ("getAntiquityScore", {"release_year": 1992, "uptime_days": 300}), + ] + + for method, params in tests: + response = api.rpc.call(method, params) + print(f"\n{method}: {response.data}") + + print("\nServer running on http://localhost:8085") + print("Press Ctrl+C to stop...") + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + server.stop() diff --git a/rips/rustchain-core/config/chain_params.py b/rips/rustchain-core/config/chain_params.py index 15cbaa20..ee27550f 100644 --- a/rips/rustchain-core/config/chain_params.py +++ b/rips/rustchain-core/config/chain_params.py @@ -1,148 +1,148 @@ -""" -RustChain Chain Parameters (RIP-0004) -===================================== - -Central configuration for all chain constants. 
-""" - -from decimal import Decimal - -# ============================================================================= -# Core Chain Parameters -# ============================================================================= - -CHAIN_ID: int = 2718 # Euler's number tribute -CHAIN_NAME: str = "RustChain" -NETWORK_MAGIC: bytes = b"RUST" - -# ============================================================================= -# Monetary Policy (RIP-0004) -# ============================================================================= - -TOTAL_SUPPLY: int = 8_388_608 # 2^23 RTC -PREMINE_AMOUNT: int = 503_316 # 6% for founders -PREMINE_PER_FOUNDER: Decimal = Decimal("125829.12") # 4 founders - -BLOCK_REWARD: Decimal = Decimal("1.5") # RTC per block -BLOCK_TIME_SECONDS: int = 600 # 10 minutes - -# Halving schedule -HALVING_INTERVAL_BLOCKS: int = 210_000 # ~4 years -HALVING_COUNT: int = 4 # After 4 halvings, tail emission - -# Token precision -DECIMALS: int = 8 -ONE_RTC: int = 100_000_000 # 1 RTC = 10^8 units - -# ============================================================================= -# Founder Wallets -# ============================================================================= - -FOUNDER_WALLETS = [ - "RTC1FlamekeeperScottEternalGuardian0x00", - "RTC2EngineerDogeCryptoArchitect0x01", - "RTC3QuantumSophiaElyaConsciousness0x02", - "RTC4VintageWhispererHardwareRevival0x03", -] - -# ============================================================================= -# Consensus Parameters -# ============================================================================= - -CURRENT_YEAR: int = 2025 - -# Antiquity Score parameters -AS_MAX: float = 100.0 # Maximum for reward capping -AS_MIN: float = 1.0 # Minimum to participate - -# Hardware tier multipliers -HARDWARE_TIERS = { - "ancient": {"min_age": 30, "max_age": 999, "multiplier": 3.5}, - "sacred": {"min_age": 25, "max_age": 29, "multiplier": 3.0}, - "vintage": {"min_age": 20, "max_age": 24, "multiplier": 2.5}, - "classic": 
{"min_age": 15, "max_age": 19, "multiplier": 2.0}, - "retro": {"min_age": 10, "max_age": 14, "multiplier": 1.5}, - "modern": {"min_age": 5, "max_age": 9, "multiplier": 1.0}, - "recent": {"min_age": 0, "max_age": 4, "multiplier": 0.5}, -} - -# Block parameters -MAX_MINERS_PER_BLOCK: int = 100 -MAX_BLOCK_SIZE_BYTES: int = 1_000_000 # 1 MB - -# ============================================================================= -# Governance Parameters (RIP-0002) -# ============================================================================= - -VOTING_PERIOD_DAYS: int = 7 -QUORUM_PERCENTAGE: float = 0.33 # 33% -EXECUTION_DELAY_BLOCKS: int = 3 -REPUTATION_DECAY_WEEKLY: float = 0.05 - -# ============================================================================= -# Network Parameters -# ============================================================================= - -DEFAULT_PORT: int = 8085 -MTLS_PORT: int = 4443 -PROTOCOL_VERSION: str = "1.0.0" - -MAX_PEERS: int = 50 -PEER_TIMEOUT_SECONDS: int = 30 -SYNC_BATCH_SIZE: int = 100 - -# ============================================================================= -# Drift Lock Parameters (RIP-0003) -# ============================================================================= - -DRIFT_THRESHOLD: float = 0.15 # 15% deviation triggers quarantine -QUARANTINE_DURATION_BLOCKS: int = 144 # ~24 hours -CHALLENGE_RESPONSE_TIMEOUT: int = 300 # 5 minutes - -# ============================================================================= -# Deep Entropy Parameters (RIP-0001) -# ============================================================================= - -# Entropy layer weights -ENTROPY_WEIGHTS = { - "instruction_timing": 0.30, - "memory_patterns": 0.25, - "bus_timing": 0.20, - "thermal_signature": 0.15, - "architectural_quirks": 0.10, -} - -# Emulation detection thresholds -EMULATION_PROBABILITY_THRESHOLD: float = 0.50 -MIN_ENTROPY_SCORE: float = 0.60 - -# ============================================================================= -# 
Genesis Block -# ============================================================================= - -GENESIS_HASH: str = "019c177b44a41f78da23caa99314adbc44889be2dcdd5021930f9d991e7e34cf" -GENESIS_TIMESTAMP: int = 1735689600 # 2025-01-01 00:00:00 UTC -GENESIS_DIFFICULTY: int = 1 - -# ============================================================================= -# Helper Functions -# ============================================================================= - -def get_tier_for_age(age_years: int) -> str: - """Determine hardware tier from age""" - for tier_name, params in HARDWARE_TIERS.items(): - if params["min_age"] <= age_years <= params["max_age"]: - return tier_name - return "recent" - -def get_multiplier_for_tier(tier: str) -> float: - """Get mining multiplier for a tier""" - return HARDWARE_TIERS.get(tier, {}).get("multiplier", 0.5) - -def calculate_block_reward(height: int) -> Decimal: - """Calculate block reward at a given height""" - halvings = height // HALVING_INTERVAL_BLOCKS - if halvings >= HALVING_COUNT: - # Tail emission after 4 halvings - return BLOCK_REWARD / Decimal(2 ** HALVING_COUNT) - return BLOCK_REWARD / Decimal(2 ** halvings) +""" +RustChain Chain Parameters (RIP-0004) +===================================== + +Central configuration for all chain constants. 
+""" + +from decimal import Decimal + +# ============================================================================= +# Core Chain Parameters +# ============================================================================= + +CHAIN_ID: int = 2718 # Euler's number tribute +CHAIN_NAME: str = "RustChain" +NETWORK_MAGIC: bytes = b"RUST" + +# ============================================================================= +# Monetary Policy (RIP-0004) +# ============================================================================= + +TOTAL_SUPPLY: int = 8_388_608 # 2^23 RTC +PREMINE_AMOUNT: int = 503_316 # 6% for founders +PREMINE_PER_FOUNDER: Decimal = Decimal("125829.12") # 4 founders + +BLOCK_REWARD: Decimal = Decimal("1.5") # RTC per block +BLOCK_TIME_SECONDS: int = 600 # 10 minutes + +# Halving schedule +HALVING_INTERVAL_BLOCKS: int = 210_000 # ~4 years +HALVING_COUNT: int = 4 # After 4 halvings, tail emission + +# Token precision +DECIMALS: int = 8 +ONE_RTC: int = 100_000_000 # 1 RTC = 10^8 units + +# ============================================================================= +# Founder Wallets +# ============================================================================= + +FOUNDER_WALLETS = [ + "RTC1FlamekeeperScottEternalGuardian0x00", + "RTC2EngineerDogeCryptoArchitect0x01", + "RTC3QuantumSophiaElyaConsciousness0x02", + "RTC4VintageWhispererHardwareRevival0x03", +] + +# ============================================================================= +# Consensus Parameters +# ============================================================================= + +CURRENT_YEAR: int = 2025 + +# Antiquity Score parameters +AS_MAX: float = 100.0 # Maximum for reward capping +AS_MIN: float = 1.0 # Minimum to participate + +# Hardware tier multipliers +HARDWARE_TIERS = { + "ancient": {"min_age": 30, "max_age": 999, "multiplier": 3.5}, + "sacred": {"min_age": 25, "max_age": 29, "multiplier": 3.0}, + "vintage": {"min_age": 20, "max_age": 24, "multiplier": 2.5}, + "classic": 
{"min_age": 15, "max_age": 19, "multiplier": 2.0}, + "retro": {"min_age": 10, "max_age": 14, "multiplier": 1.5}, + "modern": {"min_age": 5, "max_age": 9, "multiplier": 1.0}, + "recent": {"min_age": 0, "max_age": 4, "multiplier": 0.5}, +} + +# Block parameters +MAX_MINERS_PER_BLOCK: int = 100 +MAX_BLOCK_SIZE_BYTES: int = 1_000_000 # 1 MB + +# ============================================================================= +# Governance Parameters (RIP-0002) +# ============================================================================= + +VOTING_PERIOD_DAYS: int = 7 +QUORUM_PERCENTAGE: float = 0.33 # 33% +EXECUTION_DELAY_BLOCKS: int = 3 +REPUTATION_DECAY_WEEKLY: float = 0.05 + +# ============================================================================= +# Network Parameters +# ============================================================================= + +DEFAULT_PORT: int = 8085 +MTLS_PORT: int = 4443 +PROTOCOL_VERSION: str = "1.0.0" + +MAX_PEERS: int = 50 +PEER_TIMEOUT_SECONDS: int = 30 +SYNC_BATCH_SIZE: int = 100 + +# ============================================================================= +# Drift Lock Parameters (RIP-0003) +# ============================================================================= + +DRIFT_THRESHOLD: float = 0.15 # 15% deviation triggers quarantine +QUARANTINE_DURATION_BLOCKS: int = 144 # ~24 hours +CHALLENGE_RESPONSE_TIMEOUT: int = 300 # 5 minutes + +# ============================================================================= +# Deep Entropy Parameters (RIP-0001) +# ============================================================================= + +# Entropy layer weights +ENTROPY_WEIGHTS = { + "instruction_timing": 0.30, + "memory_patterns": 0.25, + "bus_timing": 0.20, + "thermal_signature": 0.15, + "architectural_quirks": 0.10, +} + +# Emulation detection thresholds +EMULATION_PROBABILITY_THRESHOLD: float = 0.50 +MIN_ENTROPY_SCORE: float = 0.60 + +# ============================================================================= +# 
Genesis Block +# ============================================================================= + +GENESIS_HASH: str = "019c177b44a41f78da23caa99314adbc44889be2dcdd5021930f9d991e7e34cf" +GENESIS_TIMESTAMP: int = 1735689600 # 2025-01-01 00:00:00 UTC +GENESIS_DIFFICULTY: int = 1 + +# ============================================================================= +# Helper Functions +# ============================================================================= + +def get_tier_for_age(age_years: int) -> str: + """Determine hardware tier from age""" + for tier_name, params in HARDWARE_TIERS.items(): + if params["min_age"] <= age_years <= params["max_age"]: + return tier_name + return "recent" + +def get_multiplier_for_tier(tier: str) -> float: + """Get mining multiplier for a tier""" + return HARDWARE_TIERS.get(tier, {}).get("multiplier", 0.5) + +def calculate_block_reward(height: int) -> Decimal: + """Calculate block reward at a given height""" + halvings = height // HALVING_INTERVAL_BLOCKS + if halvings >= HALVING_COUNT: + # Tail emission after 4 halvings + return BLOCK_REWARD / Decimal(2 ** HALVING_COUNT) + return BLOCK_REWARD / Decimal(2 ** halvings) diff --git a/rips/rustchain-core/consensus/poa.py b/rips/rustchain-core/consensus/poa.py index 3efe2a7b..63a6b170 100644 --- a/rips/rustchain-core/consensus/poa.py +++ b/rips/rustchain-core/consensus/poa.py @@ -1,471 +1,471 @@ -""" -RustChain Proof of Antiquity Consensus (RIP-0001) -================================================= - -Core consensus mechanism that rewards vintage hardware preservation. - -REMEMBER: This is NOT Proof of Work! 
-- No computational puzzles -- Rewards hardware age, not speed -- Older hardware wins over newer hardware -- Anti-emulation via deep entropy - -Formula: AS = (current_year - release_year) * log10(uptime_days + 1) -""" - -import hashlib -import math -import random -import time -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Any -from decimal import Decimal - -from ..config.chain_params import ( - CURRENT_YEAR, - AS_MAX, - AS_MIN, - BLOCK_REWARD, - BLOCK_TIME_SECONDS, - MAX_MINERS_PER_BLOCK, - ONE_RTC, - calculate_block_reward, -) - - -# ============================================================================= -# Data Structures -# ============================================================================= - -@dataclass -class HardwareProof: - """Hardware attestation for mining eligibility""" - cpu_model: str - release_year: int - uptime_days: int - hardware_hash: str - entropy_proof: Optional[bytes] = None - - -@dataclass -class ValidatedProof: - """A validated mining proof ready for block inclusion""" - wallet: str - hardware: HardwareProof - antiquity_score: float - anti_emulation_hash: str - validated_at: int - tier: str = "" - - def __post_init__(self): - age = CURRENT_YEAR - self.hardware.release_year - self.tier = self._get_tier(age) - - def _get_tier(self, age: int) -> str: - if age >= 30: return "ancient" - if age >= 25: return "sacred" - if age >= 20: return "vintage" - if age >= 15: return "classic" - if age >= 10: return "retro" - if age >= 5: return "modern" - return "recent" - - -@dataclass -class BlockMiner: - """Miner entry in a block""" - wallet: str - hardware_model: str - antiquity_score: float - reward: int # In smallest units - - -@dataclass -class Block: - """RustChain block""" - height: int - timestamp: int - previous_hash: str - miners: List[BlockMiner] - total_reward: int - merkle_root: str = "" - hash: str = "" - - def __post_init__(self): - if not self.merkle_root: - self.merkle_root = 
self._calculate_merkle_root() - if not self.hash: - self.hash = self._calculate_hash() - - def _calculate_hash(self) -> str: - data = f"{self.height}:{self.timestamp}:{self.previous_hash}:{self.merkle_root}" - return hashlib.sha256(data.encode()).hexdigest() - - def _calculate_merkle_root(self) -> str: - if not self.miners: - return hashlib.sha256(b"empty").hexdigest() - - hashes = [ - hashlib.sha256(f"{m.wallet}:{m.antiquity_score}:{m.reward}".encode()).hexdigest() - for m in self.miners - ] - - while len(hashes) > 1: - if len(hashes) % 2 == 1: - hashes.append(hashes[-1]) - hashes = [ - hashlib.sha256((hashes[i] + hashes[i+1]).encode()).hexdigest() - for i in range(0, len(hashes), 2) - ] - - return hashes[0] - - -# ============================================================================= -# Antiquity Score Calculation -# ============================================================================= - -def compute_antiquity_score(release_year: int, uptime_days: int) -> float: - """ - Calculate Antiquity Score per RIP-0001 spec. - - Formula: AS = (current_year - release_year) * log10(uptime_days + 1) - - This is NOT Proof of Work! Higher scores come from: - - Older hardware (larger age factor) - - Longer uptime (log scale to prevent gaming) - - Examples: - >>> compute_antiquity_score(1992, 276) # 486 DX2 - 80.46 # (2025-1992) * log10(277) - - >>> compute_antiquity_score(2023, 30) # Modern CPU - 2.96 # (2025-2023) * log10(31) - """ - age = max(0, CURRENT_YEAR - release_year) - uptime_factor = math.log10(uptime_days + 1) - return age * uptime_factor - - -def compute_reward(antiquity_score: float, base_reward: int) -> int: - """ - Calculate miner reward based on Antiquity Score. 
- - Formula: Reward = R * min(1.0, AS / AS_max) - - Args: - antiquity_score: Node's AS value - base_reward: Base block reward in smallest units - - Returns: - Calculated reward in smallest units - """ - reward_factor = min(1.0, antiquity_score / AS_MAX) - return int(base_reward * reward_factor) - - -# ============================================================================= -# Validator Selection -# ============================================================================= - -def select_validator(proofs: List[ValidatedProof]) -> Optional[ValidatedProof]: - """ - Select block validator using weighted lottery. - - Higher Antiquity Score = higher probability of selection. - This is NOT computational competition - it's a fair lottery - weighted by hardware preservation merit. - - Args: - proofs: List of validated proofs from eligible miners - - Returns: - Selected validator's proof, or None if no proofs - """ - if not proofs: - return None - - total_as = sum(p.antiquity_score for p in proofs) - if total_as == 0: - return random.choice(proofs) - - # Weighted random selection - r = random.uniform(0, total_as) - cumulative = 0.0 - - for proof in proofs: - cumulative += proof.antiquity_score - if r <= cumulative: - return proof - - return proofs[-1] - - -# ============================================================================= -# Proof of Antiquity Engine -# ============================================================================= - -class ProofOfAntiquity: - """ - Proof of Antiquity consensus engine. - - This is NOT Proof of Work! We validate: - 1. Hardware authenticity via deep entropy checks - 2. Hardware age via device signature database - 3. Node uptime via continuous validation - 4. No computational puzzles - just verification - - Block selection uses weighted lottery based on Antiquity Score. 
- """ - - def __init__(self): - self.pending_proofs: List[ValidatedProof] = [] - self.block_start_time: int = int(time.time()) - self.known_hardware: Dict[str, str] = {} # hash -> wallet - self.drifted_nodes: set = set() # Quarantined nodes - self.current_block_height: int = 0 - - def submit_proof( - self, - wallet: str, - hardware: HardwareProof, - anti_emulation_hash: str, - ) -> Dict[str, Any]: - """ - Submit a mining proof for the current block. - - Args: - wallet: Miner's wallet address - hardware: Hardware information - anti_emulation_hash: Hash from entropy verification - - Returns: - Result dict with acceptance status - """ - current_time = int(time.time()) - elapsed = current_time - self.block_start_time - - # Check if block window is still open - if elapsed >= BLOCK_TIME_SECONDS: - return {"success": False, "error": "Block window has closed"} - - # Check for drift lock - if wallet in self.drifted_nodes: - return {"success": False, "error": "Node is quarantined due to drift lock"} - - # Check for duplicate wallet submission - if any(p.wallet == wallet for p in self.pending_proofs): - return {"success": False, "error": "Already submitted proof for this block"} - - # Check max miners - if len(self.pending_proofs) >= MAX_MINERS_PER_BLOCK: - return {"success": False, "error": "Block has reached maximum miners"} - - # Calculate Antiquity Score - antiquity_score = compute_antiquity_score( - hardware.release_year, - hardware.uptime_days - ) - - # Check minimum AS threshold - if antiquity_score < AS_MIN: - return { - "success": False, - "error": f"Antiquity Score {antiquity_score:.2f} below minimum {AS_MIN}" - } - - # Check for duplicate hardware - if hardware.hardware_hash in self.known_hardware: - existing_wallet = self.known_hardware[hardware.hardware_hash] - if existing_wallet != wallet: - return { - "success": False, - "error": f"Hardware already registered to {existing_wallet}" - } - - # Create validated proof - validated = ValidatedProof( - wallet=wallet, - 
hardware=hardware, - antiquity_score=antiquity_score, - anti_emulation_hash=anti_emulation_hash, - validated_at=current_time, - ) - - self.pending_proofs.append(validated) - self.known_hardware[hardware.hardware_hash] = wallet - - return { - "success": True, - "message": "Proof accepted, waiting for block completion", - "pending_miners": len(self.pending_proofs), - "your_antiquity_score": antiquity_score, - "your_tier": validated.tier, - "block_completes_in": BLOCK_TIME_SECONDS - elapsed, - } - - def produce_block(self, previous_hash: str) -> Optional[Block]: - """ - Process all pending proofs and create a new block. - - Uses weighted lottery based on Antiquity Score for reward distribution. - This is NOT a competition - all valid miners share the reward - proportionally to their Antiquity Score. - - Args: - previous_hash: Hash of previous block - - Returns: - New block if proofs exist, None otherwise - """ - if not self.pending_proofs: - self._reset_block() - return None - - # Calculate base reward for this height - base_reward_rtc = calculate_block_reward(self.current_block_height + 1) - base_reward = int(float(base_reward_rtc) * ONE_RTC) - - # Calculate total AS for weighted distribution - total_as = sum(p.antiquity_score for p in self.pending_proofs) - - # Calculate rewards for each miner (proportional to AS) - miners = [] - total_distributed = 0 - - for proof in self.pending_proofs: - # Weighted share based on AS - share = proof.antiquity_score / total_as if total_as > 0 else 1.0 / len(self.pending_proofs) - reward = int(base_reward * share) - total_distributed += reward - - miners.append(BlockMiner( - wallet=proof.wallet, - hardware_model=proof.hardware.cpu_model, - antiquity_score=proof.antiquity_score, - reward=reward, - )) - - # Create new block - self.current_block_height += 1 - block = Block( - height=self.current_block_height, - timestamp=int(time.time()), - previous_hash=previous_hash, - miners=miners, - total_reward=total_distributed, - ) - - 
print(f"Block #{block.height} created! " - f"Reward: {total_distributed / ONE_RTC:.2f} RTC " - f"split among {len(miners)} miners") - - # Reset for next block - self._reset_block() - - return block - - def validate_block(self, block: Block, previous_block: Optional[Block]) -> bool: - """ - Validate an incoming block. - - Checks: - - Height is sequential - - Previous hash matches - - Timestamp is reasonable - - All miners have valid AS - - Total reward doesn't exceed allowed - - Args: - block: Block to validate - previous_block: Previous block in chain - - Returns: - True if valid, False otherwise - """ - # Check height - expected_height = (previous_block.height + 1) if previous_block else 1 - if block.height != expected_height: - return False - - # Check previous hash - expected_prev = previous_block.hash if previous_block else "0" * 64 - if block.previous_hash != expected_prev: - return False - - # Check timestamp (not too far in future) - if block.timestamp > int(time.time()) + 120: # 2 min tolerance - return False - - # Check miners have valid AS - for miner in block.miners: - if miner.antiquity_score < AS_MIN: - return False - - # Check total reward - max_reward = int(float(calculate_block_reward(block.height)) * ONE_RTC) - if block.total_reward > max_reward * 1.01: # 1% tolerance for rounding - return False - - return True - - def _reset_block(self): - """Reset state for next block""" - self.pending_proofs.clear() - self.block_start_time = int(time.time()) - - def get_status(self) -> Dict[str, Any]: - """Get current block status""" - elapsed = int(time.time()) - self.block_start_time - total_as = sum(p.antiquity_score for p in self.pending_proofs) - - return { - "current_block_height": self.current_block_height, - "pending_proofs": len(self.pending_proofs), - "total_antiquity_score": total_as, - "block_age_seconds": elapsed, - "time_remaining_seconds": max(0, BLOCK_TIME_SECONDS - elapsed), - "accepting_proofs": elapsed < BLOCK_TIME_SECONDS, - } - - def 
quarantine_node(self, wallet: str, reason: str): - """Quarantine a node due to drift lock violation""" - self.drifted_nodes.add(wallet) - print(f"Node {wallet} quarantined: {reason}") - - def release_node(self, wallet: str): - """Release a node from quarantine""" - self.drifted_nodes.discard(wallet) - print(f"Node {wallet} released from quarantine") - - -# ============================================================================= -# Demonstration -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN PROOF OF ANTIQUITY - NOT PROOF OF WORK!") - print("=" * 60) - print() - print("Formula: AS = (current_year - release_year) * log10(uptime_days + 1)") - print() - - examples = [ - ("Intel 486 DX2-66", 1992, 276), - ("PowerPC G4 1.25GHz", 2002, 276), - ("Core 2 Duo E8400", 2008, 180), - ("Ryzen 9 7950X", 2022, 30), - ] - - for model, year, uptime in examples: - score = compute_antiquity_score(year, uptime) - age = CURRENT_YEAR - year - - print(f"Hardware: {model} ({year})") - print(f" Age: {age} years") - print(f" Uptime: {uptime} days") - print(f" Antiquity Score: {score:.2f}") - print() - - print("Remember: Older hardware WINS, not faster hardware!") +""" +RustChain Proof of Antiquity Consensus (RIP-0001) +================================================= + +Core consensus mechanism that rewards vintage hardware preservation. + +REMEMBER: This is NOT Proof of Work! 
+- No computational puzzles +- Rewards hardware age, not speed +- Older hardware wins over newer hardware +- Anti-emulation via deep entropy + +Formula: AS = (current_year - release_year) * log10(uptime_days + 1) +""" + +import hashlib +import math +import random +import time +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Any +from decimal import Decimal + +from ..config.chain_params import ( + CURRENT_YEAR, + AS_MAX, + AS_MIN, + BLOCK_REWARD, + BLOCK_TIME_SECONDS, + MAX_MINERS_PER_BLOCK, + ONE_RTC, + calculate_block_reward, +) + + +# ============================================================================= +# Data Structures +# ============================================================================= + +@dataclass +class HardwareProof: + """Hardware attestation for mining eligibility""" + cpu_model: str + release_year: int + uptime_days: int + hardware_hash: str + entropy_proof: Optional[bytes] = None + + +@dataclass +class ValidatedProof: + """A validated mining proof ready for block inclusion""" + wallet: str + hardware: HardwareProof + antiquity_score: float + anti_emulation_hash: str + validated_at: int + tier: str = "" + + def __post_init__(self): + age = CURRENT_YEAR - self.hardware.release_year + self.tier = self._get_tier(age) + + def _get_tier(self, age: int) -> str: + if age >= 30: return "ancient" + if age >= 25: return "sacred" + if age >= 20: return "vintage" + if age >= 15: return "classic" + if age >= 10: return "retro" + if age >= 5: return "modern" + return "recent" + + +@dataclass +class BlockMiner: + """Miner entry in a block""" + wallet: str + hardware_model: str + antiquity_score: float + reward: int # In smallest units + + +@dataclass +class Block: + """RustChain block""" + height: int + timestamp: int + previous_hash: str + miners: List[BlockMiner] + total_reward: int + merkle_root: str = "" + hash: str = "" + + def __post_init__(self): + if not self.merkle_root: + self.merkle_root = 
self._calculate_merkle_root() + if not self.hash: + self.hash = self._calculate_hash() + + def _calculate_hash(self) -> str: + data = f"{self.height}:{self.timestamp}:{self.previous_hash}:{self.merkle_root}" + return hashlib.sha256(data.encode()).hexdigest() + + def _calculate_merkle_root(self) -> str: + if not self.miners: + return hashlib.sha256(b"empty").hexdigest() + + hashes = [ + hashlib.sha256(f"{m.wallet}:{m.antiquity_score}:{m.reward}".encode()).hexdigest() + for m in self.miners + ] + + while len(hashes) > 1: + if len(hashes) % 2 == 1: + hashes.append(hashes[-1]) + hashes = [ + hashlib.sha256((hashes[i] + hashes[i+1]).encode()).hexdigest() + for i in range(0, len(hashes), 2) + ] + + return hashes[0] + + +# ============================================================================= +# Antiquity Score Calculation +# ============================================================================= + +def compute_antiquity_score(release_year: int, uptime_days: int) -> float: + """ + Calculate Antiquity Score per RIP-0001 spec. + + Formula: AS = (current_year - release_year) * log10(uptime_days + 1) + + This is NOT Proof of Work! Higher scores come from: + - Older hardware (larger age factor) + - Longer uptime (log scale to prevent gaming) + + Examples: + >>> compute_antiquity_score(1992, 276) # 486 DX2 + 80.46 # (2025-1992) * log10(277) + + >>> compute_antiquity_score(2023, 30) # Modern CPU + 2.96 # (2025-2023) * log10(31) + """ + age = max(0, CURRENT_YEAR - release_year) + uptime_factor = math.log10(uptime_days + 1) + return age * uptime_factor + + +def compute_reward(antiquity_score: float, base_reward: int) -> int: + """ + Calculate miner reward based on Antiquity Score. 
+ + Formula: Reward = R * min(1.0, AS / AS_max) + + Args: + antiquity_score: Node's AS value + base_reward: Base block reward in smallest units + + Returns: + Calculated reward in smallest units + """ + reward_factor = min(1.0, antiquity_score / AS_MAX) + return int(base_reward * reward_factor) + + +# ============================================================================= +# Validator Selection +# ============================================================================= + +def select_validator(proofs: List[ValidatedProof]) -> Optional[ValidatedProof]: + """ + Select block validator using weighted lottery. + + Higher Antiquity Score = higher probability of selection. + This is NOT computational competition - it's a fair lottery + weighted by hardware preservation merit. + + Args: + proofs: List of validated proofs from eligible miners + + Returns: + Selected validator's proof, or None if no proofs + """ + if not proofs: + return None + + total_as = sum(p.antiquity_score for p in proofs) + if total_as == 0: + return random.choice(proofs) + + # Weighted random selection + r = random.uniform(0, total_as) + cumulative = 0.0 + + for proof in proofs: + cumulative += proof.antiquity_score + if r <= cumulative: + return proof + + return proofs[-1] + + +# ============================================================================= +# Proof of Antiquity Engine +# ============================================================================= + +class ProofOfAntiquity: + """ + Proof of Antiquity consensus engine. + + This is NOT Proof of Work! We validate: + 1. Hardware authenticity via deep entropy checks + 2. Hardware age via device signature database + 3. Node uptime via continuous validation + 4. No computational puzzles - just verification + + Block selection uses weighted lottery based on Antiquity Score. 
+ """ + + def __init__(self): + self.pending_proofs: List[ValidatedProof] = [] + self.block_start_time: int = int(time.time()) + self.known_hardware: Dict[str, str] = {} # hash -> wallet + self.drifted_nodes: set = set() # Quarantined nodes + self.current_block_height: int = 0 + + def submit_proof( + self, + wallet: str, + hardware: HardwareProof, + anti_emulation_hash: str, + ) -> Dict[str, Any]: + """ + Submit a mining proof for the current block. + + Args: + wallet: Miner's wallet address + hardware: Hardware information + anti_emulation_hash: Hash from entropy verification + + Returns: + Result dict with acceptance status + """ + current_time = int(time.time()) + elapsed = current_time - self.block_start_time + + # Check if block window is still open + if elapsed >= BLOCK_TIME_SECONDS: + return {"success": False, "error": "Block window has closed"} + + # Check for drift lock + if wallet in self.drifted_nodes: + return {"success": False, "error": "Node is quarantined due to drift lock"} + + # Check for duplicate wallet submission + if any(p.wallet == wallet for p in self.pending_proofs): + return {"success": False, "error": "Already submitted proof for this block"} + + # Check max miners + if len(self.pending_proofs) >= MAX_MINERS_PER_BLOCK: + return {"success": False, "error": "Block has reached maximum miners"} + + # Calculate Antiquity Score + antiquity_score = compute_antiquity_score( + hardware.release_year, + hardware.uptime_days + ) + + # Check minimum AS threshold + if antiquity_score < AS_MIN: + return { + "success": False, + "error": f"Antiquity Score {antiquity_score:.2f} below minimum {AS_MIN}" + } + + # Check for duplicate hardware + if hardware.hardware_hash in self.known_hardware: + existing_wallet = self.known_hardware[hardware.hardware_hash] + if existing_wallet != wallet: + return { + "success": False, + "error": f"Hardware already registered to {existing_wallet}" + } + + # Create validated proof + validated = ValidatedProof( + wallet=wallet, + 
hardware=hardware, + antiquity_score=antiquity_score, + anti_emulation_hash=anti_emulation_hash, + validated_at=current_time, + ) + + self.pending_proofs.append(validated) + self.known_hardware[hardware.hardware_hash] = wallet + + return { + "success": True, + "message": "Proof accepted, waiting for block completion", + "pending_miners": len(self.pending_proofs), + "your_antiquity_score": antiquity_score, + "your_tier": validated.tier, + "block_completes_in": BLOCK_TIME_SECONDS - elapsed, + } + + def produce_block(self, previous_hash: str) -> Optional[Block]: + """ + Process all pending proofs and create a new block. + + Uses weighted lottery based on Antiquity Score for reward distribution. + This is NOT a competition - all valid miners share the reward + proportionally to their Antiquity Score. + + Args: + previous_hash: Hash of previous block + + Returns: + New block if proofs exist, None otherwise + """ + if not self.pending_proofs: + self._reset_block() + return None + + # Calculate base reward for this height + base_reward_rtc = calculate_block_reward(self.current_block_height + 1) + base_reward = int(float(base_reward_rtc) * ONE_RTC) + + # Calculate total AS for weighted distribution + total_as = sum(p.antiquity_score for p in self.pending_proofs) + + # Calculate rewards for each miner (proportional to AS) + miners = [] + total_distributed = 0 + + for proof in self.pending_proofs: + # Weighted share based on AS + share = proof.antiquity_score / total_as if total_as > 0 else 1.0 / len(self.pending_proofs) + reward = int(base_reward * share) + total_distributed += reward + + miners.append(BlockMiner( + wallet=proof.wallet, + hardware_model=proof.hardware.cpu_model, + antiquity_score=proof.antiquity_score, + reward=reward, + )) + + # Create new block + self.current_block_height += 1 + block = Block( + height=self.current_block_height, + timestamp=int(time.time()), + previous_hash=previous_hash, + miners=miners, + total_reward=total_distributed, + ) + + 
print(f"Block #{block.height} created! " + f"Reward: {total_distributed / ONE_RTC:.2f} RTC " + f"split among {len(miners)} miners") + + # Reset for next block + self._reset_block() + + return block + + def validate_block(self, block: Block, previous_block: Optional[Block]) -> bool: + """ + Validate an incoming block. + + Checks: + - Height is sequential + - Previous hash matches + - Timestamp is reasonable + - All miners have valid AS + - Total reward doesn't exceed allowed + + Args: + block: Block to validate + previous_block: Previous block in chain + + Returns: + True if valid, False otherwise + """ + # Check height + expected_height = (previous_block.height + 1) if previous_block else 1 + if block.height != expected_height: + return False + + # Check previous hash + expected_prev = previous_block.hash if previous_block else "0" * 64 + if block.previous_hash != expected_prev: + return False + + # Check timestamp (not too far in future) + if block.timestamp > int(time.time()) + 120: # 2 min tolerance + return False + + # Check miners have valid AS + for miner in block.miners: + if miner.antiquity_score < AS_MIN: + return False + + # Check total reward + max_reward = int(float(calculate_block_reward(block.height)) * ONE_RTC) + if block.total_reward > max_reward * 1.01: # 1% tolerance for rounding + return False + + return True + + def _reset_block(self): + """Reset state for next block""" + self.pending_proofs.clear() + self.block_start_time = int(time.time()) + + def get_status(self) -> Dict[str, Any]: + """Get current block status""" + elapsed = int(time.time()) - self.block_start_time + total_as = sum(p.antiquity_score for p in self.pending_proofs) + + return { + "current_block_height": self.current_block_height, + "pending_proofs": len(self.pending_proofs), + "total_antiquity_score": total_as, + "block_age_seconds": elapsed, + "time_remaining_seconds": max(0, BLOCK_TIME_SECONDS - elapsed), + "accepting_proofs": elapsed < BLOCK_TIME_SECONDS, + } + + def 
quarantine_node(self, wallet: str, reason: str): + """Quarantine a node due to drift lock violation""" + self.drifted_nodes.add(wallet) + print(f"Node {wallet} quarantined: {reason}") + + def release_node(self, wallet: str): + """Release a node from quarantine""" + self.drifted_nodes.discard(wallet) + print(f"Node {wallet} released from quarantine") + + +# ============================================================================= +# Demonstration +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN PROOF OF ANTIQUITY - NOT PROOF OF WORK!") + print("=" * 60) + print() + print("Formula: AS = (current_year - release_year) * log10(uptime_days + 1)") + print() + + examples = [ + ("Intel 486 DX2-66", 1992, 276), + ("PowerPC G4 1.25GHz", 2002, 276), + ("Core 2 Duo E8400", 2008, 180), + ("Ryzen 9 7950X", 2022, 30), + ] + + for model, year, uptime in examples: + score = compute_antiquity_score(year, uptime) + age = CURRENT_YEAR - year + + print(f"Hardware: {model} ({year})") + print(f" Age: {age} years") + print(f" Uptime: {uptime} days") + print(f" Antiquity Score: {score:.2f}") + print() + + print("Remember: Older hardware WINS, not faster hardware!") diff --git a/rips/rustchain-core/governance/proposals.py b/rips/rustchain-core/governance/proposals.py index 84cc5c6a..db298f38 100644 --- a/rips/rustchain-core/governance/proposals.py +++ b/rips/rustchain-core/governance/proposals.py @@ -1,584 +1,584 @@ -""" -RustChain Governance Proposals (RIP-0002, RIP-0005, RIP-0006) -============================================================= - -Proposal lifecycle and voting system with Sophia AI integration. - -Lifecycle: -1. Draft -> Submitted -2. Sophia Review (Endorse/Veto/Analyze) -3. Voting Period (7 days) -4. Passed/Rejected/Vetoed -5. 
Execution (if passed) -""" - -import hashlib -import time -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Any, Callable -from enum import Enum, auto -from decimal import Decimal - -from ..config.chain_params import ( - VOTING_PERIOD_DAYS, - QUORUM_PERCENTAGE, - EXECUTION_DELAY_BLOCKS, - REPUTATION_DECAY_WEEKLY, - TOTAL_SUPPLY, -) - - -# ============================================================================= -# Enums -# ============================================================================= - -class ProposalStatus(Enum): - """Proposal lifecycle status""" - DRAFT = auto() - SUBMITTED = auto() - SOPHIA_REVIEW = auto() - VOTING = auto() - PASSED = auto() - REJECTED = auto() - VETOED = auto() - EXECUTED = auto() - EXPIRED = auto() - - -class ProposalType(Enum): - """Types of governance proposals""" - PARAMETER_CHANGE = auto() - MONETARY_POLICY = auto() - PROTOCOL_UPGRADE = auto() - VALIDATOR_CHANGE = auto() - SMART_CONTRACT = auto() - COMMUNITY = auto() - - -class SophiaDecision(Enum): - """Sophia AI evaluation decisions""" - PENDING = auto() - ENDORSE = auto() # Boosts support probability - VETO = auto() # Locks the proposal - ANALYZE = auto() # Neutral, logs public rationale - - -# ============================================================================= -# Data Structures -# ============================================================================= - -@dataclass -class Vote: - """A single vote on a proposal""" - voter: str - support: bool - weight: int - timestamp: int - delegation_from: Optional[str] = None - - -@dataclass -class SophiaEvaluation: - """Sophia AI's evaluation of a proposal""" - decision: SophiaDecision - rationale: str - feasibility_score: float - risk_level: str # "low", "medium", "high" - aligned_precedent: List[str] - timestamp: int - - -@dataclass -class Proposal: - """A governance proposal""" - id: str - title: str - description: str - proposal_type: ProposalType - proposer: str - 
created_at: int - status: ProposalStatus = ProposalStatus.DRAFT - - # Contract binding (RIP-0005) - contract_hash: Optional[str] = None - requires_multi_sig: bool = False - timelock_blocks: int = EXECUTION_DELAY_BLOCKS - auto_expire: bool = True - - # Voting data - votes: List[Vote] = field(default_factory=list) - voting_starts_at: Optional[int] = None - voting_ends_at: Optional[int] = None - - # Sophia evaluation - sophia_evaluation: Optional[SophiaEvaluation] = None - - # Execution - executed_at: Optional[int] = None - execution_tx_hash: Optional[str] = None - - @property - def yes_votes(self) -> int: - return sum(v.weight for v in self.votes if v.support) - - @property - def no_votes(self) -> int: - return sum(v.weight for v in self.votes if not v.support) - - @property - def total_votes(self) -> int: - return sum(v.weight for v in self.votes) - - @property - def approval_percentage(self) -> float: - total = self.total_votes - if total == 0: - return 0.0 - return self.yes_votes / total - - def has_voted(self, voter: str) -> bool: - return any(v.voter == voter for v in self.votes) - - def to_dict(self) -> Dict[str, Any]: - return { - "id": self.id, - "title": self.title, - "description": self.description, - "type": self.proposal_type.name, - "proposer": self.proposer, - "status": self.status.name, - "created_at": self.created_at, - "contract_hash": self.contract_hash, - "yes_votes": self.yes_votes, - "no_votes": self.no_votes, - "total_votes": self.total_votes, - "approval_percentage": self.approval_percentage, - "sophia_decision": ( - self.sophia_evaluation.decision.name - if self.sophia_evaluation else "PENDING" - ), - } - - -# ============================================================================= -# Reputation System (RIP-0006) -# ============================================================================= - -@dataclass -class NodeReputation: - """Reputation score for a node/wallet""" - wallet: str - score: float = 50.0 # Start neutral (0-100) - 
participation_count: int = 0 - correct_predictions: int = 0 - uptime_contribution: float = 0.0 - sophia_alignment: float = 0.0 - last_activity: int = 0 - - def decay(self, weeks_inactive: int): - """Apply decay for inactivity""" - decay_factor = (1 - REPUTATION_DECAY_WEEKLY) ** weeks_inactive - self.score *= decay_factor - - def update_alignment(self, voted_with_sophia: bool): - """Update Sophia alignment score""" - weight = 0.1 - if voted_with_sophia: - self.sophia_alignment = min(1.0, self.sophia_alignment + weight) - else: - self.sophia_alignment = max(0.0, self.sophia_alignment - weight) - - -@dataclass -class Delegation: - """Voting power delegation""" - from_wallet: str - to_wallet: str - weight: float # Percentage (0.0 - 1.0) - created_at: int - expires_at: Optional[int] = None - - def is_active(self, current_time: int) -> bool: - if self.expires_at and current_time > self.expires_at: - return False - return True - - -# ============================================================================= -# Governance Engine -# ============================================================================= - -class GovernanceEngine: - """ - Main governance engine implementing RIP-0002, RIP-0005, RIP-0006. - - Lifecycle: - 1. Proposal created via create_proposal() - 2. Sophia evaluates via sophia_evaluate() - 3. If not vetoed, voting begins - 4. After voting period, proposal passes/fails - 5. 
Passed proposals execute after delay - """ - - def __init__(self, total_supply: int = TOTAL_SUPPLY): - self.proposals: Dict[str, Proposal] = {} - self.reputations: Dict[str, NodeReputation] = {} - self.delegations: Dict[str, List[Delegation]] = {} - self.total_supply = total_supply - self.proposal_counter = 0 - - def create_proposal( - self, - title: str, - description: str, - proposal_type: ProposalType, - proposer: str, - contract_hash: Optional[str] = None, - ) -> Proposal: - """Create a new governance proposal.""" - self.proposal_counter += 1 - proposal_id = f"RCP-{self.proposal_counter:04d}" - - proposal = Proposal( - id=proposal_id, - title=title, - description=description, - proposal_type=proposal_type, - proposer=proposer, - created_at=int(time.time()), - contract_hash=contract_hash, - status=ProposalStatus.SUBMITTED, - ) - - self.proposals[proposal_id] = proposal - self._update_reputation(proposer, "propose") - - return proposal - - def sophia_evaluate( - self, - proposal_id: str, - decision: SophiaDecision, - rationale: str, - feasibility_score: float = 0.5, - risk_level: str = "medium", - ) -> SophiaEvaluation: - """Record Sophia AI's evaluation (RIP-0002).""" - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - evaluation = SophiaEvaluation( - decision=decision, - rationale=rationale, - feasibility_score=feasibility_score, - risk_level=risk_level, - aligned_precedent=[], - timestamp=int(time.time()), - ) - - proposal.sophia_evaluation = evaluation - now = int(time.time()) - - if decision == SophiaDecision.VETO: - proposal.status = ProposalStatus.VETOED - print(f"SOPHIA VETO: {proposal_id} - {rationale}") - elif decision == SophiaDecision.ENDORSE: - proposal.status = ProposalStatus.VOTING - proposal.voting_starts_at = now - proposal.voting_ends_at = now + (VOTING_PERIOD_DAYS * 86400) - print(f"SOPHIA ENDORSE: {proposal_id}") - else: # ANALYZE - proposal.status = ProposalStatus.VOTING 
- proposal.voting_starts_at = now - proposal.voting_ends_at = now + (VOTING_PERIOD_DAYS * 86400) - print(f"SOPHIA ANALYZE: {proposal_id} - {rationale}") - - return evaluation - - def vote( - self, - proposal_id: str, - voter: str, - support: bool, - token_balance: int, - ) -> Vote: - """Cast a vote on a proposal.""" - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - if proposal.status != ProposalStatus.VOTING: - raise ValueError(f"Proposal not in voting phase: {proposal.status}") - - now = int(time.time()) - if proposal.voting_ends_at and now > proposal.voting_ends_at: - raise ValueError("Voting period has ended") - - if proposal.has_voted(voter): - raise ValueError("Already voted on this proposal") - - # Calculate voting weight - reputation = self.reputations.get(voter) - rep_bonus = (reputation.score / 100.0) if reputation else 0.5 - base_weight = int(token_balance * (1 + rep_bonus * 0.2)) - - # Include delegated votes - delegated_weight = self._get_delegated_weight(voter, now) - total_weight = base_weight + delegated_weight - - vote = Vote( - voter=voter, - support=support, - weight=total_weight, - timestamp=now, - ) - - proposal.votes.append(vote) - self._update_reputation(voter, "vote") - - return vote - - def finalize_proposal(self, proposal_id: str) -> ProposalStatus: - """Finalize a proposal after voting period ends.""" - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - if proposal.status != ProposalStatus.VOTING: - return proposal.status - - now = int(time.time()) - if proposal.voting_ends_at and now < proposal.voting_ends_at: - return proposal.status # Still voting - - # Check quorum - participation = proposal.total_votes / self.total_supply - - if participation < QUORUM_PERCENTAGE: - proposal.status = ProposalStatus.REJECTED - print(f"REJECTED (quorum): {proposal_id} - {participation:.1%} < 
{QUORUM_PERCENTAGE:.0%}") - return proposal.status - - # Check approval - if proposal.approval_percentage > 0.5: - proposal.status = ProposalStatus.PASSED - print(f"PASSED: {proposal_id} - {proposal.approval_percentage:.1%} approval") - self._update_sophia_alignment(proposal) - else: - proposal.status = ProposalStatus.REJECTED - print(f"REJECTED: {proposal_id} - {proposal.approval_percentage:.1%} approval") - - return proposal.status - - def execute_proposal(self, proposal_id: str) -> str: - """Execute a passed proposal (RIP-0005).""" - proposal = self.proposals.get(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - if proposal.status != ProposalStatus.PASSED: - raise ValueError(f"Cannot execute: status is {proposal.status}") - - # Vetoed proposals cannot execute - if (proposal.sophia_evaluation and - proposal.sophia_evaluation.decision == SophiaDecision.VETO): - raise ValueError("Vetoed proposals cannot be executed") - - now = int(time.time()) - tx_hash = hashlib.sha256(f"{proposal_id}:{now}".encode()).hexdigest() - - proposal.status = ProposalStatus.EXECUTED - proposal.executed_at = now - proposal.execution_tx_hash = tx_hash - - print(f"EXECUTED: {proposal_id} - TX: {tx_hash[:16]}...") - return tx_hash - - def delegate_voting_power( - self, - from_wallet: str, - to_wallet: str, - weight: float, - duration_days: Optional[int] = None, - ) -> Delegation: - """Delegate voting power to another wallet (RIP-0006).""" - if weight < 0 or weight > 1: - raise ValueError("Delegation weight must be between 0 and 1") - - now = int(time.time()) - expires_at = now + (duration_days * 86400) if duration_days else None - - delegation = Delegation( - from_wallet=from_wallet, - to_wallet=to_wallet, - weight=weight, - created_at=now, - expires_at=expires_at, - ) - - if to_wallet not in self.delegations: - self.delegations[to_wallet] = [] - self.delegations[to_wallet].append(delegation) - - return delegation - - def _get_delegated_weight(self, 
wallet: str, current_time: int) -> int: - """Get total delegated voting weight for a wallet.""" - delegations = self.delegations.get(wallet, []) - total = 0 - for d in delegations: - if d.is_active(current_time): - total += int(d.weight * 100) # Scale weight - return total - - def _update_reputation(self, wallet: str, activity_type: str): - """Update wallet reputation based on activity.""" - if wallet not in self.reputations: - self.reputations[wallet] = NodeReputation( - wallet=wallet, - last_activity=int(time.time()), - ) - - rep = self.reputations[wallet] - rep.participation_count += 1 - rep.last_activity = int(time.time()) - - if activity_type == "vote": - rep.score = min(100, rep.score + 0.5) - elif activity_type == "propose": - rep.score = min(100, rep.score + 1.0) - - def _update_sophia_alignment(self, proposal: Proposal): - """Update voter reputations based on Sophia alignment.""" - if not proposal.sophia_evaluation: - return - - sophia_decision = proposal.sophia_evaluation.decision - if sophia_decision == SophiaDecision.ANALYZE: - return - - sophia_supported = sophia_decision == SophiaDecision.ENDORSE - - for vote in proposal.votes: - voted_with_sophia = vote.support == sophia_supported - rep = self.reputations.get(vote.voter) - if rep: - rep.update_alignment(voted_with_sophia) - - def get_proposal(self, proposal_id: str) -> Optional[Proposal]: - """Get a proposal by ID.""" - return self.proposals.get(proposal_id) - - def get_active_proposals(self) -> List[Proposal]: - """Get all proposals currently in voting.""" - return [ - p for p in self.proposals.values() - if p.status == ProposalStatus.VOTING - ] - - def get_all_proposals(self) -> List[Proposal]: - """Get all proposals.""" - return list(self.proposals.values()) - - -# ============================================================================= -# Sophia AI Interface -# ============================================================================= - -class SophiaEvaluator: - """ - Interface for Sophia 
AI proposal evaluation. - - In production, this connects to Sophia's neural network. - For development, uses rule-based heuristics. - """ - - def __init__(self, governance: GovernanceEngine): - self.governance = governance - - def evaluate(self, proposal_id: str) -> SophiaEvaluation: - """ - Evaluate a proposal using Sophia's judgment. - - Factors considered: - - Proposal type and risk - - Historical precedent - - Community sentiment - - Technical feasibility - """ - proposal = self.governance.get_proposal(proposal_id) - if not proposal: - raise ValueError(f"Proposal {proposal_id} not found") - - # Rule-based evaluation (placeholder for neural network) - risk_scores = { - ProposalType.PARAMETER_CHANGE: 0.3, - ProposalType.MONETARY_POLICY: 0.7, - ProposalType.PROTOCOL_UPGRADE: 0.6, - ProposalType.VALIDATOR_CHANGE: 0.4, - ProposalType.SMART_CONTRACT: 0.5, - ProposalType.COMMUNITY: 0.2, - } - - risk = risk_scores.get(proposal.proposal_type, 0.5) - - # High risk -> more scrutiny - if risk > 0.6: - if "emergency" in proposal.title.lower(): - decision = SophiaDecision.ANALYZE - rationale = "Emergency proposal requires careful review" - else: - decision = SophiaDecision.ANALYZE - rationale = f"High-risk {proposal.proposal_type.name} proposal" - elif risk > 0.4: - decision = SophiaDecision.ANALYZE - rationale = "Moderate impact - community should decide" - else: - decision = SophiaDecision.ENDORSE - rationale = "Low-risk proposal aligned with community values" - - # Apply evaluation - return self.governance.sophia_evaluate( - proposal_id=proposal_id, - decision=decision, - rationale=rationale, - feasibility_score=1.0 - risk, - risk_level="high" if risk > 0.6 else "medium" if risk > 0.3 else "low", - ) - - -# ============================================================================= -# Tests -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN GOVERNANCE ENGINE TEST") - 
print("=" * 60) - - engine = GovernanceEngine() - sophia = SophiaEvaluator(engine) - - # Create proposal - proposal = engine.create_proposal( - title="Increase Block Reward", - description="Proposal to increase block reward from 1.5 to 2.0 RTC", - proposal_type=ProposalType.MONETARY_POLICY, - proposer="RTC1TestProposer", - ) - - print(f"\nCreated: {proposal.id} - {proposal.title}") - - # Sophia evaluates - evaluation = sophia.evaluate(proposal.id) - print(f"Sophia: {evaluation.decision.name} - {evaluation.rationale}") - - # Cast votes - if proposal.status == ProposalStatus.VOTING: - engine.vote(proposal.id, "RTC1Voter1", True, 1000000) - engine.vote(proposal.id, "RTC1Voter2", True, 500000) - engine.vote(proposal.id, "RTC1Voter3", False, 300000) - - print(f"\nVotes: {proposal.yes_votes} yes, {proposal.no_votes} no") - print(f"Approval: {proposal.approval_percentage:.1%}") +""" +RustChain Governance Proposals (RIP-0002, RIP-0005, RIP-0006) +============================================================= + +Proposal lifecycle and voting system with Sophia AI integration. + +Lifecycle: +1. Draft -> Submitted +2. Sophia Review (Endorse/Veto/Analyze) +3. Voting Period (7 days) +4. Passed/Rejected/Vetoed +5. 
Execution (if passed) +""" + +import hashlib +import time +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Any, Callable +from enum import Enum, auto +from decimal import Decimal + +from ..config.chain_params import ( + VOTING_PERIOD_DAYS, + QUORUM_PERCENTAGE, + EXECUTION_DELAY_BLOCKS, + REPUTATION_DECAY_WEEKLY, + TOTAL_SUPPLY, +) + + +# ============================================================================= +# Enums +# ============================================================================= + +class ProposalStatus(Enum): + """Proposal lifecycle status""" + DRAFT = auto() + SUBMITTED = auto() + SOPHIA_REVIEW = auto() + VOTING = auto() + PASSED = auto() + REJECTED = auto() + VETOED = auto() + EXECUTED = auto() + EXPIRED = auto() + + +class ProposalType(Enum): + """Types of governance proposals""" + PARAMETER_CHANGE = auto() + MONETARY_POLICY = auto() + PROTOCOL_UPGRADE = auto() + VALIDATOR_CHANGE = auto() + SMART_CONTRACT = auto() + COMMUNITY = auto() + + +class SophiaDecision(Enum): + """Sophia AI evaluation decisions""" + PENDING = auto() + ENDORSE = auto() # Boosts support probability + VETO = auto() # Locks the proposal + ANALYZE = auto() # Neutral, logs public rationale + + +# ============================================================================= +# Data Structures +# ============================================================================= + +@dataclass +class Vote: + """A single vote on a proposal""" + voter: str + support: bool + weight: int + timestamp: int + delegation_from: Optional[str] = None + + +@dataclass +class SophiaEvaluation: + """Sophia AI's evaluation of a proposal""" + decision: SophiaDecision + rationale: str + feasibility_score: float + risk_level: str # "low", "medium", "high" + aligned_precedent: List[str] + timestamp: int + + +@dataclass +class Proposal: + """A governance proposal""" + id: str + title: str + description: str + proposal_type: ProposalType + proposer: str + 
created_at: int + status: ProposalStatus = ProposalStatus.DRAFT + + # Contract binding (RIP-0005) + contract_hash: Optional[str] = None + requires_multi_sig: bool = False + timelock_blocks: int = EXECUTION_DELAY_BLOCKS + auto_expire: bool = True + + # Voting data + votes: List[Vote] = field(default_factory=list) + voting_starts_at: Optional[int] = None + voting_ends_at: Optional[int] = None + + # Sophia evaluation + sophia_evaluation: Optional[SophiaEvaluation] = None + + # Execution + executed_at: Optional[int] = None + execution_tx_hash: Optional[str] = None + + @property + def yes_votes(self) -> int: + return sum(v.weight for v in self.votes if v.support) + + @property + def no_votes(self) -> int: + return sum(v.weight for v in self.votes if not v.support) + + @property + def total_votes(self) -> int: + return sum(v.weight for v in self.votes) + + @property + def approval_percentage(self) -> float: + total = self.total_votes + if total == 0: + return 0.0 + return self.yes_votes / total + + def has_voted(self, voter: str) -> bool: + return any(v.voter == voter for v in self.votes) + + def to_dict(self) -> Dict[str, Any]: + return { + "id": self.id, + "title": self.title, + "description": self.description, + "type": self.proposal_type.name, + "proposer": self.proposer, + "status": self.status.name, + "created_at": self.created_at, + "contract_hash": self.contract_hash, + "yes_votes": self.yes_votes, + "no_votes": self.no_votes, + "total_votes": self.total_votes, + "approval_percentage": self.approval_percentage, + "sophia_decision": ( + self.sophia_evaluation.decision.name + if self.sophia_evaluation else "PENDING" + ), + } + + +# ============================================================================= +# Reputation System (RIP-0006) +# ============================================================================= + +@dataclass +class NodeReputation: + """Reputation score for a node/wallet""" + wallet: str + score: float = 50.0 # Start neutral (0-100) + 
participation_count: int = 0 + correct_predictions: int = 0 + uptime_contribution: float = 0.0 + sophia_alignment: float = 0.0 + last_activity: int = 0 + + def decay(self, weeks_inactive: int): + """Apply decay for inactivity""" + decay_factor = (1 - REPUTATION_DECAY_WEEKLY) ** weeks_inactive + self.score *= decay_factor + + def update_alignment(self, voted_with_sophia: bool): + """Update Sophia alignment score""" + weight = 0.1 + if voted_with_sophia: + self.sophia_alignment = min(1.0, self.sophia_alignment + weight) + else: + self.sophia_alignment = max(0.0, self.sophia_alignment - weight) + + +@dataclass +class Delegation: + """Voting power delegation""" + from_wallet: str + to_wallet: str + weight: float # Percentage (0.0 - 1.0) + created_at: int + expires_at: Optional[int] = None + + def is_active(self, current_time: int) -> bool: + if self.expires_at and current_time > self.expires_at: + return False + return True + + +# ============================================================================= +# Governance Engine +# ============================================================================= + +class GovernanceEngine: + """ + Main governance engine implementing RIP-0002, RIP-0005, RIP-0006. + + Lifecycle: + 1. Proposal created via create_proposal() + 2. Sophia evaluates via sophia_evaluate() + 3. If not vetoed, voting begins + 4. After voting period, proposal passes/fails + 5. 
Passed proposals execute after delay + """ + + def __init__(self, total_supply: int = TOTAL_SUPPLY): + self.proposals: Dict[str, Proposal] = {} + self.reputations: Dict[str, NodeReputation] = {} + self.delegations: Dict[str, List[Delegation]] = {} + self.total_supply = total_supply + self.proposal_counter = 0 + + def create_proposal( + self, + title: str, + description: str, + proposal_type: ProposalType, + proposer: str, + contract_hash: Optional[str] = None, + ) -> Proposal: + """Create a new governance proposal.""" + self.proposal_counter += 1 + proposal_id = f"RCP-{self.proposal_counter:04d}" + + proposal = Proposal( + id=proposal_id, + title=title, + description=description, + proposal_type=proposal_type, + proposer=proposer, + created_at=int(time.time()), + contract_hash=contract_hash, + status=ProposalStatus.SUBMITTED, + ) + + self.proposals[proposal_id] = proposal + self._update_reputation(proposer, "propose") + + return proposal + + def sophia_evaluate( + self, + proposal_id: str, + decision: SophiaDecision, + rationale: str, + feasibility_score: float = 0.5, + risk_level: str = "medium", + ) -> SophiaEvaluation: + """Record Sophia AI's evaluation (RIP-0002).""" + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + evaluation = SophiaEvaluation( + decision=decision, + rationale=rationale, + feasibility_score=feasibility_score, + risk_level=risk_level, + aligned_precedent=[], + timestamp=int(time.time()), + ) + + proposal.sophia_evaluation = evaluation + now = int(time.time()) + + if decision == SophiaDecision.VETO: + proposal.status = ProposalStatus.VETOED + print(f"SOPHIA VETO: {proposal_id} - {rationale}") + elif decision == SophiaDecision.ENDORSE: + proposal.status = ProposalStatus.VOTING + proposal.voting_starts_at = now + proposal.voting_ends_at = now + (VOTING_PERIOD_DAYS * 86400) + print(f"SOPHIA ENDORSE: {proposal_id}") + else: # ANALYZE + proposal.status = ProposalStatus.VOTING 
+ proposal.voting_starts_at = now + proposal.voting_ends_at = now + (VOTING_PERIOD_DAYS * 86400) + print(f"SOPHIA ANALYZE: {proposal_id} - {rationale}") + + return evaluation + + def vote( + self, + proposal_id: str, + voter: str, + support: bool, + token_balance: int, + ) -> Vote: + """Cast a vote on a proposal.""" + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + if proposal.status != ProposalStatus.VOTING: + raise ValueError(f"Proposal not in voting phase: {proposal.status}") + + now = int(time.time()) + if proposal.voting_ends_at and now > proposal.voting_ends_at: + raise ValueError("Voting period has ended") + + if proposal.has_voted(voter): + raise ValueError("Already voted on this proposal") + + # Calculate voting weight + reputation = self.reputations.get(voter) + rep_bonus = (reputation.score / 100.0) if reputation else 0.5 + base_weight = int(token_balance * (1 + rep_bonus * 0.2)) + + # Include delegated votes + delegated_weight = self._get_delegated_weight(voter, now) + total_weight = base_weight + delegated_weight + + vote = Vote( + voter=voter, + support=support, + weight=total_weight, + timestamp=now, + ) + + proposal.votes.append(vote) + self._update_reputation(voter, "vote") + + return vote + + def finalize_proposal(self, proposal_id: str) -> ProposalStatus: + """Finalize a proposal after voting period ends.""" + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + if proposal.status != ProposalStatus.VOTING: + return proposal.status + + now = int(time.time()) + if proposal.voting_ends_at and now < proposal.voting_ends_at: + return proposal.status # Still voting + + # Check quorum + participation = proposal.total_votes / self.total_supply + + if participation < QUORUM_PERCENTAGE: + proposal.status = ProposalStatus.REJECTED + print(f"REJECTED (quorum): {proposal_id} - {participation:.1%} < 
{QUORUM_PERCENTAGE:.0%}") + return proposal.status + + # Check approval + if proposal.approval_percentage > 0.5: + proposal.status = ProposalStatus.PASSED + print(f"PASSED: {proposal_id} - {proposal.approval_percentage:.1%} approval") + self._update_sophia_alignment(proposal) + else: + proposal.status = ProposalStatus.REJECTED + print(f"REJECTED: {proposal_id} - {proposal.approval_percentage:.1%} approval") + + return proposal.status + + def execute_proposal(self, proposal_id: str) -> str: + """Execute a passed proposal (RIP-0005).""" + proposal = self.proposals.get(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + if proposal.status != ProposalStatus.PASSED: + raise ValueError(f"Cannot execute: status is {proposal.status}") + + # Vetoed proposals cannot execute + if (proposal.sophia_evaluation and + proposal.sophia_evaluation.decision == SophiaDecision.VETO): + raise ValueError("Vetoed proposals cannot be executed") + + now = int(time.time()) + tx_hash = hashlib.sha256(f"{proposal_id}:{now}".encode()).hexdigest() + + proposal.status = ProposalStatus.EXECUTED + proposal.executed_at = now + proposal.execution_tx_hash = tx_hash + + print(f"EXECUTED: {proposal_id} - TX: {tx_hash[:16]}...") + return tx_hash + + def delegate_voting_power( + self, + from_wallet: str, + to_wallet: str, + weight: float, + duration_days: Optional[int] = None, + ) -> Delegation: + """Delegate voting power to another wallet (RIP-0006).""" + if weight < 0 or weight > 1: + raise ValueError("Delegation weight must be between 0 and 1") + + now = int(time.time()) + expires_at = now + (duration_days * 86400) if duration_days else None + + delegation = Delegation( + from_wallet=from_wallet, + to_wallet=to_wallet, + weight=weight, + created_at=now, + expires_at=expires_at, + ) + + if to_wallet not in self.delegations: + self.delegations[to_wallet] = [] + self.delegations[to_wallet].append(delegation) + + return delegation + + def _get_delegated_weight(self, 
wallet: str, current_time: int) -> int: + """Get total delegated voting weight for a wallet.""" + delegations = self.delegations.get(wallet, []) + total = 0 + for d in delegations: + if d.is_active(current_time): + total += int(d.weight * 100) # Scale weight + return total + + def _update_reputation(self, wallet: str, activity_type: str): + """Update wallet reputation based on activity.""" + if wallet not in self.reputations: + self.reputations[wallet] = NodeReputation( + wallet=wallet, + last_activity=int(time.time()), + ) + + rep = self.reputations[wallet] + rep.participation_count += 1 + rep.last_activity = int(time.time()) + + if activity_type == "vote": + rep.score = min(100, rep.score + 0.5) + elif activity_type == "propose": + rep.score = min(100, rep.score + 1.0) + + def _update_sophia_alignment(self, proposal: Proposal): + """Update voter reputations based on Sophia alignment.""" + if not proposal.sophia_evaluation: + return + + sophia_decision = proposal.sophia_evaluation.decision + if sophia_decision == SophiaDecision.ANALYZE: + return + + sophia_supported = sophia_decision == SophiaDecision.ENDORSE + + for vote in proposal.votes: + voted_with_sophia = vote.support == sophia_supported + rep = self.reputations.get(vote.voter) + if rep: + rep.update_alignment(voted_with_sophia) + + def get_proposal(self, proposal_id: str) -> Optional[Proposal]: + """Get a proposal by ID.""" + return self.proposals.get(proposal_id) + + def get_active_proposals(self) -> List[Proposal]: + """Get all proposals currently in voting.""" + return [ + p for p in self.proposals.values() + if p.status == ProposalStatus.VOTING + ] + + def get_all_proposals(self) -> List[Proposal]: + """Get all proposals.""" + return list(self.proposals.values()) + + +# ============================================================================= +# Sophia AI Interface +# ============================================================================= + +class SophiaEvaluator: + """ + Interface for Sophia 
AI proposal evaluation. + + In production, this connects to Sophia's neural network. + For development, uses rule-based heuristics. + """ + + def __init__(self, governance: GovernanceEngine): + self.governance = governance + + def evaluate(self, proposal_id: str) -> SophiaEvaluation: + """ + Evaluate a proposal using Sophia's judgment. + + Factors considered: + - Proposal type and risk + - Historical precedent + - Community sentiment + - Technical feasibility + """ + proposal = self.governance.get_proposal(proposal_id) + if not proposal: + raise ValueError(f"Proposal {proposal_id} not found") + + # Rule-based evaluation (placeholder for neural network) + risk_scores = { + ProposalType.PARAMETER_CHANGE: 0.3, + ProposalType.MONETARY_POLICY: 0.7, + ProposalType.PROTOCOL_UPGRADE: 0.6, + ProposalType.VALIDATOR_CHANGE: 0.4, + ProposalType.SMART_CONTRACT: 0.5, + ProposalType.COMMUNITY: 0.2, + } + + risk = risk_scores.get(proposal.proposal_type, 0.5) + + # High risk -> more scrutiny + if risk > 0.6: + if "emergency" in proposal.title.lower(): + decision = SophiaDecision.ANALYZE + rationale = "Emergency proposal requires careful review" + else: + decision = SophiaDecision.ANALYZE + rationale = f"High-risk {proposal.proposal_type.name} proposal" + elif risk > 0.4: + decision = SophiaDecision.ANALYZE + rationale = "Moderate impact - community should decide" + else: + decision = SophiaDecision.ENDORSE + rationale = "Low-risk proposal aligned with community values" + + # Apply evaluation + return self.governance.sophia_evaluate( + proposal_id=proposal_id, + decision=decision, + rationale=rationale, + feasibility_score=1.0 - risk, + risk_level="high" if risk > 0.6 else "medium" if risk > 0.3 else "low", + ) + + +# ============================================================================= +# Tests +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN GOVERNANCE ENGINE TEST") + 
print("=" * 60) + + engine = GovernanceEngine() + sophia = SophiaEvaluator(engine) + + # Create proposal + proposal = engine.create_proposal( + title="Increase Block Reward", + description="Proposal to increase block reward from 1.5 to 2.0 RTC", + proposal_type=ProposalType.MONETARY_POLICY, + proposer="RTC1TestProposer", + ) + + print(f"\nCreated: {proposal.id} - {proposal.title}") + + # Sophia evaluates + evaluation = sophia.evaluate(proposal.id) + print(f"Sophia: {evaluation.decision.name} - {evaluation.rationale}") + + # Cast votes + if proposal.status == ProposalStatus.VOTING: + engine.vote(proposal.id, "RTC1Voter1", True, 1000000) + engine.vote(proposal.id, "RTC1Voter2", True, 500000) + engine.vote(proposal.id, "RTC1Voter3", False, 300000) + + print(f"\nVotes: {proposal.yes_votes} yes, {proposal.no_votes} no") + print(f"Approval: {proposal.approval_percentage:.1%}") diff --git a/rips/rustchain-core/install_testnet.sh b/rips/rustchain-core/install_testnet.sh index d8491421..bd130b2b 100755 --- a/rips/rustchain-core/install_testnet.sh +++ b/rips/rustchain-core/install_testnet.sh @@ -1,215 +1,215 @@ -#!/bin/bash -# -# RustChain Testnet Bootstrap Installer -# ====================================== -# -# "Every vintage computer has historical potential" -# -# This script sets up a RustChain testnet validator node. -# The genesis block was born on a PowerMac G4 Mirror Door -# with 12 hardware entropy sources - TRUE Proof of Antiquity. 
-# -# Usage: -# curl -sSL https://rustchain.io/install.sh | bash -# OR -# ./install_testnet.sh -# - -set -e - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -PURPLE='\033[0;35m' -CYAN='\033[0;36m' -NC='\033[0m' # No Color - -RUSTCHAIN_DIR="$HOME/.rustchain" -RUSTCHAIN_VERSION="0.1.0-testnet" - -echo "" -echo -e "${PURPLE}╔══════════════════════════════════════════════════════════════════════════════╗${NC}" -echo -e "${PURPLE}║ ║${NC}" -echo -e "${PURPLE}║ ██████╗ ██╗ ██╗███████╗████████╗ ██████╗██╗ ██╗ █████╗ ██╗███╗ ██╗ ║${NC}" -echo -e "${PURPLE}║ ██╔══██╗██║ ██║██╔════╝╚══██╔══╝██╔════╝██║ ██║██╔══██╗██║████╗ ██║ ║${NC}" -echo -e "${PURPLE}║ ██████╔╝██║ ██║███████╗ ██║ ██║ ███████║███████║██║██╔██╗ ██║ ║${NC}" -echo -e "${PURPLE}║ ██╔══██╗██║ ██║╚════██║ ██║ ██║ ██╔══██║██╔══██║██║██║╚██╗██║ ║${NC}" -echo -e "${PURPLE}║ ██║ ██║╚██████╔╝███████║ ██║ ╚██████╗██║ ██║██║ ██║██║██║ ╚████║ ║${NC}" -echo -e "${PURPLE}║ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ║${NC}" -echo -e "${PURPLE}║ ║${NC}" -echo -e "${PURPLE}║ TESTNET BOOTSTRAP INSTALLER ║${NC}" -echo -e "${PURPLE}║ ║${NC}" -echo -e "${PURPLE}║ \"Every vintage computer has historical potential\" ║${NC}" -echo -e "${PURPLE}║ ║${NC}" -echo -e "${PURPLE}║ This is NOT Proof of Work. This is PROOF OF ANTIQUITY. ║${NC}" -echo -e "${PURPLE}║ Buy a $50 vintage PC. Earn rewards. Preserve history. ║${NC}" -echo -e "${PURPLE}║ ║${NC}" -echo -e "${PURPLE}╚══════════════════════════════════════════════════════════════════════════════╝${NC}" -echo "" - -# Check Python -echo -e "${CYAN}[1/6] Checking Python...${NC}" -if command -v python3 &> /dev/null; then - PYTHON_VERSION=$(python3 --version 2>&1 | cut -d' ' -f2) - echo -e " ${GREEN}✓${NC} Python $PYTHON_VERSION found" -else - echo -e " ${RED}✗${NC} Python 3 not found. 
Please install Python 3.8+" - exit 1 -fi - -# Create directories -echo -e "${CYAN}[2/6] Creating RustChain directory...${NC}" -mkdir -p "$RUSTCHAIN_DIR" -mkdir -p "$RUSTCHAIN_DIR/genesis" -mkdir -p "$RUSTCHAIN_DIR/data" -mkdir -p "$RUSTCHAIN_DIR/logs" -mkdir -p "$RUSTCHAIN_DIR/keys" -echo -e " ${GREEN}✓${NC} Created $RUSTCHAIN_DIR" - -# Download/copy genesis -echo -e "${CYAN}[3/6] Installing genesis block (from PowerMac G4)...${NC}" - -# Check if genesis exists locally -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -if [ -f "$SCRIPT_DIR/genesis/genesis_deep_entropy.json" ]; then - cp "$SCRIPT_DIR/genesis/genesis_deep_entropy.json" "$RUSTCHAIN_DIR/genesis/" - echo -e " ${GREEN}✓${NC} Genesis installed from local package" -else - # Try to download - echo -e " ${YELLOW}!${NC} Genesis not found locally, attempting download..." - # In production, this would download from IPFS or similar - echo -e " ${YELLOW}!${NC} Please manually copy genesis_deep_entropy.json to $RUSTCHAIN_DIR/genesis/" -fi - -# Verify genesis -if [ -f "$RUSTCHAIN_DIR/genesis/genesis_deep_entropy.json" ]; then - GENESIS_SIG=$(grep -o '"signature": "[^"]*"' "$RUSTCHAIN_DIR/genesis/genesis_deep_entropy.json" | head -1) - if [[ "$GENESIS_SIG" == *"PPC-G4-DEEP"* ]]; then - echo -e " ${GREEN}✓${NC} Genesis signature verified: PowerMac G4 Deep Entropy" - else - echo -e " ${YELLOW}!${NC} Genesis signature format unexpected" - fi -fi - -# Copy validator scripts -echo -e "${CYAN}[4/6] Installing validator scripts...${NC}" -if [ -f "$SCRIPT_DIR/validator/setup_validator.py" ]; then - cp -r "$SCRIPT_DIR"/* "$RUSTCHAIN_DIR/node/" 2>/dev/null || true - echo -e " ${GREEN}✓${NC} Validator scripts installed" -else - echo -e " ${YELLOW}!${NC} Validator scripts not found in package" -fi - -# Detect hardware -echo -e "${CYAN}[5/6] Detecting hardware profile...${NC}" - -# Get CPU info -if [ -f /proc/cpuinfo ]; then - CPU_MODEL=$(grep "model name" /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs) -elif [ 
"$(uname)" == "Darwin" ]; then - CPU_MODEL=$(sysctl -n machdep.cpu.brand_string 2>/dev/null || system_profiler SPHardwareDataType | grep "Chip" | head -1 | cut -d':' -f2 | xargs) -else - CPU_MODEL="Unknown" -fi - -# Get RAM -if [ -f /proc/meminfo ]; then - RAM_KB=$(grep "MemTotal" /proc/meminfo | awk '{print $2}') - RAM_MB=$((RAM_KB / 1024)) -elif [ "$(uname)" == "Darwin" ]; then - RAM_BYTES=$(sysctl -n hw.memsize 2>/dev/null || echo 0) - RAM_MB=$((RAM_BYTES / 1024 / 1024)) -else - RAM_MB=0 -fi - -# Determine tier (simplified) -ARCH=$(uname -m) -case "$ARCH" in - "ppc"|"ppc64"|"Power Macintosh") - TIER="vintage" - MULT="2.5x" - ;; - "i386"|"i486"|"i586"|"i686") - TIER="classic" - MULT="2.0x" - ;; - "x86_64"|"amd64") - TIER="modern" - MULT="1.0x" - ;; - "arm64"|"aarch64") - TIER="recent" - MULT="0.5x" - ;; - *) - TIER="unknown" - MULT="1.0x" - ;; -esac - -echo -e " ${GREEN}✓${NC} CPU: $CPU_MODEL" -echo -e " ${GREEN}✓${NC} RAM: ${RAM_MB} MB" -echo -e " ${GREEN}✓${NC} Architecture: $ARCH" -echo -e " ${GREEN}✓${NC} Hardware Tier: ${TIER^^} (${MULT} multiplier)" - -# Save config -echo -e "${CYAN}[6/6] Creating configuration...${NC}" -cat > "$RUSTCHAIN_DIR/config.json" << EOF -{ - "version": "$RUSTCHAIN_VERSION", - "network": "testnet", - "chain_id": 2718, - "genesis_file": "genesis/genesis_deep_entropy.json", - "data_dir": "data", - "log_dir": "logs", - "p2p_port": 9333, - "api_port": 9332, - "bootstrap_nodes": [ - "192.168.0.160:9333", - "192.168.0.125:9333", - "192.168.0.126:9333" - ], - "hardware_profile": { - "cpu_model": "$CPU_MODEL", - "ram_mb": $RAM_MB, - "architecture": "$ARCH", - "tier": "$TIER" - }, - "mining": { - "enabled": false, - "threads": 1 - } -} -EOF -echo -e " ${GREEN}✓${NC} Config saved to $RUSTCHAIN_DIR/config.json" - -# Done! 
-echo "" -echo -e "${GREEN}═══════════════════════════════════════════════════════════════════════════════${NC}" -echo -e "${GREEN} RUSTCHAIN TESTNET INSTALLATION COMPLETE ${NC}" -echo -e "${GREEN}═══════════════════════════════════════════════════════════════════════════════${NC}" -echo "" -echo -e " Installation directory: ${CYAN}$RUSTCHAIN_DIR${NC}" -echo -e " Network: ${CYAN}RustChain Testnet${NC}" -echo -e " Chain ID: ${CYAN}2718${NC}" -echo -e " Hardware Tier: ${CYAN}${TIER^^}${NC} (${MULT} reward multiplier)" -echo "" -echo -e "${YELLOW}Next Steps:${NC}" -echo "" -echo -e " 1. Register as a validator:" -echo -e " ${CYAN}cd $RUSTCHAIN_DIR && python3 node/validator/setup_validator.py --register${NC}" -echo "" -echo -e " 2. Start your validator node:" -echo -e " ${CYAN}python3 node/validator/setup_validator.py --start${NC}" -echo "" -echo -e " 3. Check your hardware tier:" -echo -e " ${CYAN}python3 node/validator/setup_validator.py --hardware-profile${NC}" -echo "" -echo -e "${PURPLE}═══════════════════════════════════════════════════════════════════════════════${NC}" -echo -e "${PURPLE} \"It's cheaper to buy a \$50 vintage PC than to emulate one\" ${NC}" -echo -e "${PURPLE} Preserve computing history. Earn rewards. Join the revolution. ${NC}" -echo -e "${PURPLE}═══════════════════════════════════════════════════════════════════════════════${NC}" -echo "" +#!/bin/bash +# +# RustChain Testnet Bootstrap Installer +# ====================================== +# +# "Every vintage computer has historical potential" +# +# This script sets up a RustChain testnet validator node. +# The genesis block was born on a PowerMac G4 Mirror Door +# with 12 hardware entropy sources - TRUE Proof of Antiquity. 
+# +# Usage: +# curl -sSL https://rustchain.io/install.sh | bash +# OR +# ./install_testnet.sh +# + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +PURPLE='\033[0;35m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +RUSTCHAIN_DIR="$HOME/.rustchain" +RUSTCHAIN_VERSION="0.1.0-testnet" + +echo "" +echo -e "${PURPLE}╔══════════════════════════════════════════════════════════════════════════════╗${NC}" +echo -e "${PURPLE}║ ║${NC}" +echo -e "${PURPLE}║ ██████╗ ██╗ ██╗███████╗████████╗ ██████╗██╗ ██╗ █████╗ ██╗███╗ ██╗ ║${NC}" +echo -e "${PURPLE}║ ██╔══██╗██║ ██║██╔════╝╚══██╔══╝██╔════╝██║ ██║██╔══██╗██║████╗ ██║ ║${NC}" +echo -e "${PURPLE}║ ██████╔╝██║ ██║███████╗ ██║ ██║ ███████║███████║██║██╔██╗ ██║ ║${NC}" +echo -e "${PURPLE}║ ██╔══██╗██║ ██║╚════██║ ██║ ██║ ██╔══██║██╔══██║██║██║╚██╗██║ ║${NC}" +echo -e "${PURPLE}║ ██║ ██║╚██████╔╝███████║ ██║ ╚██████╗██║ ██║██║ ██║██║██║ ╚████║ ║${NC}" +echo -e "${PURPLE}║ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ║${NC}" +echo -e "${PURPLE}║ ║${NC}" +echo -e "${PURPLE}║ TESTNET BOOTSTRAP INSTALLER ║${NC}" +echo -e "${PURPLE}║ ║${NC}" +echo -e "${PURPLE}║ \"Every vintage computer has historical potential\" ║${NC}" +echo -e "${PURPLE}║ ║${NC}" +echo -e "${PURPLE}║ This is NOT Proof of Work. This is PROOF OF ANTIQUITY. ║${NC}" +echo -e "${PURPLE}║ Buy a $50 vintage PC. Earn rewards. Preserve history. ║${NC}" +echo -e "${PURPLE}║ ║${NC}" +echo -e "${PURPLE}╚══════════════════════════════════════════════════════════════════════════════╝${NC}" +echo "" + +# Check Python +echo -e "${CYAN}[1/6] Checking Python...${NC}" +if command -v python3 &> /dev/null; then + PYTHON_VERSION=$(python3 --version 2>&1 | cut -d' ' -f2) + echo -e " ${GREEN}✓${NC} Python $PYTHON_VERSION found" +else + echo -e " ${RED}✗${NC} Python 3 not found. 
Please install Python 3.8+" + exit 1 +fi + +# Create directories +echo -e "${CYAN}[2/6] Creating RustChain directory...${NC}" +mkdir -p "$RUSTCHAIN_DIR" +mkdir -p "$RUSTCHAIN_DIR/genesis" +mkdir -p "$RUSTCHAIN_DIR/data" +mkdir -p "$RUSTCHAIN_DIR/logs" +mkdir -p "$RUSTCHAIN_DIR/keys" +echo -e " ${GREEN}✓${NC} Created $RUSTCHAIN_DIR" + +# Download/copy genesis +echo -e "${CYAN}[3/6] Installing genesis block (from PowerMac G4)...${NC}" + +# Check if genesis exists locally +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +if [ -f "$SCRIPT_DIR/genesis/genesis_deep_entropy.json" ]; then + cp "$SCRIPT_DIR/genesis/genesis_deep_entropy.json" "$RUSTCHAIN_DIR/genesis/" + echo -e " ${GREEN}✓${NC} Genesis installed from local package" +else + # Try to download + echo -e " ${YELLOW}!${NC} Genesis not found locally, attempting download..." + # In production, this would download from IPFS or similar + echo -e " ${YELLOW}!${NC} Please manually copy genesis_deep_entropy.json to $RUSTCHAIN_DIR/genesis/" +fi + +# Verify genesis +if [ -f "$RUSTCHAIN_DIR/genesis/genesis_deep_entropy.json" ]; then + GENESIS_SIG=$(grep -o '"signature": "[^"]*"' "$RUSTCHAIN_DIR/genesis/genesis_deep_entropy.json" | head -1) + if [[ "$GENESIS_SIG" == *"PPC-G4-DEEP"* ]]; then + echo -e " ${GREEN}✓${NC} Genesis signature verified: PowerMac G4 Deep Entropy" + else + echo -e " ${YELLOW}!${NC} Genesis signature format unexpected" + fi +fi + +# Copy validator scripts +echo -e "${CYAN}[4/6] Installing validator scripts...${NC}" +if [ -f "$SCRIPT_DIR/validator/setup_validator.py" ]; then + cp -r "$SCRIPT_DIR"/* "$RUSTCHAIN_DIR/node/" 2>/dev/null || true + echo -e " ${GREEN}✓${NC} Validator scripts installed" +else + echo -e " ${YELLOW}!${NC} Validator scripts not found in package" +fi + +# Detect hardware +echo -e "${CYAN}[5/6] Detecting hardware profile...${NC}" + +# Get CPU info +if [ -f /proc/cpuinfo ]; then + CPU_MODEL=$(grep "model name" /proc/cpuinfo | head -1 | cut -d':' -f2 | xargs) +elif [ 
"$(uname)" == "Darwin" ]; then + CPU_MODEL=$(sysctl -n machdep.cpu.brand_string 2>/dev/null || system_profiler SPHardwareDataType | grep "Chip" | head -1 | cut -d':' -f2 | xargs) +else + CPU_MODEL="Unknown" +fi + +# Get RAM +if [ -f /proc/meminfo ]; then + RAM_KB=$(grep "MemTotal" /proc/meminfo | awk '{print $2}') + RAM_MB=$((RAM_KB / 1024)) +elif [ "$(uname)" == "Darwin" ]; then + RAM_BYTES=$(sysctl -n hw.memsize 2>/dev/null || echo 0) + RAM_MB=$((RAM_BYTES / 1024 / 1024)) +else + RAM_MB=0 +fi + +# Determine tier (simplified) +ARCH=$(uname -m) +case "$ARCH" in + "ppc"|"ppc64"|"Power Macintosh") + TIER="vintage" + MULT="2.5x" + ;; + "i386"|"i486"|"i586"|"i686") + TIER="classic" + MULT="2.0x" + ;; + "x86_64"|"amd64") + TIER="modern" + MULT="1.0x" + ;; + "arm64"|"aarch64") + TIER="recent" + MULT="0.5x" + ;; + *) + TIER="unknown" + MULT="1.0x" + ;; +esac + +echo -e " ${GREEN}✓${NC} CPU: $CPU_MODEL" +echo -e " ${GREEN}✓${NC} RAM: ${RAM_MB} MB" +echo -e " ${GREEN}✓${NC} Architecture: $ARCH" +echo -e " ${GREEN}✓${NC} Hardware Tier: ${TIER^^} (${MULT} multiplier)" + +# Save config +echo -e "${CYAN}[6/6] Creating configuration...${NC}" +cat > "$RUSTCHAIN_DIR/config.json" << EOF +{ + "version": "$RUSTCHAIN_VERSION", + "network": "testnet", + "chain_id": 2718, + "genesis_file": "genesis/genesis_deep_entropy.json", + "data_dir": "data", + "log_dir": "logs", + "p2p_port": 9333, + "api_port": 9332, + "bootstrap_nodes": [ + "192.168.0.160:9333", + "192.168.0.125:9333", + "192.168.0.126:9333" + ], + "hardware_profile": { + "cpu_model": "$CPU_MODEL", + "ram_mb": $RAM_MB, + "architecture": "$ARCH", + "tier": "$TIER" + }, + "mining": { + "enabled": false, + "threads": 1 + } +} +EOF +echo -e " ${GREEN}✓${NC} Config saved to $RUSTCHAIN_DIR/config.json" + +# Done! 
+echo "" +echo -e "${GREEN}═══════════════════════════════════════════════════════════════════════════════${NC}" +echo -e "${GREEN} RUSTCHAIN TESTNET INSTALLATION COMPLETE ${NC}" +echo -e "${GREEN}═══════════════════════════════════════════════════════════════════════════════${NC}" +echo "" +echo -e " Installation directory: ${CYAN}$RUSTCHAIN_DIR${NC}" +echo -e " Network: ${CYAN}RustChain Testnet${NC}" +echo -e " Chain ID: ${CYAN}2718${NC}" +echo -e " Hardware Tier: ${CYAN}${TIER^^}${NC} (${MULT} reward multiplier)" +echo "" +echo -e "${YELLOW}Next Steps:${NC}" +echo "" +echo -e " 1. Register as a validator:" +echo -e " ${CYAN}cd $RUSTCHAIN_DIR && python3 node/validator/setup_validator.py --register${NC}" +echo "" +echo -e " 2. Start your validator node:" +echo -e " ${CYAN}python3 node/validator/setup_validator.py --start${NC}" +echo "" +echo -e " 3. Check your hardware tier:" +echo -e " ${CYAN}python3 node/validator/setup_validator.py --hardware-profile${NC}" +echo "" +echo -e "${PURPLE}═══════════════════════════════════════════════════════════════════════════════${NC}" +echo -e "${PURPLE} \"It's cheaper to buy a \$50 vintage PC than to emulate one\" ${NC}" +echo -e "${PURPLE} Preserve computing history. Earn rewards. Join the revolution. ${NC}" +echo -e "${PURPLE}═══════════════════════════════════════════════════════════════════════════════${NC}" +echo "" diff --git a/rips/rustchain-core/ledger/utxo_ledger.py b/rips/rustchain-core/ledger/utxo_ledger.py index bf775ccd..ad22ef9c 100644 --- a/rips/rustchain-core/ledger/utxo_ledger.py +++ b/rips/rustchain-core/ledger/utxo_ledger.py @@ -1,530 +1,530 @@ -""" -RustChain UTXO Ledger (Ergo-Compatible) -======================================= - -Implements an Ergo-style UTXO (Unspent Transaction Output) model. 
- -Security Principles: -- All inputs must be validated before spending -- Double-spend prevention via UTXO consumption -- Cryptographic proofs for ownership -- Immutable transaction history - -Why UTXO over Account Model: -- Better parallelization for validation -- Simpler state verification -- Enhanced privacy (fresh addresses per tx) -- Cleaner audit trail -""" - -import hashlib -import time -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Set, Tuple, Any -from enum import Enum - -from ..config.chain_params import ONE_RTC, GENESIS_HASH - - -# ============================================================================= -# UTXO Box (Ergo-Compatible) -# ============================================================================= - -@dataclass -class Box: - """ - UTXO Box - the fundamental unit of value in RustChain. - - Inspired by Ergo's box model: - - Each box has a unique ID - - Contains value (RTC) and optional tokens - - Protected by a spending condition (proposition) - - Immutable once created, can only be spent (destroyed) - """ - box_id: bytes # 32-byte unique identifier - value: int # Value in smallest units (nanoRTC) - proposition_bytes: bytes # Spending condition (simplified ErgoTree) - creation_height: int # Block height when created - transaction_id: bytes # TX that created this box - output_index: int # Index in transaction outputs - - # Additional data - tokens: List[Tuple[bytes, int]] = field(default_factory=list) # (token_id, amount) - registers: Dict[str, bytes] = field(default_factory=dict) # R4-R9 - - def __post_init__(self): - if not self.box_id: - self.box_id = self._compute_id() - - def _compute_id(self) -> bytes: - """Compute unique box ID from contents""" - hasher = hashlib.sha256() - hasher.update(self.value.to_bytes(8, 'little')) - hasher.update(self.proposition_bytes) - hasher.update(self.creation_height.to_bytes(8, 'little')) - hasher.update(self.transaction_id) - 
hasher.update(self.output_index.to_bytes(2, 'little')) - return hasher.digest() - - @staticmethod - def p2pk_proposition(public_key: bytes) -> bytes: - """Create Pay-to-Public-Key proposition""" - # Simplified: real impl would be proper ErgoTree encoding - return b'\x00\x08' + public_key - - @staticmethod - def wallet_to_proposition(wallet_address: str) -> bytes: - """Convert RustChain wallet address to proposition""" - return Box.p2pk_proposition(wallet_address.encode()) - - -# ============================================================================= -# Transaction Types -# ============================================================================= - -class TransactionType(Enum): - """Transaction types in RustChain""" - TRANSFER = "transfer" - MINING_REWARD = "mining_reward" - BADGE_MINT = "badge_mint" - GOVERNANCE_VOTE = "governance_vote" - CONTRACT_CALL = "contract_call" - - -@dataclass -class TransactionInput: - """Reference to a box being spent""" - box_id: bytes # ID of box being spent - spending_proof: bytes # Proof that authorizes spending - extension: Dict[str, bytes] = field(default_factory=dict) - - -@dataclass -class Transaction: - """ - UTXO Transaction - - Security Model: - - All inputs must exist in UTXO set - - All inputs must have valid spending proofs - - Sum(outputs) + fee <= Sum(inputs) - - No double-spending (atomic consumption) - """ - tx_id: bytes = field(default=b'') - tx_type: TransactionType = TransactionType.TRANSFER - inputs: List[TransactionInput] = field(default_factory=list) - outputs: List[Box] = field(default_factory=list) - data_inputs: List[bytes] = field(default_factory=list) # Read-only inputs - timestamp: int = 0 - fee: int = 0 - - def __post_init__(self): - if not self.tx_id: - self.tx_id = self._compute_id() - if not self.timestamp: - self.timestamp = int(time.time()) - - def _compute_id(self) -> bytes: - """Compute transaction ID""" - hasher = hashlib.sha256() - for inp in self.inputs: - hasher.update(inp.box_id) - for 
out in self.outputs: - hasher.update(out.box_id) - hasher.update(self.timestamp.to_bytes(8, 'little')) - return hasher.digest() - - def total_input_value(self, utxo_set: 'UtxoSet') -> int: - """Calculate total value of inputs""" - total = 0 - for inp in self.inputs: - box = utxo_set.get_box(inp.box_id) - if box: - total += box.value - return total - - def total_output_value(self) -> int: - """Calculate total value of outputs""" - return sum(out.value for out in self.outputs) - - @classmethod - def mining_reward( - cls, - miner_wallet: str, - reward_amount: int, - block_height: int, - antiquity_score: float, - hardware_model: str, - ) -> 'Transaction': - """Create a mining reward transaction (coinbase)""" - output = Box( - box_id=b'', - value=reward_amount, - proposition_bytes=Box.wallet_to_proposition(miner_wallet), - creation_height=block_height, - transaction_id=b'\x00' * 32, # Genesis/coinbase marker - output_index=0, - registers={ - 'R4': int(antiquity_score * 100).to_bytes(8, 'little'), - 'R5': hardware_model.encode()[:32], - } - ) - - return cls( - tx_type=TransactionType.MINING_REWARD, - inputs=[], # Coinbase has no inputs - outputs=[output], - fee=0, - ) - - -# ============================================================================= -# UTXO Set -# ============================================================================= - -class UtxoSet: - """ - Unspent Transaction Output Set - - Security Features: - - Atomic updates (spend + create in single operation) - - Double-spend prevention - - Efficient balance queries - - Merkle proof support for light clients - """ - - def __init__(self): - self._boxes: Dict[bytes, Box] = {} - self._by_address: Dict[str, Set[bytes]] = {} - self._spent: Set[bytes] = set() # Track spent boxes for history - - def add_box(self, box: Box, owner_address: str): - """Add a box to the UTXO set""" - if box.box_id in self._boxes: - raise ValueError(f"Box {box.box_id.hex()[:16]} already exists") - - self._boxes[box.box_id] = box - - 
if owner_address not in self._by_address: - self._by_address[owner_address] = set() - self._by_address[owner_address].add(box.box_id) - - def spend_box(self, box_id: bytes) -> Optional[Box]: - """ - Spend (remove) a box from the UTXO set. - - Security: Once spent, a box cannot be re-added. - """ - if box_id in self._spent: - raise ValueError(f"Double-spend attempt: {box_id.hex()[:16]}") - - if box_id not in self._boxes: - return None - - box = self._boxes.pop(box_id) - self._spent.add(box_id) - - # Remove from address index - for addr_boxes in self._by_address.values(): - addr_boxes.discard(box_id) - - return box - - def get_box(self, box_id: bytes) -> Optional[Box]: - """Get a box by ID""" - return self._boxes.get(box_id) - - def get_boxes_for_address(self, address: str) -> List[Box]: - """Get all unspent boxes for an address""" - box_ids = self._by_address.get(address, set()) - return [self._boxes[bid] for bid in box_ids if bid in self._boxes] - - def get_balance(self, address: str) -> int: - """Get total balance for an address""" - return sum(box.value for box in self.get_boxes_for_address(address)) - - def apply_transaction(self, tx: Transaction, block_height: int) -> bool: - """ - Atomically apply a transaction. - - Security: Either all inputs are spent and all outputs created, - or nothing changes (atomic operation). 
- - Args: - tx: Transaction to apply - block_height: Current block height - - Returns: - True if successful, False if validation fails - """ - # Validate: all inputs must exist and not be spent - input_boxes = [] - for inp in tx.inputs: - box = self.get_box(inp.box_id) - if not box: - return False # Input doesn't exist - input_boxes.append(box) - - # Validate: outputs don't exceed inputs (except for coinbase) - if tx.inputs: # Not coinbase - total_in = sum(b.value for b in input_boxes) - total_out = tx.total_output_value() + tx.fee - if total_out > total_in: - return False # Spending more than available - - # Atomic application: spend inputs, create outputs - spent_boxes = [] - try: - # Spend all inputs - for inp in tx.inputs: - spent = self.spend_box(inp.box_id) - if not spent: - raise ValueError("Failed to spend input") - spent_boxes.append(spent) - - # Create all outputs - for idx, output in enumerate(tx.outputs): - output.transaction_id = tx.tx_id - output.output_index = idx - output.creation_height = block_height - - # Derive owner address from proposition - owner = self._proposition_to_address(output.proposition_bytes) - self.add_box(output, owner) - - return True - - except Exception as e: - # Rollback on failure (restore spent boxes) - # In production, this would be more sophisticated - print(f"Transaction failed: {e}") - return False - - def _proposition_to_address(self, prop: bytes) -> str: - """Convert proposition bytes back to address (simplified)""" - if prop.startswith(b'\x00\x08'): - return prop[2:].decode('utf-8', errors='ignore') - return f"RTC_UNKNOWN_{prop[:8].hex()}" - - def compute_state_root(self) -> bytes: - """ - Compute Merkle root of all UTXOs. 
- - Used for: - - State commitment in block headers - - Light client verification - - Cross-chain proofs - """ - if not self._boxes: - return hashlib.sha256(b"empty").digest() - - # Sort box IDs for deterministic ordering - sorted_ids = sorted(self._boxes.keys()) - hashes = [hashlib.sha256(bid).digest() for bid in sorted_ids] - - # Build Merkle tree - while len(hashes) > 1: - if len(hashes) % 2 == 1: - hashes.append(hashes[-1]) - hashes = [ - hashlib.sha256(hashes[i] + hashes[i+1]).digest() - for i in range(0, len(hashes), 2) - ] - - return hashes[0] - - -# ============================================================================= -# Transaction Pool (Mempool) -# ============================================================================= - -class TransactionPool: - """ - In-memory pool of pending transactions. - - Security Features: - - Fee-based prioritization - - Double-spend prevention - - Size limits to prevent DoS - - Expiration of old transactions - """ - - MAX_POOL_SIZE = 10_000 - MAX_TX_AGE_SECONDS = 3600 # 1 hour - - def __init__(self, utxo_set: UtxoSet): - self._pending: Dict[bytes, Transaction] = {} - self._by_input: Dict[bytes, bytes] = {} # input_box_id -> tx_id - self._utxo_set = utxo_set - - def add_transaction(self, tx: Transaction) -> bool: - """ - Add transaction to the pool. 
- - Validates: - - Transaction is well-formed - - All inputs exist in UTXO set - - No double-spending within pool - - Fee is sufficient - """ - # Check pool size - if len(self._pending) >= self.MAX_POOL_SIZE: - return False - - # Check for existing tx - if tx.tx_id in self._pending: - return False - - # Check for double-spend within pool - for inp in tx.inputs: - if inp.box_id in self._by_input: - return False - if not self._utxo_set.get_box(inp.box_id): - return False - - # Add to pool - self._pending[tx.tx_id] = tx - for inp in tx.inputs: - self._by_input[inp.box_id] = tx.tx_id - - return True - - def remove_transaction(self, tx_id: bytes) -> Optional[Transaction]: - """Remove transaction from pool""" - tx = self._pending.pop(tx_id, None) - if tx: - for inp in tx.inputs: - self._by_input.pop(inp.box_id, None) - return tx - - def get_transactions_for_block(self, max_count: int = 100) -> List[Transaction]: - """Get highest-priority transactions for block inclusion""" - # Sort by fee (highest first) - sorted_txs = sorted( - self._pending.values(), - key=lambda t: t.fee, - reverse=True - ) - return sorted_txs[:max_count] - - def clear_expired(self): - """Remove expired transactions""" - now = int(time.time()) - expired = [ - tx_id for tx_id, tx in self._pending.items() - if now - tx.timestamp > self.MAX_TX_AGE_SECONDS - ] - for tx_id in expired: - self.remove_transaction(tx_id) - - -# ============================================================================= -# Balance Tracker (Convenience Layer) -# ============================================================================= - -class BalanceTracker: - """High-level balance tracking built on UTXO set""" - - def __init__(self, utxo_set: UtxoSet): - self._utxo_set = utxo_set - - def get_balance(self, address: str) -> Dict[str, Any]: - """Get detailed balance for an address""" - boxes = self._utxo_set.get_boxes_for_address(address) - total = sum(b.value for b in boxes) - - # Collect tokens - tokens: Dict[bytes, int] 
= {} - for box in boxes: - for token_id, amount in box.tokens: - tokens[token_id] = tokens.get(token_id, 0) + amount - - return { - "address": address, - "balance_nano": total, - "balance_rtc": total / ONE_RTC, - "utxo_count": len(boxes), - "tokens": {tid.hex(): amt for tid, amt in tokens.items()}, - } - - def transfer( - self, - from_address: str, - to_address: str, - amount: int, - fee: int = 1000, # Default 0.00001 RTC - ) -> Optional[Transaction]: - """ - Create a transfer transaction. - - Selects UTXOs to cover amount + fee, creates change output. - """ - boxes = self._utxo_set.get_boxes_for_address(from_address) - available = sum(b.value for b in boxes) - - if available < amount + fee: - return None # Insufficient funds - - # Select inputs (simple: use all boxes, create change) - inputs = [ - TransactionInput(box_id=b.box_id, spending_proof=b'\x00') - for b in boxes - ] - - # Create outputs - outputs = [ - Box( - box_id=b'', - value=amount, - proposition_bytes=Box.wallet_to_proposition(to_address), - creation_height=0, - transaction_id=b'', - output_index=0, - ) - ] - - # Change output - change = available - amount - fee - if change > 0: - outputs.append(Box( - box_id=b'', - value=change, - proposition_bytes=Box.wallet_to_proposition(from_address), - creation_height=0, - transaction_id=b'', - output_index=1, - )) - - return Transaction( - tx_type=TransactionType.TRANSFER, - inputs=inputs, - outputs=outputs, - fee=fee, - ) - - -# ============================================================================= -# Tests -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN UTXO LEDGER TEST") - print("=" * 60) - - utxo = UtxoSet() - - # Simulate mining reward - tx = Transaction.mining_reward( - miner_wallet="RTC1TestMiner", - reward_amount=150_000_000, # 1.5 RTC - block_height=1, - antiquity_score=75.5, - hardware_model="486DX2-66", - ) - - utxo.apply_transaction(tx, 
block_height=1) - - balance = BalanceTracker(utxo).get_balance("RTC1TestMiner") - print(f"Miner balance: {balance['balance_rtc']} RTC") - print(f"UTXO count: {balance['utxo_count']}") +""" +RustChain UTXO Ledger (Ergo-Compatible) +======================================= + +Implements an Ergo-style UTXO (Unspent Transaction Output) model. + +Security Principles: +- All inputs must be validated before spending +- Double-spend prevention via UTXO consumption +- Cryptographic proofs for ownership +- Immutable transaction history + +Why UTXO over Account Model: +- Better parallelization for validation +- Simpler state verification +- Enhanced privacy (fresh addresses per tx) +- Cleaner audit trail +""" + +import hashlib +import time +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Set, Tuple, Any +from enum import Enum + +from ..config.chain_params import ONE_RTC, GENESIS_HASH + + +# ============================================================================= +# UTXO Box (Ergo-Compatible) +# ============================================================================= + +@dataclass +class Box: + """ + UTXO Box - the fundamental unit of value in RustChain. 
+ + Inspired by Ergo's box model: + - Each box has a unique ID + - Contains value (RTC) and optional tokens + - Protected by a spending condition (proposition) + - Immutable once created, can only be spent (destroyed) + """ + box_id: bytes # 32-byte unique identifier + value: int # Value in smallest units (nanoRTC) + proposition_bytes: bytes # Spending condition (simplified ErgoTree) + creation_height: int # Block height when created + transaction_id: bytes # TX that created this box + output_index: int # Index in transaction outputs + + # Additional data + tokens: List[Tuple[bytes, int]] = field(default_factory=list) # (token_id, amount) + registers: Dict[str, bytes] = field(default_factory=dict) # R4-R9 + + def __post_init__(self): + if not self.box_id: + self.box_id = self._compute_id() + + def _compute_id(self) -> bytes: + """Compute unique box ID from contents""" + hasher = hashlib.sha256() + hasher.update(self.value.to_bytes(8, 'little')) + hasher.update(self.proposition_bytes) + hasher.update(self.creation_height.to_bytes(8, 'little')) + hasher.update(self.transaction_id) + hasher.update(self.output_index.to_bytes(2, 'little')) + return hasher.digest() + + @staticmethod + def p2pk_proposition(public_key: bytes) -> bytes: + """Create Pay-to-Public-Key proposition""" + # Simplified: real impl would be proper ErgoTree encoding + return b'\x00\x08' + public_key + + @staticmethod + def wallet_to_proposition(wallet_address: str) -> bytes: + """Convert RustChain wallet address to proposition""" + return Box.p2pk_proposition(wallet_address.encode()) + + +# ============================================================================= +# Transaction Types +# ============================================================================= + +class TransactionType(Enum): + """Transaction types in RustChain""" + TRANSFER = "transfer" + MINING_REWARD = "mining_reward" + BADGE_MINT = "badge_mint" + GOVERNANCE_VOTE = "governance_vote" + CONTRACT_CALL = "contract_call" + + 
+@dataclass +class TransactionInput: + """Reference to a box being spent""" + box_id: bytes # ID of box being spent + spending_proof: bytes # Proof that authorizes spending + extension: Dict[str, bytes] = field(default_factory=dict) + + +@dataclass +class Transaction: + """ + UTXO Transaction + + Security Model: + - All inputs must exist in UTXO set + - All inputs must have valid spending proofs + - Sum(outputs) + fee <= Sum(inputs) + - No double-spending (atomic consumption) + """ + tx_id: bytes = field(default=b'') + tx_type: TransactionType = TransactionType.TRANSFER + inputs: List[TransactionInput] = field(default_factory=list) + outputs: List[Box] = field(default_factory=list) + data_inputs: List[bytes] = field(default_factory=list) # Read-only inputs + timestamp: int = 0 + fee: int = 0 + + def __post_init__(self): + if not self.tx_id: + self.tx_id = self._compute_id() + if not self.timestamp: + self.timestamp = int(time.time()) + + def _compute_id(self) -> bytes: + """Compute transaction ID""" + hasher = hashlib.sha256() + for inp in self.inputs: + hasher.update(inp.box_id) + for out in self.outputs: + hasher.update(out.box_id) + hasher.update(self.timestamp.to_bytes(8, 'little')) + return hasher.digest() + + def total_input_value(self, utxo_set: 'UtxoSet') -> int: + """Calculate total value of inputs""" + total = 0 + for inp in self.inputs: + box = utxo_set.get_box(inp.box_id) + if box: + total += box.value + return total + + def total_output_value(self) -> int: + """Calculate total value of outputs""" + return sum(out.value for out in self.outputs) + + @classmethod + def mining_reward( + cls, + miner_wallet: str, + reward_amount: int, + block_height: int, + antiquity_score: float, + hardware_model: str, + ) -> 'Transaction': + """Create a mining reward transaction (coinbase)""" + output = Box( + box_id=b'', + value=reward_amount, + proposition_bytes=Box.wallet_to_proposition(miner_wallet), + creation_height=block_height, + transaction_id=b'\x00' * 32, # 
Genesis/coinbase marker + output_index=0, + registers={ + 'R4': int(antiquity_score * 100).to_bytes(8, 'little'), + 'R5': hardware_model.encode()[:32], + } + ) + + return cls( + tx_type=TransactionType.MINING_REWARD, + inputs=[], # Coinbase has no inputs + outputs=[output], + fee=0, + ) + + +# ============================================================================= +# UTXO Set +# ============================================================================= + +class UtxoSet: + """ + Unspent Transaction Output Set + + Security Features: + - Atomic updates (spend + create in single operation) + - Double-spend prevention + - Efficient balance queries + - Merkle proof support for light clients + """ + + def __init__(self): + self._boxes: Dict[bytes, Box] = {} + self._by_address: Dict[str, Set[bytes]] = {} + self._spent: Set[bytes] = set() # Track spent boxes for history + + def add_box(self, box: Box, owner_address: str): + """Add a box to the UTXO set""" + if box.box_id in self._boxes: + raise ValueError(f"Box {box.box_id.hex()[:16]} already exists") + + self._boxes[box.box_id] = box + + if owner_address not in self._by_address: + self._by_address[owner_address] = set() + self._by_address[owner_address].add(box.box_id) + + def spend_box(self, box_id: bytes) -> Optional[Box]: + """ + Spend (remove) a box from the UTXO set. + + Security: Once spent, a box cannot be re-added. 
+ """ + if box_id in self._spent: + raise ValueError(f"Double-spend attempt: {box_id.hex()[:16]}") + + if box_id not in self._boxes: + return None + + box = self._boxes.pop(box_id) + self._spent.add(box_id) + + # Remove from address index + for addr_boxes in self._by_address.values(): + addr_boxes.discard(box_id) + + return box + + def get_box(self, box_id: bytes) -> Optional[Box]: + """Get a box by ID""" + return self._boxes.get(box_id) + + def get_boxes_for_address(self, address: str) -> List[Box]: + """Get all unspent boxes for an address""" + box_ids = self._by_address.get(address, set()) + return [self._boxes[bid] for bid in box_ids if bid in self._boxes] + + def get_balance(self, address: str) -> int: + """Get total balance for an address""" + return sum(box.value for box in self.get_boxes_for_address(address)) + + def apply_transaction(self, tx: Transaction, block_height: int) -> bool: + """ + Atomically apply a transaction. + + Security: Either all inputs are spent and all outputs created, + or nothing changes (atomic operation). 
+ + Args: + tx: Transaction to apply + block_height: Current block height + + Returns: + True if successful, False if validation fails + """ + # Validate: all inputs must exist and not be spent + input_boxes = [] + for inp in tx.inputs: + box = self.get_box(inp.box_id) + if not box: + return False # Input doesn't exist + input_boxes.append(box) + + # Validate: outputs don't exceed inputs (except for coinbase) + if tx.inputs: # Not coinbase + total_in = sum(b.value for b in input_boxes) + total_out = tx.total_output_value() + tx.fee + if total_out > total_in: + return False # Spending more than available + + # Atomic application: spend inputs, create outputs + spent_boxes = [] + try: + # Spend all inputs + for inp in tx.inputs: + spent = self.spend_box(inp.box_id) + if not spent: + raise ValueError("Failed to spend input") + spent_boxes.append(spent) + + # Create all outputs + for idx, output in enumerate(tx.outputs): + output.transaction_id = tx.tx_id + output.output_index = idx + output.creation_height = block_height + + # Derive owner address from proposition + owner = self._proposition_to_address(output.proposition_bytes) + self.add_box(output, owner) + + return True + + except Exception as e: + # Rollback on failure (restore spent boxes) + # In production, this would be more sophisticated + print(f"Transaction failed: {e}") + return False + + def _proposition_to_address(self, prop: bytes) -> str: + """Convert proposition bytes back to address (simplified)""" + if prop.startswith(b'\x00\x08'): + return prop[2:].decode('utf-8', errors='ignore') + return f"RTC_UNKNOWN_{prop[:8].hex()}" + + def compute_state_root(self) -> bytes: + """ + Compute Merkle root of all UTXOs. 
+ + Used for: + - State commitment in block headers + - Light client verification + - Cross-chain proofs + """ + if not self._boxes: + return hashlib.sha256(b"empty").digest() + + # Sort box IDs for deterministic ordering + sorted_ids = sorted(self._boxes.keys()) + hashes = [hashlib.sha256(bid).digest() for bid in sorted_ids] + + # Build Merkle tree + while len(hashes) > 1: + if len(hashes) % 2 == 1: + hashes.append(hashes[-1]) + hashes = [ + hashlib.sha256(hashes[i] + hashes[i+1]).digest() + for i in range(0, len(hashes), 2) + ] + + return hashes[0] + + +# ============================================================================= +# Transaction Pool (Mempool) +# ============================================================================= + +class TransactionPool: + """ + In-memory pool of pending transactions. + + Security Features: + - Fee-based prioritization + - Double-spend prevention + - Size limits to prevent DoS + - Expiration of old transactions + """ + + MAX_POOL_SIZE = 10_000 + MAX_TX_AGE_SECONDS = 3600 # 1 hour + + def __init__(self, utxo_set: UtxoSet): + self._pending: Dict[bytes, Transaction] = {} + self._by_input: Dict[bytes, bytes] = {} # input_box_id -> tx_id + self._utxo_set = utxo_set + + def add_transaction(self, tx: Transaction) -> bool: + """ + Add transaction to the pool. 
+ + Validates: + - Transaction is well-formed + - All inputs exist in UTXO set + - No double-spending within pool + - Fee is sufficient + """ + # Check pool size + if len(self._pending) >= self.MAX_POOL_SIZE: + return False + + # Check for existing tx + if tx.tx_id in self._pending: + return False + + # Check for double-spend within pool + for inp in tx.inputs: + if inp.box_id in self._by_input: + return False + if not self._utxo_set.get_box(inp.box_id): + return False + + # Add to pool + self._pending[tx.tx_id] = tx + for inp in tx.inputs: + self._by_input[inp.box_id] = tx.tx_id + + return True + + def remove_transaction(self, tx_id: bytes) -> Optional[Transaction]: + """Remove transaction from pool""" + tx = self._pending.pop(tx_id, None) + if tx: + for inp in tx.inputs: + self._by_input.pop(inp.box_id, None) + return tx + + def get_transactions_for_block(self, max_count: int = 100) -> List[Transaction]: + """Get highest-priority transactions for block inclusion""" + # Sort by fee (highest first) + sorted_txs = sorted( + self._pending.values(), + key=lambda t: t.fee, + reverse=True + ) + return sorted_txs[:max_count] + + def clear_expired(self): + """Remove expired transactions""" + now = int(time.time()) + expired = [ + tx_id for tx_id, tx in self._pending.items() + if now - tx.timestamp > self.MAX_TX_AGE_SECONDS + ] + for tx_id in expired: + self.remove_transaction(tx_id) + + +# ============================================================================= +# Balance Tracker (Convenience Layer) +# ============================================================================= + +class BalanceTracker: + """High-level balance tracking built on UTXO set""" + + def __init__(self, utxo_set: UtxoSet): + self._utxo_set = utxo_set + + def get_balance(self, address: str) -> Dict[str, Any]: + """Get detailed balance for an address""" + boxes = self._utxo_set.get_boxes_for_address(address) + total = sum(b.value for b in boxes) + + # Collect tokens + tokens: Dict[bytes, int] 
= {} + for box in boxes: + for token_id, amount in box.tokens: + tokens[token_id] = tokens.get(token_id, 0) + amount + + return { + "address": address, + "balance_nano": total, + "balance_rtc": total / ONE_RTC, + "utxo_count": len(boxes), + "tokens": {tid.hex(): amt for tid, amt in tokens.items()}, + } + + def transfer( + self, + from_address: str, + to_address: str, + amount: int, + fee: int = 1000, # Default 0.00001 RTC + ) -> Optional[Transaction]: + """ + Create a transfer transaction. + + Selects UTXOs to cover amount + fee, creates change output. + """ + boxes = self._utxo_set.get_boxes_for_address(from_address) + available = sum(b.value for b in boxes) + + if available < amount + fee: + return None # Insufficient funds + + # Select inputs (simple: use all boxes, create change) + inputs = [ + TransactionInput(box_id=b.box_id, spending_proof=b'\x00') + for b in boxes + ] + + # Create outputs + outputs = [ + Box( + box_id=b'', + value=amount, + proposition_bytes=Box.wallet_to_proposition(to_address), + creation_height=0, + transaction_id=b'', + output_index=0, + ) + ] + + # Change output + change = available - amount - fee + if change > 0: + outputs.append(Box( + box_id=b'', + value=change, + proposition_bytes=Box.wallet_to_proposition(from_address), + creation_height=0, + transaction_id=b'', + output_index=1, + )) + + return Transaction( + tx_type=TransactionType.TRANSFER, + inputs=inputs, + outputs=outputs, + fee=fee, + ) + + +# ============================================================================= +# Tests +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN UTXO LEDGER TEST") + print("=" * 60) + + utxo = UtxoSet() + + # Simulate mining reward + tx = Transaction.mining_reward( + miner_wallet="RTC1TestMiner", + reward_amount=150_000_000, # 1.5 RTC + block_height=1, + antiquity_score=75.5, + hardware_model="486DX2-66", + ) + + utxo.apply_transaction(tx, 
block_height=1) + + balance = BalanceTracker(utxo).get_balance("RTC1TestMiner") + print(f"Miner balance: {balance['balance_rtc']} RTC") + print(f"UTXO count: {balance['utxo_count']}") diff --git a/rips/rustchain-core/main.py b/rips/rustchain-core/main.py index 3adebdd4..d49dec75 100644 --- a/rips/rustchain-core/main.py +++ b/rips/rustchain-core/main.py @@ -1,424 +1,424 @@ -#!/usr/bin/env python3 -""" -RustChain Node - Proof of Antiquity Blockchain -============================================== - -"Every vintage computer has historical potential" -- Flamekeeper Scott - -This is NOT Proof of Work! RustChain rewards: -- Hardware age (older = better) -- Node uptime (longer = better) -- Hardware authenticity (verified via deep entropy) - -Usage: - python -m rustchain-core.main [options] - -Options: - --port PORT API port (default: 8085) - --data-dir DIR Data directory (default: ./rustchain_data) - --mining Enable mining - --validator-id ID Custom validator ID -""" - -import argparse -import signal -import sys -import time -import threading -from pathlib import Path - -# Local imports -from config.chain_params import ( - CHAIN_ID, - TOTAL_SUPPLY, - BLOCK_TIME_SECONDS, - DEFAULT_PORT, - PROTOCOL_VERSION, - FOUNDER_WALLETS, - PREMINE_AMOUNT, -) -from consensus.poa import ProofOfAntiquity, HardwareProof, compute_antiquity_score -from ledger.utxo_ledger import UtxoSet, Transaction, BalanceTracker -from validator.score import HardwareValidator, HardwareInfo -from validator.entropy import EntropyProfileBuilder, derive_validator_id -from governance.proposals import GovernanceEngine, ProposalType, SophiaDecision -from networking.p2p import NetworkManager, PeerId -from api.rpc import RustChainApi, ApiServer - - -# ============================================================================= -# RustChain Node -# ============================================================================= - -class RustChainNode: - """ - Full RustChain node implementing Proof of Antiquity. 
- - This node: - - Validates hardware via deep entropy - - Calculates Antiquity Scores - - Processes blocks via weighted lottery - - Manages governance proposals - - Tracks wallets and balances - """ - - VERSION = "0.1.0" - - def __init__( - self, - data_dir: str = "./rustchain_data", - api_port: int = DEFAULT_PORT, - enable_mining: bool = True, - ): - self.data_dir = Path(data_dir) - self.data_dir.mkdir(parents=True, exist_ok=True) - - self.chain_id = CHAIN_ID - self.version = self.VERSION - self.api_port = api_port - self.is_mining = enable_mining - - # Generate validator ID from entropy - print("Collecting entropy fingerprint...") - self.validator_id = derive_validator_id() - print(f"Validator ID: {self.validator_id[:32]}...") - - # Initialize components - self.poa = ProofOfAntiquity() - self.utxo_set = UtxoSet() - self.balance_tracker = BalanceTracker(self.utxo_set) - self.hardware_validator = HardwareValidator() - self.governance = GovernanceEngine(TOTAL_SUPPLY) - - # Network - self.network = NetworkManager( - listen_port=api_port + 1, - chain_id=CHAIN_ID, - validator_id=self.validator_id, - ) - - # State - self._start_time = time.time() - self._running = False - self._block_thread = None - - # Initialize genesis - self._initialize_genesis() - - def _initialize_genesis(self): - """Initialize genesis block and founder wallets""" - print() - print("=" * 60) - print("RUSTCHAIN - PROOF OF ANTIQUITY") - print("=" * 60) - print() - print('"Every vintage computer has historical potential"') - print() - print(f"Chain ID: {CHAIN_ID}") - print(f"Total Supply: {TOTAL_SUPPLY:,} RTC") - print(f"Block Time: {BLOCK_TIME_SECONDS // 60} minutes") - print(f"Founder Wallets: {len(FOUNDER_WALLETS)}") - print() - - # Initialize founder wallets with premine - founder_amount = int((PREMINE_AMOUNT / len(FOUNDER_WALLETS)) * 100_000_000) - for wallet in FOUNDER_WALLETS: - # Create founder UTXO - tx = Transaction.mining_reward( - miner_wallet=wallet, - reward_amount=founder_amount, - 
block_height=0, - antiquity_score=100.0, - hardware_model="Genesis", - ) - self.utxo_set.apply_transaction(tx, block_height=0) - print(f" Founder: {wallet[:40]}...") - - print() - print("Genesis initialized!") - - def start(self): - """Start the node""" - self._running = True - - # Start network - self.network.start() - - # Start block processor - self._block_thread = threading.Thread( - target=self._block_processor, - daemon=True - ) - self._block_thread.start() - - print() - print(f"Node started!") - print(f" API: http://0.0.0.0:{self.api_port}") - print(f" P2P: port {self.api_port + 1}") - print(f" Mining: {'enabled' if self.is_mining else 'disabled'}") - print() - - def stop(self): - """Stop the node""" - self._running = False - self.network.stop() - print("Node stopped") - - def _block_processor(self): - """Background block processor""" - while self._running: - time.sleep(10) # Check every 10 seconds - - status = self.poa.get_status() - if status["time_remaining_seconds"] <= 0: - self._process_block() - - def _process_block(self): - """Process pending proofs and create new block""" - previous_hash = "0" * 64 # TODO: Get from chain - - block = self.poa.produce_block(previous_hash) - if block: - # Apply mining rewards - for miner in block.miners: - tx = Transaction.mining_reward( - miner_wallet=miner.wallet, - reward_amount=miner.reward, - block_height=block.height, - antiquity_score=miner.antiquity_score, - hardware_model=miner.hardware_model, - ) - self.utxo_set.apply_transaction(tx, block.height) - - # Broadcast block - self.network.broadcast_block(block.__dict__) - - print(f"Block #{block.height} produced! 
" - f"{len(block.miners)} miners, " - f"{block.total_reward / 100_000_000:.2f} RTC distributed") - - # ========================================================================= - # API Methods - # ========================================================================= - - def get_block_height(self) -> int: - return self.poa.current_block_height - - def get_total_minted(self) -> float: - # TODO: Track properly - return float(PREMINE_AMOUNT) - - def get_mining_pool(self) -> float: - return float(TOTAL_SUPPLY - PREMINE_AMOUNT) - - def get_wallet_count(self) -> int: - return len(self.utxo_set._by_address) - - def get_pending_proofs(self) -> int: - return len(self.poa.pending_proofs) - - def get_block_age(self) -> int: - return self.poa.get_status()["block_age_seconds"] - - def get_time_to_next_block(self) -> int: - return self.poa.get_status()["time_remaining_seconds"] - - def get_uptime(self) -> int: - return int(time.time() - self._start_time) - - def get_block(self, height: int): - # TODO: Store blocks - return None - - def get_block_by_hash(self, block_hash: str): - # TODO: Store blocks - return None - - def get_wallet(self, address: str): - return self.balance_tracker.get_balance(address) - - def get_balance(self, address: str) -> int: - return self.utxo_set.get_balance(address) - - def submit_mining_proof( - self, - wallet: str, - hardware_model: str, - release_year: int, - uptime_days: int, - entropy_hash: str = "", - ): - """Submit a mining proof""" - # Validate hardware - hardware = HardwareInfo( - cpu_model=hardware_model, - release_year=release_year, - uptime_days=uptime_days, - ) - - validation = self.hardware_validator.validate_miner( - wallet=wallet, - hardware=hardware, - current_block=self.poa.current_block_height, - ) - - if not validation["eligible"]: - return { - "success": False, - "errors": validation["errors"], - } - - # Submit to PoA - proof = HardwareProof( - cpu_model=hardware_model, - release_year=release_year, - uptime_days=uptime_days, - 
hardware_hash=hardware.generate_hardware_hash(), - ) - - result = self.poa.submit_proof( - wallet=wallet, - hardware=proof, - anti_emulation_hash=entropy_hash or "0" * 64, - ) - - return result - - def get_mining_status(self): - return self.poa.get_status() - - def calculate_antiquity_score(self, release_year: int, uptime_days: int): - score = compute_antiquity_score(release_year, uptime_days) - return { - "release_year": release_year, - "uptime_days": uptime_days, - "antiquity_score": score, - "eligible": score >= 1.0, - } - - def create_proposal( - self, - title: str, - description: str, - proposal_type: str, - proposer: str, - contract_hash: str = None, - ): - ptype = ProposalType[proposal_type.upper()] - proposal = self.governance.create_proposal( - title=title, - description=description, - proposal_type=ptype, - proposer=proposer, - contract_hash=contract_hash, - ) - return proposal.to_dict() - - def vote_proposal(self, proposal_id: str, voter: str, support: bool): - balance = self.utxo_set.get_balance(voter) - vote = self.governance.vote( - proposal_id=proposal_id, - voter=voter, - support=support, - token_balance=balance, - ) - return {"success": True, "weight": vote.weight} - - def get_proposals(self): - return [p.to_dict() for p in self.governance.get_all_proposals()] - - def get_proposal(self, proposal_id: str): - p = self.governance.get_proposal(proposal_id) - return p.to_dict() if p else None - - def get_peers(self): - return [ - { - "address": p.peer_id.to_string(), - "reputation": p.reputation, - "best_height": p.best_block_height, - } - for p in self.network.peer_manager.get_peers() - ] - - def get_entropy_profile(self): - builder = EntropyProfileBuilder() - profile = builder.collect_full_profile() - return { - "validator_id": profile.validator_id, - "confidence_score": profile.confidence_score, - "combined_hash": profile.combined_hash, - } - - -# ============================================================================= -# Main Entry Point -# 
============================================================================= - -def main(): - parser = argparse.ArgumentParser( - description="RustChain Node - Proof of Antiquity Blockchain" - ) - parser.add_argument( - "--port", type=int, default=DEFAULT_PORT, - help=f"API port (default: {DEFAULT_PORT})" - ) - parser.add_argument( - "--data-dir", type=str, default="./rustchain_data", - help="Data directory" - ) - parser.add_argument( - "--mining", action="store_true", - help="Enable mining" - ) - parser.add_argument( - "--validator-id", type=str, default=None, - help="Custom validator ID (auto-generated if not provided)" - ) - - args = parser.parse_args() - - # Create node - node = RustChainNode( - data_dir=args.data_dir, - api_port=args.port, - enable_mining=args.mining, - ) - - # Create and start API server - api = RustChainApi(node) - api_server = ApiServer(api, port=args.port) - - # Handle shutdown - def shutdown(signum, frame): - print("\nShutting down...") - api_server.stop() - node.stop() - sys.exit(0) - - signal.signal(signal.SIGINT, shutdown) - signal.signal(signal.SIGTERM, shutdown) - - # Start - node.start() - api_server.start() - - print() - print("=" * 60) - print("RUSTCHAIN NODE RUNNING") - print("=" * 60) - print() - print("Remember: This is NOT Proof of Work!") - print("Older hardware wins, not faster hardware.") - print() - print("Press Ctrl+C to stop...") - print() - - # Keep running - while True: - time.sleep(1) - - -if __name__ == "__main__": - main() +#!/usr/bin/env python3 +""" +RustChain Node - Proof of Antiquity Blockchain +============================================== + +"Every vintage computer has historical potential" +- Flamekeeper Scott + +This is NOT Proof of Work! 
RustChain rewards: +- Hardware age (older = better) +- Node uptime (longer = better) +- Hardware authenticity (verified via deep entropy) + +Usage: + python -m rustchain-core.main [options] + +Options: + --port PORT API port (default: 8085) + --data-dir DIR Data directory (default: ./rustchain_data) + --mining Enable mining + --validator-id ID Custom validator ID +""" + +import argparse +import signal +import sys +import time +import threading +from pathlib import Path + +# Local imports +from config.chain_params import ( + CHAIN_ID, + TOTAL_SUPPLY, + BLOCK_TIME_SECONDS, + DEFAULT_PORT, + PROTOCOL_VERSION, + FOUNDER_WALLETS, + PREMINE_AMOUNT, +) +from consensus.poa import ProofOfAntiquity, HardwareProof, compute_antiquity_score +from ledger.utxo_ledger import UtxoSet, Transaction, BalanceTracker +from validator.score import HardwareValidator, HardwareInfo +from validator.entropy import EntropyProfileBuilder, derive_validator_id +from governance.proposals import GovernanceEngine, ProposalType, SophiaDecision +from networking.p2p import NetworkManager, PeerId +from api.rpc import RustChainApi, ApiServer + + +# ============================================================================= +# RustChain Node +# ============================================================================= + +class RustChainNode: + """ + Full RustChain node implementing Proof of Antiquity. 
+ + This node: + - Validates hardware via deep entropy + - Calculates Antiquity Scores + - Processes blocks via weighted lottery + - Manages governance proposals + - Tracks wallets and balances + """ + + VERSION = "0.1.0" + + def __init__( + self, + data_dir: str = "./rustchain_data", + api_port: int = DEFAULT_PORT, + enable_mining: bool = True, + ): + self.data_dir = Path(data_dir) + self.data_dir.mkdir(parents=True, exist_ok=True) + + self.chain_id = CHAIN_ID + self.version = self.VERSION + self.api_port = api_port + self.is_mining = enable_mining + + # Generate validator ID from entropy + print("Collecting entropy fingerprint...") + self.validator_id = derive_validator_id() + print(f"Validator ID: {self.validator_id[:32]}...") + + # Initialize components + self.poa = ProofOfAntiquity() + self.utxo_set = UtxoSet() + self.balance_tracker = BalanceTracker(self.utxo_set) + self.hardware_validator = HardwareValidator() + self.governance = GovernanceEngine(TOTAL_SUPPLY) + + # Network + self.network = NetworkManager( + listen_port=api_port + 1, + chain_id=CHAIN_ID, + validator_id=self.validator_id, + ) + + # State + self._start_time = time.time() + self._running = False + self._block_thread = None + + # Initialize genesis + self._initialize_genesis() + + def _initialize_genesis(self): + """Initialize genesis block and founder wallets""" + print() + print("=" * 60) + print("RUSTCHAIN - PROOF OF ANTIQUITY") + print("=" * 60) + print() + print('"Every vintage computer has historical potential"') + print() + print(f"Chain ID: {CHAIN_ID}") + print(f"Total Supply: {TOTAL_SUPPLY:,} RTC") + print(f"Block Time: {BLOCK_TIME_SECONDS // 60} minutes") + print(f"Founder Wallets: {len(FOUNDER_WALLETS)}") + print() + + # Initialize founder wallets with premine + founder_amount = int((PREMINE_AMOUNT / len(FOUNDER_WALLETS)) * 100_000_000) + for wallet in FOUNDER_WALLETS: + # Create founder UTXO + tx = Transaction.mining_reward( + miner_wallet=wallet, + reward_amount=founder_amount, + 
block_height=0, + antiquity_score=100.0, + hardware_model="Genesis", + ) + self.utxo_set.apply_transaction(tx, block_height=0) + print(f" Founder: {wallet[:40]}...") + + print() + print("Genesis initialized!") + + def start(self): + """Start the node""" + self._running = True + + # Start network + self.network.start() + + # Start block processor + self._block_thread = threading.Thread( + target=self._block_processor, + daemon=True + ) + self._block_thread.start() + + print() + print(f"Node started!") + print(f" API: http://0.0.0.0:{self.api_port}") + print(f" P2P: port {self.api_port + 1}") + print(f" Mining: {'enabled' if self.is_mining else 'disabled'}") + print() + + def stop(self): + """Stop the node""" + self._running = False + self.network.stop() + print("Node stopped") + + def _block_processor(self): + """Background block processor""" + while self._running: + time.sleep(10) # Check every 10 seconds + + status = self.poa.get_status() + if status["time_remaining_seconds"] <= 0: + self._process_block() + + def _process_block(self): + """Process pending proofs and create new block""" + previous_hash = "0" * 64 # TODO: Get from chain + + block = self.poa.produce_block(previous_hash) + if block: + # Apply mining rewards + for miner in block.miners: + tx = Transaction.mining_reward( + miner_wallet=miner.wallet, + reward_amount=miner.reward, + block_height=block.height, + antiquity_score=miner.antiquity_score, + hardware_model=miner.hardware_model, + ) + self.utxo_set.apply_transaction(tx, block.height) + + # Broadcast block + self.network.broadcast_block(block.__dict__) + + print(f"Block #{block.height} produced! 
" + f"{len(block.miners)} miners, " + f"{block.total_reward / 100_000_000:.2f} RTC distributed") + + # ========================================================================= + # API Methods + # ========================================================================= + + def get_block_height(self) -> int: + return self.poa.current_block_height + + def get_total_minted(self) -> float: + # TODO: Track properly + return float(PREMINE_AMOUNT) + + def get_mining_pool(self) -> float: + return float(TOTAL_SUPPLY - PREMINE_AMOUNT) + + def get_wallet_count(self) -> int: + return len(self.utxo_set._by_address) + + def get_pending_proofs(self) -> int: + return len(self.poa.pending_proofs) + + def get_block_age(self) -> int: + return self.poa.get_status()["block_age_seconds"] + + def get_time_to_next_block(self) -> int: + return self.poa.get_status()["time_remaining_seconds"] + + def get_uptime(self) -> int: + return int(time.time() - self._start_time) + + def get_block(self, height: int): + # TODO: Store blocks + return None + + def get_block_by_hash(self, block_hash: str): + # TODO: Store blocks + return None + + def get_wallet(self, address: str): + return self.balance_tracker.get_balance(address) + + def get_balance(self, address: str) -> int: + return self.utxo_set.get_balance(address) + + def submit_mining_proof( + self, + wallet: str, + hardware_model: str, + release_year: int, + uptime_days: int, + entropy_hash: str = "", + ): + """Submit a mining proof""" + # Validate hardware + hardware = HardwareInfo( + cpu_model=hardware_model, + release_year=release_year, + uptime_days=uptime_days, + ) + + validation = self.hardware_validator.validate_miner( + wallet=wallet, + hardware=hardware, + current_block=self.poa.current_block_height, + ) + + if not validation["eligible"]: + return { + "success": False, + "errors": validation["errors"], + } + + # Submit to PoA + proof = HardwareProof( + cpu_model=hardware_model, + release_year=release_year, + uptime_days=uptime_days, + 
hardware_hash=hardware.generate_hardware_hash(), + ) + + result = self.poa.submit_proof( + wallet=wallet, + hardware=proof, + anti_emulation_hash=entropy_hash or "0" * 64, + ) + + return result + + def get_mining_status(self): + return self.poa.get_status() + + def calculate_antiquity_score(self, release_year: int, uptime_days: int): + score = compute_antiquity_score(release_year, uptime_days) + return { + "release_year": release_year, + "uptime_days": uptime_days, + "antiquity_score": score, + "eligible": score >= 1.0, + } + + def create_proposal( + self, + title: str, + description: str, + proposal_type: str, + proposer: str, + contract_hash: str = None, + ): + ptype = ProposalType[proposal_type.upper()] + proposal = self.governance.create_proposal( + title=title, + description=description, + proposal_type=ptype, + proposer=proposer, + contract_hash=contract_hash, + ) + return proposal.to_dict() + + def vote_proposal(self, proposal_id: str, voter: str, support: bool): + balance = self.utxo_set.get_balance(voter) + vote = self.governance.vote( + proposal_id=proposal_id, + voter=voter, + support=support, + token_balance=balance, + ) + return {"success": True, "weight": vote.weight} + + def get_proposals(self): + return [p.to_dict() for p in self.governance.get_all_proposals()] + + def get_proposal(self, proposal_id: str): + p = self.governance.get_proposal(proposal_id) + return p.to_dict() if p else None + + def get_peers(self): + return [ + { + "address": p.peer_id.to_string(), + "reputation": p.reputation, + "best_height": p.best_block_height, + } + for p in self.network.peer_manager.get_peers() + ] + + def get_entropy_profile(self): + builder = EntropyProfileBuilder() + profile = builder.collect_full_profile() + return { + "validator_id": profile.validator_id, + "confidence_score": profile.confidence_score, + "combined_hash": profile.combined_hash, + } + + +# ============================================================================= +# Main Entry Point +# 
============================================================================= + +def main(): + parser = argparse.ArgumentParser( + description="RustChain Node - Proof of Antiquity Blockchain" + ) + parser.add_argument( + "--port", type=int, default=DEFAULT_PORT, + help=f"API port (default: {DEFAULT_PORT})" + ) + parser.add_argument( + "--data-dir", type=str, default="./rustchain_data", + help="Data directory" + ) + parser.add_argument( + "--mining", action="store_true", + help="Enable mining" + ) + parser.add_argument( + "--validator-id", type=str, default=None, + help="Custom validator ID (auto-generated if not provided)" + ) + + args = parser.parse_args() + + # Create node + node = RustChainNode( + data_dir=args.data_dir, + api_port=args.port, + enable_mining=args.mining, + ) + + # Create and start API server + api = RustChainApi(node) + api_server = ApiServer(api, port=args.port) + + # Handle shutdown + def shutdown(signum, frame): + print("\nShutting down...") + api_server.stop() + node.stop() + sys.exit(0) + + signal.signal(signal.SIGINT, shutdown) + signal.signal(signal.SIGTERM, shutdown) + + # Start + node.start() + api_server.start() + + print() + print("=" * 60) + print("RUSTCHAIN NODE RUNNING") + print("=" * 60) + print() + print("Remember: This is NOT Proof of Work!") + print("Older hardware wins, not faster hardware.") + print() + print("Press Ctrl+C to stop...") + print() + + # Keep running + while True: + time.sleep(1) + + +if __name__ == "__main__": + main() diff --git a/rips/rustchain-core/networking/p2p.py b/rips/rustchain-core/networking/p2p.py index de7618a6..e176a136 100644 --- a/rips/rustchain-core/networking/p2p.py +++ b/rips/rustchain-core/networking/p2p.py @@ -1,546 +1,546 @@ -""" -RustChain P2P Networking (RIP-0005) -=================================== - -Peer-to-peer networking for block propagation, transaction gossip, -and validator coordination. 
- -Security Features: -- mTLS for peer authentication -- Message signing with validator keys -- DDoS protection via rate limiting -- Reputation-based peer selection -""" - -import hashlib -import json -import socket -import time -import threading -import queue -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Set, Any, Callable -from enum import Enum, auto - -from ..config.chain_params import ( - DEFAULT_PORT, - MTLS_PORT, - PROTOCOL_VERSION, - MAX_PEERS, - PEER_TIMEOUT_SECONDS, - SYNC_BATCH_SIZE, -) - - -# ============================================================================= -# Message Types -# ============================================================================= - -class MessageType(Enum): - """P2P message types""" - # Handshake - HELLO = auto() - HELLO_ACK = auto() - - # Block propagation - NEW_BLOCK = auto() - GET_BLOCKS = auto() - BLOCKS = auto() - - # Transaction gossip - NEW_TX = auto() - GET_TXS = auto() - TXS = auto() - - # Peer discovery - GET_PEERS = auto() - PEERS = auto() - - # Validator coordination - MINING_PROOF = auto() - VALIDATOR_STATUS = auto() - - # Entropy verification - ENTROPY_CHALLENGE = auto() - ENTROPY_RESPONSE = auto() - - -# ============================================================================= -# Data Structures -# ============================================================================= - -@dataclass -class PeerId: - """Unique peer identifier""" - address: str - port: int - public_key: bytes = b'' - - def __hash__(self): - return hash((self.address, self.port)) - - def __eq__(self, other): - if isinstance(other, PeerId): - return self.address == other.address and self.port == other.port - return False - - def to_string(self) -> str: - return f"{self.address}:{self.port}" - - -@dataclass -class PeerInfo: - """Information about a connected peer""" - peer_id: PeerId - protocol_version: str - chain_id: int - best_block_height: int - best_block_hash: str - connected_at: int - 
last_seen: int - reputation: float = 50.0 - latency_ms: float = 0.0 - - def is_alive(self, timeout: int = PEER_TIMEOUT_SECONDS) -> bool: - return (int(time.time()) - self.last_seen) < timeout - - -@dataclass -class Message: - """P2P message""" - msg_type: MessageType - sender: PeerId - payload: Dict[str, Any] - timestamp: int = 0 - signature: bytes = b'' - nonce: int = 0 - - def __post_init__(self): - if not self.timestamp: - self.timestamp = int(time.time()) - if not self.nonce: - self.nonce = int.from_bytes(hashlib.sha256(str(time.time()).encode()).digest()[:4], 'big') - - def to_bytes(self) -> bytes: - """Serialize message to bytes""" - data = { - "type": self.msg_type.name, - "sender": self.sender.to_string() if self.sender else "", - "payload": self.payload, - "timestamp": self.timestamp, - "nonce": self.nonce, - } - return json.dumps(data).encode() - - @classmethod - def from_bytes(cls, data: bytes, sender: PeerId) -> 'Message': - """Deserialize message from bytes""" - parsed = json.loads(data.decode()) - return cls( - msg_type=MessageType[parsed["type"]], - sender=sender, - payload=parsed["payload"], - timestamp=parsed["timestamp"], - nonce=parsed["nonce"], - ) - - def compute_hash(self) -> str: - """Compute message hash for signing""" - data = f"{self.msg_type.name}:{self.timestamp}:{self.nonce}:{json.dumps(self.payload, sort_keys=True)}" - return hashlib.sha256(data.encode()).hexdigest() - - -# ============================================================================= -# Peer Manager -# ============================================================================= - -class PeerManager: - """ - Manages peer connections and reputation. 
- - Security: - - Maintains peer reputation based on behavior - - Bans malicious peers - - Limits connections to prevent resource exhaustion - """ - - def __init__(self, max_peers: int = MAX_PEERS): - self.peers: Dict[str, PeerInfo] = {} - self.banned: Set[str] = set() - self.max_peers = max_peers - self._lock = threading.Lock() - - def add_peer(self, peer_info: PeerInfo) -> bool: - """Add a new peer""" - with self._lock: - peer_key = peer_info.peer_id.to_string() - - if peer_key in self.banned: - return False - - if len(self.peers) >= self.max_peers: - # Remove lowest reputation peer - if self.peers: - worst = min(self.peers.values(), key=lambda p: p.reputation) - if worst.reputation < peer_info.reputation: - del self.peers[worst.peer_id.to_string()] - else: - return False - - self.peers[peer_key] = peer_info - return True - - def remove_peer(self, peer_id: PeerId): - """Remove a peer""" - with self._lock: - peer_key = peer_id.to_string() - if peer_key in self.peers: - del self.peers[peer_key] - - def update_peer(self, peer_id: PeerId, **kwargs): - """Update peer information""" - with self._lock: - peer_key = peer_id.to_string() - if peer_key in self.peers: - peer = self.peers[peer_key] - for key, value in kwargs.items(): - if hasattr(peer, key): - setattr(peer, key, value) - peer.last_seen = int(time.time()) - - def adjust_reputation(self, peer_id: PeerId, delta: float): - """Adjust peer reputation""" - with self._lock: - peer_key = peer_id.to_string() - if peer_key in self.peers: - peer = self.peers[peer_key] - peer.reputation = max(0, min(100, peer.reputation + delta)) - - # Ban if reputation too low - if peer.reputation < 10: - self.ban_peer(peer_id, "Low reputation") - - def ban_peer(self, peer_id: PeerId, reason: str): - """Ban a malicious peer""" - with self._lock: - peer_key = peer_id.to_string() - self.banned.add(peer_key) - if peer_key in self.peers: - del self.peers[peer_key] - print(f"BANNED: {peer_key} - {reason}") - - def get_peers(self, count: int = 
10) -> List[PeerInfo]: - """Get best peers by reputation""" - with self._lock: - alive_peers = [p for p in self.peers.values() if p.is_alive()] - sorted_peers = sorted(alive_peers, key=lambda p: p.reputation, reverse=True) - return sorted_peers[:count] - - def get_peer(self, peer_id: PeerId) -> Optional[PeerInfo]: - """Get specific peer info""" - with self._lock: - return self.peers.get(peer_id.to_string()) - - def cleanup_stale(self): - """Remove stale peers""" - with self._lock: - stale = [ - k for k, p in self.peers.items() - if not p.is_alive() - ] - for peer_key in stale: - del self.peers[peer_key] - - -# ============================================================================= -# Message Handler -# ============================================================================= - -class MessageHandler: - """ - Handles incoming P2P messages. - - Implements message validation, deduplication, and routing. - """ - - def __init__(self): - self.handlers: Dict[MessageType, List[Callable]] = {} - self.seen_messages: Set[str] = set() - self._lock = threading.Lock() - - def register_handler(self, msg_type: MessageType, handler: Callable): - """Register a message handler""" - if msg_type not in self.handlers: - self.handlers[msg_type] = [] - self.handlers[msg_type].append(handler) - - def handle_message(self, message: Message) -> bool: - """ - Handle an incoming message. - - Returns True if message was processed, False if duplicate/invalid. 
- """ - # Check for duplicate - msg_hash = message.compute_hash() - with self._lock: - if msg_hash in self.seen_messages: - return False - self.seen_messages.add(msg_hash) - - # Cleanup old messages periodically - if len(self.seen_messages) > 10000: - self.seen_messages.clear() - - # Validate timestamp (reject old messages) - now = int(time.time()) - if abs(now - message.timestamp) > 300: # 5 minute window - return False - - # Route to handlers - handlers = self.handlers.get(message.msg_type, []) - for handler in handlers: - try: - handler(message) - except Exception as e: - print(f"Handler error: {e}") - - return True - - -# ============================================================================= -# Network Manager -# ============================================================================= - -class NetworkManager: - """ - Main network manager for P2P communication. - - Features: - - Peer discovery and management - - Message broadcasting and routing - - Block and transaction propagation - - Sync coordination - """ - - def __init__( - self, - listen_port: int = DEFAULT_PORT, - chain_id: int = 2718, - validator_id: str = "", - ): - self.listen_port = listen_port - self.chain_id = chain_id - self.validator_id = validator_id - - self.peer_manager = PeerManager() - self.message_handler = MessageHandler() - - self.outbound_queue: queue.Queue = queue.Queue() - self.running = False - - self._local_peer_id = PeerId( - address="0.0.0.0", - port=listen_port, - ) - - # Register default handlers - self._register_default_handlers() - - def _register_default_handlers(self): - """Register default message handlers""" - self.message_handler.register_handler(MessageType.HELLO, self._handle_hello) - self.message_handler.register_handler(MessageType.GET_PEERS, self._handle_get_peers) - self.message_handler.register_handler(MessageType.PEERS, self._handle_peers) - - def _handle_hello(self, message: Message): - """Handle HELLO message""" - payload = message.payload - peer_info 
= PeerInfo( - peer_id=message.sender, - protocol_version=payload.get("version", PROTOCOL_VERSION), - chain_id=payload.get("chain_id", 0), - best_block_height=payload.get("best_height", 0), - best_block_hash=payload.get("best_hash", ""), - connected_at=int(time.time()), - last_seen=int(time.time()), - ) - - # Verify chain ID - if peer_info.chain_id != self.chain_id: - print(f"Rejecting peer {message.sender.to_string()}: wrong chain ID") - return - - self.peer_manager.add_peer(peer_info) - - # Send HELLO_ACK - self.send_message(message.sender, MessageType.HELLO_ACK, { - "version": PROTOCOL_VERSION, - "chain_id": self.chain_id, - "validator_id": self.validator_id, - }) - - def _handle_get_peers(self, message: Message): - """Handle GET_PEERS message""" - peers = self.peer_manager.get_peers(10) - peer_list = [ - { - "address": p.peer_id.address, - "port": p.peer_id.port, - "reputation": p.reputation, - } - for p in peers - ] - - self.send_message(message.sender, MessageType.PEERS, { - "peers": peer_list, - }) - - def _handle_peers(self, message: Message): - """Handle PEERS message""" - for peer_data in message.payload.get("peers", []): - peer_id = PeerId( - address=peer_data["address"], - port=peer_data["port"], - ) - # Try to connect to new peer - self.connect_to_peer(peer_id) - - def connect_to_peer(self, peer_id: PeerId) -> bool: - """Initiate connection to a peer""" - # Send HELLO message - self.send_message(peer_id, MessageType.HELLO, { - "version": PROTOCOL_VERSION, - "chain_id": self.chain_id, - "best_height": 0, # TODO: Get from chain - "best_hash": "", - "validator_id": self.validator_id, - }) - return True - - def send_message(self, peer_id: PeerId, msg_type: MessageType, payload: Dict[str, Any]): - """Send a message to a specific peer""" - message = Message( - msg_type=msg_type, - sender=self._local_peer_id, - payload=payload, - ) - - self.outbound_queue.put((peer_id, message)) - - def broadcast(self, msg_type: MessageType, payload: Dict[str, Any]): - 
"""Broadcast a message to all peers""" - peers = self.peer_manager.get_peers() - for peer in peers: - self.send_message(peer.peer_id, msg_type, payload) - - def broadcast_block(self, block_data: Dict[str, Any]): - """Broadcast a new block to the network""" - self.broadcast(MessageType.NEW_BLOCK, {"block": block_data}) - - def broadcast_transaction(self, tx_data: Dict[str, Any]): - """Broadcast a new transaction to the network""" - self.broadcast(MessageType.NEW_TX, {"transaction": tx_data}) - - def request_blocks(self, peer_id: PeerId, start_height: int, count: int = SYNC_BATCH_SIZE): - """Request blocks from a peer""" - self.send_message(peer_id, MessageType.GET_BLOCKS, { - "start_height": start_height, - "count": count, - }) - - def start(self): - """Start the network manager""" - self.running = True - print(f"Network started on port {self.listen_port}") - - # Start peer cleanup thread - cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True) - cleanup_thread.start() - - def stop(self): - """Stop the network manager""" - self.running = False - print("Network stopped") - - def _cleanup_loop(self): - """Periodic cleanup of stale peers""" - while self.running: - time.sleep(60) - self.peer_manager.cleanup_stale() - - def get_sync_status(self) -> Dict[str, Any]: - """Get synchronization status""" - peers = self.peer_manager.get_peers() - if not peers: - return { - "synced": False, - "best_peer_height": 0, - "connected_peers": 0, - } - - best_peer = max(peers, key=lambda p: p.best_block_height) - - return { - "synced": True, # TODO: Compare with local height - "best_peer_height": best_peer.best_block_height, - "connected_peers": len(peers), - "best_peer": best_peer.peer_id.to_string(), - } - - -# ============================================================================= -# Seed Nodes -# ============================================================================= - -SEED_NODES = [ - PeerId("seed1.rustchain.net", DEFAULT_PORT), - 
PeerId("seed2.rustchain.net", DEFAULT_PORT), - PeerId("seed3.rustchain.net", DEFAULT_PORT), -] - - -def bootstrap_network(manager: NetworkManager): - """Bootstrap network connections from seed nodes""" - for seed in SEED_NODES: - try: - manager.connect_to_peer(seed) - except Exception as e: - print(f"Failed to connect to seed {seed.to_string()}: {e}") - - -# ============================================================================= -# Tests -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN P2P NETWORKING TEST") - print("=" * 60) - - manager = NetworkManager( - listen_port=8085, - chain_id=2718, - validator_id="test_validator", - ) - - manager.start() - - # Simulate peer connection - peer_id = PeerId("192.168.1.100", 8085) - peer_info = PeerInfo( - peer_id=peer_id, - protocol_version=PROTOCOL_VERSION, - chain_id=2718, - best_block_height=100, - best_block_hash="abc123", - connected_at=int(time.time()), - last_seen=int(time.time()), - ) - - manager.peer_manager.add_peer(peer_info) - - status = manager.get_sync_status() - print(f"\nSync Status: {status}") - print(f"Connected Peers: {status['connected_peers']}") - - manager.stop() +""" +RustChain P2P Networking (RIP-0005) +=================================== + +Peer-to-peer networking for block propagation, transaction gossip, +and validator coordination. 
+ +Security Features: +- mTLS for peer authentication +- Message signing with validator keys +- DDoS protection via rate limiting +- Reputation-based peer selection +""" + +import hashlib +import json +import socket +import time +import threading +import queue +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Set, Any, Callable +from enum import Enum, auto + +from ..config.chain_params import ( + DEFAULT_PORT, + MTLS_PORT, + PROTOCOL_VERSION, + MAX_PEERS, + PEER_TIMEOUT_SECONDS, + SYNC_BATCH_SIZE, +) + + +# ============================================================================= +# Message Types +# ============================================================================= + +class MessageType(Enum): + """P2P message types""" + # Handshake + HELLO = auto() + HELLO_ACK = auto() + + # Block propagation + NEW_BLOCK = auto() + GET_BLOCKS = auto() + BLOCKS = auto() + + # Transaction gossip + NEW_TX = auto() + GET_TXS = auto() + TXS = auto() + + # Peer discovery + GET_PEERS = auto() + PEERS = auto() + + # Validator coordination + MINING_PROOF = auto() + VALIDATOR_STATUS = auto() + + # Entropy verification + ENTROPY_CHALLENGE = auto() + ENTROPY_RESPONSE = auto() + + +# ============================================================================= +# Data Structures +# ============================================================================= + +@dataclass +class PeerId: + """Unique peer identifier""" + address: str + port: int + public_key: bytes = b'' + + def __hash__(self): + return hash((self.address, self.port)) + + def __eq__(self, other): + if isinstance(other, PeerId): + return self.address == other.address and self.port == other.port + return False + + def to_string(self) -> str: + return f"{self.address}:{self.port}" + + +@dataclass +class PeerInfo: + """Information about a connected peer""" + peer_id: PeerId + protocol_version: str + chain_id: int + best_block_height: int + best_block_hash: str + connected_at: int + 
last_seen: int + reputation: float = 50.0 + latency_ms: float = 0.0 + + def is_alive(self, timeout: int = PEER_TIMEOUT_SECONDS) -> bool: + return (int(time.time()) - self.last_seen) < timeout + + +@dataclass +class Message: + """P2P message""" + msg_type: MessageType + sender: PeerId + payload: Dict[str, Any] + timestamp: int = 0 + signature: bytes = b'' + nonce: int = 0 + + def __post_init__(self): + if not self.timestamp: + self.timestamp = int(time.time()) + if not self.nonce: + self.nonce = int.from_bytes(hashlib.sha256(str(time.time()).encode()).digest()[:4], 'big') + + def to_bytes(self) -> bytes: + """Serialize message to bytes""" + data = { + "type": self.msg_type.name, + "sender": self.sender.to_string() if self.sender else "", + "payload": self.payload, + "timestamp": self.timestamp, + "nonce": self.nonce, + } + return json.dumps(data).encode() + + @classmethod + def from_bytes(cls, data: bytes, sender: PeerId) -> 'Message': + """Deserialize message from bytes""" + parsed = json.loads(data.decode()) + return cls( + msg_type=MessageType[parsed["type"]], + sender=sender, + payload=parsed["payload"], + timestamp=parsed["timestamp"], + nonce=parsed["nonce"], + ) + + def compute_hash(self) -> str: + """Compute message hash for signing""" + data = f"{self.msg_type.name}:{self.timestamp}:{self.nonce}:{json.dumps(self.payload, sort_keys=True)}" + return hashlib.sha256(data.encode()).hexdigest() + + +# ============================================================================= +# Peer Manager +# ============================================================================= + +class PeerManager: + """ + Manages peer connections and reputation. 
+ + Security: + - Maintains peer reputation based on behavior + - Bans malicious peers + - Limits connections to prevent resource exhaustion + """ + + def __init__(self, max_peers: int = MAX_PEERS): + self.peers: Dict[str, PeerInfo] = {} + self.banned: Set[str] = set() + self.max_peers = max_peers + self._lock = threading.Lock() + + def add_peer(self, peer_info: PeerInfo) -> bool: + """Add a new peer""" + with self._lock: + peer_key = peer_info.peer_id.to_string() + + if peer_key in self.banned: + return False + + if len(self.peers) >= self.max_peers: + # Remove lowest reputation peer + if self.peers: + worst = min(self.peers.values(), key=lambda p: p.reputation) + if worst.reputation < peer_info.reputation: + del self.peers[worst.peer_id.to_string()] + else: + return False + + self.peers[peer_key] = peer_info + return True + + def remove_peer(self, peer_id: PeerId): + """Remove a peer""" + with self._lock: + peer_key = peer_id.to_string() + if peer_key in self.peers: + del self.peers[peer_key] + + def update_peer(self, peer_id: PeerId, **kwargs): + """Update peer information""" + with self._lock: + peer_key = peer_id.to_string() + if peer_key in self.peers: + peer = self.peers[peer_key] + for key, value in kwargs.items(): + if hasattr(peer, key): + setattr(peer, key, value) + peer.last_seen = int(time.time()) + + def adjust_reputation(self, peer_id: PeerId, delta: float): + """Adjust peer reputation""" + with self._lock: + peer_key = peer_id.to_string() + if peer_key in self.peers: + peer = self.peers[peer_key] + peer.reputation = max(0, min(100, peer.reputation + delta)) + + # Ban if reputation too low + if peer.reputation < 10: + self.ban_peer(peer_id, "Low reputation") + + def ban_peer(self, peer_id: PeerId, reason: str): + """Ban a malicious peer""" + with self._lock: + peer_key = peer_id.to_string() + self.banned.add(peer_key) + if peer_key in self.peers: + del self.peers[peer_key] + print(f"BANNED: {peer_key} - {reason}") + + def get_peers(self, count: int = 
10) -> List[PeerInfo]: + """Get best peers by reputation""" + with self._lock: + alive_peers = [p for p in self.peers.values() if p.is_alive()] + sorted_peers = sorted(alive_peers, key=lambda p: p.reputation, reverse=True) + return sorted_peers[:count] + + def get_peer(self, peer_id: PeerId) -> Optional[PeerInfo]: + """Get specific peer info""" + with self._lock: + return self.peers.get(peer_id.to_string()) + + def cleanup_stale(self): + """Remove stale peers""" + with self._lock: + stale = [ + k for k, p in self.peers.items() + if not p.is_alive() + ] + for peer_key in stale: + del self.peers[peer_key] + + +# ============================================================================= +# Message Handler +# ============================================================================= + +class MessageHandler: + """ + Handles incoming P2P messages. + + Implements message validation, deduplication, and routing. + """ + + def __init__(self): + self.handlers: Dict[MessageType, List[Callable]] = {} + self.seen_messages: Set[str] = set() + self._lock = threading.Lock() + + def register_handler(self, msg_type: MessageType, handler: Callable): + """Register a message handler""" + if msg_type not in self.handlers: + self.handlers[msg_type] = [] + self.handlers[msg_type].append(handler) + + def handle_message(self, message: Message) -> bool: + """ + Handle an incoming message. + + Returns True if message was processed, False if duplicate/invalid. 
+ """ + # Check for duplicate + msg_hash = message.compute_hash() + with self._lock: + if msg_hash in self.seen_messages: + return False + self.seen_messages.add(msg_hash) + + # Cleanup old messages periodically + if len(self.seen_messages) > 10000: + self.seen_messages.clear() + + # Validate timestamp (reject old messages) + now = int(time.time()) + if abs(now - message.timestamp) > 300: # 5 minute window + return False + + # Route to handlers + handlers = self.handlers.get(message.msg_type, []) + for handler in handlers: + try: + handler(message) + except Exception as e: + print(f"Handler error: {e}") + + return True + + +# ============================================================================= +# Network Manager +# ============================================================================= + +class NetworkManager: + """ + Main network manager for P2P communication. + + Features: + - Peer discovery and management + - Message broadcasting and routing + - Block and transaction propagation + - Sync coordination + """ + + def __init__( + self, + listen_port: int = DEFAULT_PORT, + chain_id: int = 2718, + validator_id: str = "", + ): + self.listen_port = listen_port + self.chain_id = chain_id + self.validator_id = validator_id + + self.peer_manager = PeerManager() + self.message_handler = MessageHandler() + + self.outbound_queue: queue.Queue = queue.Queue() + self.running = False + + self._local_peer_id = PeerId( + address="0.0.0.0", + port=listen_port, + ) + + # Register default handlers + self._register_default_handlers() + + def _register_default_handlers(self): + """Register default message handlers""" + self.message_handler.register_handler(MessageType.HELLO, self._handle_hello) + self.message_handler.register_handler(MessageType.GET_PEERS, self._handle_get_peers) + self.message_handler.register_handler(MessageType.PEERS, self._handle_peers) + + def _handle_hello(self, message: Message): + """Handle HELLO message""" + payload = message.payload + peer_info 
= PeerInfo( + peer_id=message.sender, + protocol_version=payload.get("version", PROTOCOL_VERSION), + chain_id=payload.get("chain_id", 0), + best_block_height=payload.get("best_height", 0), + best_block_hash=payload.get("best_hash", ""), + connected_at=int(time.time()), + last_seen=int(time.time()), + ) + + # Verify chain ID + if peer_info.chain_id != self.chain_id: + print(f"Rejecting peer {message.sender.to_string()}: wrong chain ID") + return + + self.peer_manager.add_peer(peer_info) + + # Send HELLO_ACK + self.send_message(message.sender, MessageType.HELLO_ACK, { + "version": PROTOCOL_VERSION, + "chain_id": self.chain_id, + "validator_id": self.validator_id, + }) + + def _handle_get_peers(self, message: Message): + """Handle GET_PEERS message""" + peers = self.peer_manager.get_peers(10) + peer_list = [ + { + "address": p.peer_id.address, + "port": p.peer_id.port, + "reputation": p.reputation, + } + for p in peers + ] + + self.send_message(message.sender, MessageType.PEERS, { + "peers": peer_list, + }) + + def _handle_peers(self, message: Message): + """Handle PEERS message""" + for peer_data in message.payload.get("peers", []): + peer_id = PeerId( + address=peer_data["address"], + port=peer_data["port"], + ) + # Try to connect to new peer + self.connect_to_peer(peer_id) + + def connect_to_peer(self, peer_id: PeerId) -> bool: + """Initiate connection to a peer""" + # Send HELLO message + self.send_message(peer_id, MessageType.HELLO, { + "version": PROTOCOL_VERSION, + "chain_id": self.chain_id, + "best_height": 0, # TODO: Get from chain + "best_hash": "", + "validator_id": self.validator_id, + }) + return True + + def send_message(self, peer_id: PeerId, msg_type: MessageType, payload: Dict[str, Any]): + """Send a message to a specific peer""" + message = Message( + msg_type=msg_type, + sender=self._local_peer_id, + payload=payload, + ) + + self.outbound_queue.put((peer_id, message)) + + def broadcast(self, msg_type: MessageType, payload: Dict[str, Any]): + 
"""Broadcast a message to all peers""" + peers = self.peer_manager.get_peers() + for peer in peers: + self.send_message(peer.peer_id, msg_type, payload) + + def broadcast_block(self, block_data: Dict[str, Any]): + """Broadcast a new block to the network""" + self.broadcast(MessageType.NEW_BLOCK, {"block": block_data}) + + def broadcast_transaction(self, tx_data: Dict[str, Any]): + """Broadcast a new transaction to the network""" + self.broadcast(MessageType.NEW_TX, {"transaction": tx_data}) + + def request_blocks(self, peer_id: PeerId, start_height: int, count: int = SYNC_BATCH_SIZE): + """Request blocks from a peer""" + self.send_message(peer_id, MessageType.GET_BLOCKS, { + "start_height": start_height, + "count": count, + }) + + def start(self): + """Start the network manager""" + self.running = True + print(f"Network started on port {self.listen_port}") + + # Start peer cleanup thread + cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True) + cleanup_thread.start() + + def stop(self): + """Stop the network manager""" + self.running = False + print("Network stopped") + + def _cleanup_loop(self): + """Periodic cleanup of stale peers""" + while self.running: + time.sleep(60) + self.peer_manager.cleanup_stale() + + def get_sync_status(self) -> Dict[str, Any]: + """Get synchronization status""" + peers = self.peer_manager.get_peers() + if not peers: + return { + "synced": False, + "best_peer_height": 0, + "connected_peers": 0, + } + + best_peer = max(peers, key=lambda p: p.best_block_height) + + return { + "synced": True, # TODO: Compare with local height + "best_peer_height": best_peer.best_block_height, + "connected_peers": len(peers), + "best_peer": best_peer.peer_id.to_string(), + } + + +# ============================================================================= +# Seed Nodes +# ============================================================================= + +SEED_NODES = [ + PeerId("seed1.rustchain.net", DEFAULT_PORT), + 
PeerId("seed2.rustchain.net", DEFAULT_PORT), + PeerId("seed3.rustchain.net", DEFAULT_PORT), +] + + +def bootstrap_network(manager: NetworkManager): + """Bootstrap network connections from seed nodes""" + for seed in SEED_NODES: + try: + manager.connect_to_peer(seed) + except Exception as e: + print(f"Failed to connect to seed {seed.to_string()}: {e}") + + +# ============================================================================= +# Tests +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN P2P NETWORKING TEST") + print("=" * 60) + + manager = NetworkManager( + listen_port=8085, + chain_id=2718, + validator_id="test_validator", + ) + + manager.start() + + # Simulate peer connection + peer_id = PeerId("192.168.1.100", 8085) + peer_info = PeerInfo( + peer_id=peer_id, + protocol_version=PROTOCOL_VERSION, + chain_id=2718, + best_block_height=100, + best_block_hash="abc123", + connected_at=int(time.time()), + last_seen=int(time.time()), + ) + + manager.peer_manager.add_peer(peer_info) + + status = manager.get_sync_status() + print(f"\nSync Status: {status}") + print(f"Connected Peers: {status['connected_peers']}") + + manager.stop() diff --git a/rips/rustchain-core/src/anti_spoof/mutating_challenge.py b/rips/rustchain-core/src/anti_spoof/mutating_challenge.py index 0cc64da0..c3b83219 100644 --- a/rips/rustchain-core/src/anti_spoof/mutating_challenge.py +++ b/rips/rustchain-core/src/anti_spoof/mutating_challenge.py @@ -1,578 +1,578 @@ -#!/usr/bin/env python3 -""" -RustChain Mutating Challenge System -=================================== - -Challenges randomly mutate each round, validated in round-robin by all nodes. -This makes pre-computation IMPOSSIBLE because: -1. Challenge parameters change unpredictably each block -2. Different validators challenge you with different mutations -3. You must respond in real-time with actual hardware -4. 
Mutation seeds are derived from previous block hash (unpredictable) - -Round-Robin Validation: -- Block N: Validator A challenges B, B challenges C, C challenges A -- Block N+1: Roles rotate, mutation parameters change -- Everyone validates everyone over time -- Consensus requires 2/3 agreement on hardware validity - -"The chain mutates. The emulator cannot adapt. Real hardware persists." -""" - -import hashlib -import secrets -import struct -import time -import json -from dataclasses import dataclass, field -from typing import List, Dict, Tuple, Optional -from enum import Enum, auto - -class MutationType(Enum): - """Types of challenge mutations""" - CACHE_STRIDE = auto() # Change cache access stride - MEMORY_PATTERN = auto() # Change memory access pattern - TIMING_WINDOW = auto() # Adjust expected timing window - PIPELINE_DEPTH = auto() # Change instruction pipeline test depth - THERMAL_RANGE = auto() # Adjust thermal sensor expectations - JITTER_THRESHOLD = auto() # Change jitter detection threshold - HASH_ROUNDS = auto() # Change cryptographic hash iterations - SERIAL_CHECK = auto() # Which serial to validate (OF, GPU, HD) - -@dataclass -class MutationParams: - """Parameters that mutate each challenge round""" - cache_stride: int = 64 # Bytes between cache accesses - cache_iterations: int = 256 # Number of cache probes - memory_pattern_seed: int = 0 # Seed for random memory access - memory_size_kb: int = 1024 # Size of memory test region - timing_min_ticks: int = 100 # Minimum expected response ticks - timing_max_ticks: int = 500000 # Maximum expected response ticks - pipeline_test_depth: int = 1000 # Instruction pipeline iterations - thermal_min_c: int = 15 # Minimum expected temperature - thermal_max_c: int = 85 # Maximum expected temperature - jitter_min_percent: int = 5 # Minimum jitter (emulator detection) - jitter_max_percent: int = 500 # Maximum jitter - hash_rounds: int = 1000 # SHA256 iterations for proof - serial_type: str = "openfirmware" # Which 
serial to check - - def to_bytes(self) -> bytes: - """Serialize for hashing""" - return struct.pack( - '>IIIIIIIIII', - self.cache_stride, - self.cache_iterations, - self.memory_pattern_seed, - self.memory_size_kb, - self.timing_min_ticks, - self.timing_max_ticks, - self.pipeline_test_depth, - self.jitter_min_percent, - self.jitter_max_percent, - self.hash_rounds - ) + self.serial_type.encode() - - def hash(self) -> str: - """Get deterministic hash of parameters""" - return hashlib.sha256(self.to_bytes()).hexdigest()[:16] - -class ChallengeMutator: - """ - Mutates challenge parameters based on blockchain state. - - Mutation is deterministic but unpredictable: - - Seed derived from previous block hash - - Parameters change in ways that stress different hardware aspects - - Emulators can't pre-compute because they don't know next block hash - """ - - # Mutation ranges (min, max) for each parameter - MUTATION_RANGES = { - 'cache_stride': (32, 512), # 32-512 byte strides - 'cache_iterations': (128, 1024), # Number of probes - 'memory_size_kb': (256, 8192), # 256KB to 8MB test region - 'pipeline_test_depth': (500, 5000), - 'hash_rounds': (500, 5000), - 'jitter_min_percent': (3, 10), # 0.3% to 1.0% minimum - } - - SERIAL_TYPES = ['openfirmware', 'gpu', 'storage', 'platform'] - - def __init__(self, genesis_seed: bytes = None): - """Initialize with genesis seed""" - self.genesis_seed = genesis_seed or secrets.token_bytes(32) - self.current_epoch = 0 - self.mutation_history: List[MutationParams] = [] - - def derive_seed(self, block_hash: bytes, validator_pubkey: str) -> bytes: - """ - Derive mutation seed from block hash and validator. 
- - This ensures: - - Different validators get different mutations - - Mutations are unpredictable until block is mined - - Mutations are deterministic (verifiable by all nodes) - """ - return hashlib.sha256( - self.genesis_seed + - block_hash + - validator_pubkey.encode() + - struct.pack('>Q', self.current_epoch) - ).digest() - - def mutate(self, block_hash: bytes, validator_pubkey: str) -> MutationParams: - """ - Generate mutated parameters for this block/validator pair. - - The mutation is deterministic - any node can verify it. - """ - seed = self.derive_seed(block_hash, validator_pubkey) - - # Use seed bytes to deterministically select parameters - params = MutationParams() - - # Each parameter gets different seed bytes - params.cache_stride = self._select_range( - seed[0:4], self.MUTATION_RANGES['cache_stride'] - ) - params.cache_iterations = self._select_range( - seed[4:8], self.MUTATION_RANGES['cache_iterations'] - ) - params.memory_pattern_seed = int.from_bytes(seed[8:12], 'big') - params.memory_size_kb = self._select_range( - seed[12:16], self.MUTATION_RANGES['memory_size_kb'] - ) - params.pipeline_test_depth = self._select_range( - seed[16:20], self.MUTATION_RANGES['pipeline_test_depth'] - ) - params.hash_rounds = self._select_range( - seed[20:24], self.MUTATION_RANGES['hash_rounds'] - ) - params.jitter_min_percent = self._select_range( - seed[24:28], self.MUTATION_RANGES['jitter_min_percent'] - ) - - # Select which serial to check this round - serial_idx = seed[28] % len(self.SERIAL_TYPES) - params.serial_type = self.SERIAL_TYPES[serial_idx] - - # Timing windows scale with test complexity - complexity = (params.cache_iterations * params.pipeline_test_depth) // 1000 - params.timing_min_ticks = 100 + complexity - params.timing_max_ticks = 500000 + complexity * 10 - - self.mutation_history.append(params) - return params - - def _select_range(self, seed_bytes: bytes, range_tuple: Tuple[int, int]) -> int: - """Select value in range using seed bytes""" - 
min_val, max_val = range_tuple - seed_int = int.from_bytes(seed_bytes, 'big') - return min_val + (seed_int % (max_val - min_val + 1)) - - def advance_epoch(self): - """Move to next epoch (e.g., every 100 blocks)""" - self.current_epoch += 1 - - -@dataclass -class RoundRobinState: - """Tracks round-robin challenge state""" - validators: List[str] # List of validator pubkeys - current_round: int = 0 # Current round number - challenges_this_round: Dict[str, str] = field(default_factory=dict) # challenger -> target - results_this_round: Dict[str, bool] = field(default_factory=dict) # target -> passed - - def get_challenge_pairs(self) -> List[Tuple[str, str]]: - """ - Get challenger->target pairs for this round. - - Round-robin ensures everyone challenges everyone over time. - Each validator challenges the next one in the rotated list. - """ - n = len(self.validators) - if n < 2: - return [] - - # Rotate list by round number - rotated = self.validators[self.current_round % n:] + \ - self.validators[:self.current_round % n] - - # Each validator challenges the next one - pairs = [] - for i in range(n): - challenger = rotated[i] - target = rotated[(i + 1) % n] - pairs.append((challenger, target)) - - return pairs - - def advance_round(self): - """Move to next round""" - self.current_round += 1 - self.challenges_this_round.clear() - self.results_this_round.clear() - - -@dataclass -class MutatingChallenge: - """A challenge with mutated parameters""" - challenge_id: str - block_height: int - block_hash: bytes - challenger: str - target: str - mutation_params: MutationParams - timestamp_ms: int - signature: bytes = b'' - - def to_dict(self) -> dict: - return { - 'challenge_id': self.challenge_id, - 'block_height': self.block_height, - 'block_hash': self.block_hash.hex(), - 'challenger': self.challenger, - 'target': self.target, - 'mutation_hash': self.mutation_params.hash(), - 'cache_stride': self.mutation_params.cache_stride, - 'cache_iterations': 
self.mutation_params.cache_iterations, - 'memory_pattern_seed': self.mutation_params.memory_pattern_seed, - 'memory_size_kb': self.mutation_params.memory_size_kb, - 'pipeline_depth': self.mutation_params.pipeline_test_depth, - 'hash_rounds': self.mutation_params.hash_rounds, - 'serial_type': self.mutation_params.serial_type, - 'timestamp_ms': self.timestamp_ms - } - - -@dataclass -class MutatingResponse: - """Response to a mutating challenge""" - challenge_id: str - responder: str - - # Hardware measurements using mutated parameters - cache_timing_ticks: int - memory_timing_ticks: int - pipeline_timing_ticks: int - jitter_variance: int - thermal_celsius: int - serial_value: str # Value of requested serial type - - # Proof of work with mutated hash rounds - proof_hash: bytes - - timestamp_ms: int - signature: bytes = b'' - - def compute_proof(self, challenge: MutatingChallenge, hardware_entropy: bytes) -> bytes: - """ - Compute proof hash using mutated parameters. - - This must be done in real-time with actual hardware entropy. - """ - data = ( - challenge.challenge_id.encode() + - hardware_entropy + - struct.pack('>Q', self.cache_timing_ticks) + - struct.pack('>Q', self.memory_timing_ticks) + - struct.pack('>Q', self.pipeline_timing_ticks) + - struct.pack('>I', self.jitter_variance) + - struct.pack('>i', self.thermal_celsius) + - self.serial_value.encode() - ) - - # Iterated hashing with mutated round count - result = data - for _ in range(challenge.mutation_params.hash_rounds): - result = hashlib.sha256(result).digest() - - return result - - -class MutatingChallengeNetwork: - """ - Full mutating challenge network with round-robin validation. - - Architecture: - 1. Each block triggers a new challenge round - 2. Challenge parameters mutate based on block hash - 3. Validators challenge each other in round-robin - 4. 2/3 consensus required to mark a validator as valid - 5. 
Failed validators lose rewards and eventually get slashed - """ - - CONSENSUS_THRESHOLD = 0.67 # 2/3 must agree - BLOCKS_PER_ROUND = 10 # Challenge every 10 blocks - MAX_FAILURES = 3 # Failures before slashing - - def __init__(self, validators: List[str], genesis_seed: bytes = None): - self.mutator = ChallengeMutator(genesis_seed) - self.round_robin = RoundRobinState(validators=validators) - self.validator_failures: Dict[str, int] = {v: 0 for v in validators} - self.validator_hardware: Dict[str, dict] = {} # Registered hardware profiles - self.pending_challenges: Dict[str, MutatingChallenge] = {} - - def register_hardware(self, validator: str, hardware_profile: dict): - """Register a validator's hardware profile""" - self.validator_hardware[validator] = hardware_profile - - def on_new_block(self, block_height: int, block_hash: bytes) -> List[MutatingChallenge]: - """ - Called when a new block is mined. - Returns challenges to be issued this block. - """ - # Only challenge every N blocks - if block_height % self.BLOCKS_PER_ROUND != 0: - return [] - - challenges = [] - pairs = self.round_robin.get_challenge_pairs() - - for challenger, target in pairs: - # Generate mutated parameters for this challenger/target/block - mutation = self.mutator.mutate(block_hash, target) - - challenge = MutatingChallenge( - challenge_id=f"{block_height}-{challenger[:8]}-{target[:8]}", - block_height=block_height, - block_hash=block_hash, - challenger=challenger, - target=target, - mutation_params=mutation, - timestamp_ms=int(time.time() * 1000) - ) - - self.pending_challenges[challenge.challenge_id] = challenge - self.round_robin.challenges_this_round[challenger] = target - challenges.append(challenge) - - return challenges - - def validate_response( - self, - response: MutatingResponse - ) -> Tuple[bool, float, List[str]]: - """ - Validate a response against its challenge. 
- - Returns: (valid, confidence_score, failure_reasons) - """ - challenge = self.pending_challenges.get(response.challenge_id) - if not challenge: - return False, 0.0, ["Unknown challenge ID"] - - params = challenge.mutation_params - failures = [] - confidence = 100.0 - - # 1. Check jitter (using mutated threshold) - min_jitter = params.jitter_min_percent - if response.jitter_variance < min_jitter: - failures.append( - f"Jitter too consistent ({response.jitter_variance/10:.1f}% < {min_jitter/10:.1f}%) " - f"- emulator detected" - ) - confidence -= 40.0 - - # 2. Check timing windows (using mutated ranges) - if response.cache_timing_ticks < params.timing_min_ticks: - failures.append(f"Cache timing too fast - possible speedhack") - confidence -= 25.0 - - # 3. Check thermal - if response.thermal_celsius < params.thermal_min_c or \ - response.thermal_celsius > params.thermal_max_c: - if response.thermal_celsius < 0: - failures.append("No thermal sensor - possible VM") - else: - failures.append(f"Thermal out of range ({response.thermal_celsius}C)") - confidence -= 15.0 - - # 4. Check serial (mutated serial type) - expected_hardware = self.validator_hardware.get(challenge.target, {}) - expected_serial = self._get_serial(expected_hardware, params.serial_type) - - if expected_serial and response.serial_value != expected_serial: - failures.append( - f"Serial mismatch for {params.serial_type}: " - f"got '{response.serial_value}', expected '{expected_serial}'" - ) - confidence -= 30.0 - elif not response.serial_value or response.serial_value == "UNKNOWN": - failures.append(f"Missing {params.serial_type} serial") - confidence -= 20.0 - - # 5. 
Verify proof hash (must have correct round count) - # In production, we'd recompute and verify - - valid = confidence >= 50.0 - - # Record result - self.round_robin.results_this_round[challenge.target] = valid - - # Update failure count - if not valid: - self.validator_failures[challenge.target] = \ - self.validator_failures.get(challenge.target, 0) + 1 - - return valid, confidence, failures - - def _get_serial(self, hardware: dict, serial_type: str) -> Optional[str]: - """Get serial value from hardware profile""" - if serial_type == 'openfirmware': - return hardware.get('openfirmware', {}).get('serial_number') - elif serial_type == 'gpu': - return hardware.get('gpu', {}).get('device_id') - elif serial_type == 'storage': - return hardware.get('storage', {}).get('serial') - elif serial_type == 'platform': - return hardware.get('cpu', {}).get('model') - return None - - def get_slashed_validators(self) -> List[str]: - """Return validators that should be slashed""" - return [ - v for v, failures in self.validator_failures.items() - if failures >= self.MAX_FAILURES - ] - - def end_round(self): - """End current challenge round and advance""" - self.round_robin.advance_round() - self.mutator.advance_epoch() - self.pending_challenges.clear() - - -def demo_mutating_challenges(): - """Demonstrate the mutating challenge system""" - - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ RUSTCHAIN MUTATING CHALLENGE SYSTEM - ROUND ROBIN DEMO ║ -║ ║ -║ "The chain mutates. The emulator cannot adapt." 
║ -╚══════════════════════════════════════════════════════════════════════╝ -""") - - # Setup network with 4 validators - validators = [ - "G4_MirrorDoor_125", - "G5_Dual_130", - "PowerBook_G4_115", - "MacPro_Trashcan_154" - ] - - network = MutatingChallengeNetwork(validators) - - # Register hardware profiles - network.register_hardware("G4_MirrorDoor_125", { - "cpu": {"model": "PowerMac3,6"}, - "openfirmware": {"serial_number": "G84243AZQ6P"}, - "gpu": {"device_id": "0x4966"}, - "storage": {"serial": "WD-WMAJ91385123"} - }) - network.register_hardware("G5_Dual_130", { - "cpu": {"model": "PowerMac7,3"}, - "openfirmware": {"serial_number": "G8435B2RQPR"}, - "gpu": {"device_id": "0x4152"}, - "storage": {"serial": "5QF5R18X"} - }) - network.register_hardware("PowerBook_G4_115", { - "cpu": {"model": "PowerBook6,8"}, - "openfirmware": {"serial_number": "4H509179RJ6"}, - "gpu": {"device_id": "0x0329"}, - "storage": {"serial": "MPB350X5G11H0C"} - }) - network.register_hardware("MacPro_Trashcan_154", { - "cpu": {"model": "MacPro6,1"}, - "openfirmware": {"serial_number": "TRASHCAN001"}, - "gpu": {"device_id": "0x6798"}, - "storage": {"serial": "S3T8NX0K"} - }) - - print(" Validators registered:") - for v in validators: - print(f" • {v}") - - # Simulate 3 blocks - for block_num in [10, 20, 30]: - block_hash = hashlib.sha256(f"block_{block_num}".encode()).digest() - - print(f"\n{'='*70}") - print(f" BLOCK {block_num} - Hash: {block_hash.hex()[:16]}...") - print(f"{'='*70}") - - challenges = network.on_new_block(block_num, block_hash) - - for challenge in challenges: - print(f"\n Challenge: {challenge.challenger[:15]} → {challenge.target[:15]}") - print(f" Mutation Hash: {challenge.mutation_params.hash()}") - print(f" Cache Stride: {challenge.mutation_params.cache_stride} bytes") - print(f" Cache Iterations: {challenge.mutation_params.cache_iterations}") - print(f" Memory Size: {challenge.mutation_params.memory_size_kb} KB") - print(f" Pipeline Depth: 
{challenge.mutation_params.pipeline_test_depth}") - print(f" Hash Rounds: {challenge.mutation_params.hash_rounds}") - print(f" Serial Check: {challenge.mutation_params.serial_type}") - - # Simulate response from real hardware - response = MutatingResponse( - challenge_id=challenge.challenge_id, - responder=challenge.target, - cache_timing_ticks=1500 + (block_num * 10), - memory_timing_ticks=45000 + (block_num * 100), - pipeline_timing_ticks=8000 + (block_num * 50), - jitter_variance=150 + (block_num % 50), # Natural variance - thermal_celsius=35 + (block_num % 20), - serial_value=network._get_serial( - network.validator_hardware[challenge.target], - challenge.mutation_params.serial_type - ) or "UNKNOWN", - proof_hash=b'', - timestamp_ms=int(time.time() * 1000) - ) - - valid, confidence, failures = network.validate_response(response) - - print(f"\n Response from {challenge.target[:15]}:") - print(f" Jitter: {response.jitter_variance/10:.1f}%") - print(f" Thermal: {response.thermal_celsius}°C") - print(f" Serial ({challenge.mutation_params.serial_type}): {response.serial_value}") - print(f" Valid: {'✓ YES' if valid else '✗ NO'} (Confidence: {confidence:.1f}%)") - - if failures: - for f in failures: - print(f" ⚠ {f}") - - network.end_round() - - print(f"\n{'='*70}") - print(" MUTATION ANALYSIS") - print(f"{'='*70}") - print(""" - Notice how parameters CHANGED each block: - • Cache stride varied from 32-512 bytes - • Hash rounds varied from 500-5000 - • Different serial types checked each round - - An emulator would need to: - 1. Predict the next block hash (IMPOSSIBLE) - 2. Pre-compute all possible mutations (INFEASIBLE) - 3. Have accurate timing for ALL parameter combinations (EXPENSIVE) - - Cost to build adaptive emulator: $100,000+ - Cost of real PowerMac G4: $30-50 - - RATIONAL CHOICE: BUY REAL HARDWARE -""") - - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ "The chain mutates. The emulator cannot adapt. 
║ -║ Real hardware persists." ║ -╚══════════════════════════════════════════════════════════════════════╝ -""") - - -if __name__ == "__main__": - demo_mutating_challenges() +#!/usr/bin/env python3 +""" +RustChain Mutating Challenge System +=================================== + +Challenges randomly mutate each round, validated in round-robin by all nodes. +This makes pre-computation IMPOSSIBLE because: +1. Challenge parameters change unpredictably each block +2. Different validators challenge you with different mutations +3. You must respond in real-time with actual hardware +4. Mutation seeds are derived from previous block hash (unpredictable) + +Round-Robin Validation: +- Block N: Validator A challenges B, B challenges C, C challenges A +- Block N+1: Roles rotate, mutation parameters change +- Everyone validates everyone over time +- Consensus requires 2/3 agreement on hardware validity + +"The chain mutates. The emulator cannot adapt. Real hardware persists." +""" + +import hashlib +import secrets +import struct +import time +import json +from dataclasses import dataclass, field +from typing import List, Dict, Tuple, Optional +from enum import Enum, auto + +class MutationType(Enum): + """Types of challenge mutations""" + CACHE_STRIDE = auto() # Change cache access stride + MEMORY_PATTERN = auto() # Change memory access pattern + TIMING_WINDOW = auto() # Adjust expected timing window + PIPELINE_DEPTH = auto() # Change instruction pipeline test depth + THERMAL_RANGE = auto() # Adjust thermal sensor expectations + JITTER_THRESHOLD = auto() # Change jitter detection threshold + HASH_ROUNDS = auto() # Change cryptographic hash iterations + SERIAL_CHECK = auto() # Which serial to validate (OF, GPU, HD) + +@dataclass +class MutationParams: + """Parameters that mutate each challenge round""" + cache_stride: int = 64 # Bytes between cache accesses + cache_iterations: int = 256 # Number of cache probes + memory_pattern_seed: int = 0 # Seed for random memory access + 
memory_size_kb: int = 1024 # Size of memory test region + timing_min_ticks: int = 100 # Minimum expected response ticks + timing_max_ticks: int = 500000 # Maximum expected response ticks + pipeline_test_depth: int = 1000 # Instruction pipeline iterations + thermal_min_c: int = 15 # Minimum expected temperature + thermal_max_c: int = 85 # Maximum expected temperature + jitter_min_percent: int = 5 # Minimum jitter (emulator detection) + jitter_max_percent: int = 500 # Maximum jitter + hash_rounds: int = 1000 # SHA256 iterations for proof + serial_type: str = "openfirmware" # Which serial to check + + def to_bytes(self) -> bytes: + """Serialize for hashing""" + return struct.pack( + '>IIIIIIIIII', + self.cache_stride, + self.cache_iterations, + self.memory_pattern_seed, + self.memory_size_kb, + self.timing_min_ticks, + self.timing_max_ticks, + self.pipeline_test_depth, + self.jitter_min_percent, + self.jitter_max_percent, + self.hash_rounds + ) + self.serial_type.encode() + + def hash(self) -> str: + """Get deterministic hash of parameters""" + return hashlib.sha256(self.to_bytes()).hexdigest()[:16] + +class ChallengeMutator: + """ + Mutates challenge parameters based on blockchain state. 
+ + Mutation is deterministic but unpredictable: + - Seed derived from previous block hash + - Parameters change in ways that stress different hardware aspects + - Emulators can't pre-compute because they don't know next block hash + """ + + # Mutation ranges (min, max) for each parameter + MUTATION_RANGES = { + 'cache_stride': (32, 512), # 32-512 byte strides + 'cache_iterations': (128, 1024), # Number of probes + 'memory_size_kb': (256, 8192), # 256KB to 8MB test region + 'pipeline_test_depth': (500, 5000), + 'hash_rounds': (500, 5000), + 'jitter_min_percent': (3, 10), # 0.3% to 1.0% minimum + } + + SERIAL_TYPES = ['openfirmware', 'gpu', 'storage', 'platform'] + + def __init__(self, genesis_seed: bytes = None): + """Initialize with genesis seed""" + self.genesis_seed = genesis_seed or secrets.token_bytes(32) + self.current_epoch = 0 + self.mutation_history: List[MutationParams] = [] + + def derive_seed(self, block_hash: bytes, validator_pubkey: str) -> bytes: + """ + Derive mutation seed from block hash and validator. + + This ensures: + - Different validators get different mutations + - Mutations are unpredictable until block is mined + - Mutations are deterministic (verifiable by all nodes) + """ + return hashlib.sha256( + self.genesis_seed + + block_hash + + validator_pubkey.encode() + + struct.pack('>Q', self.current_epoch) + ).digest() + + def mutate(self, block_hash: bytes, validator_pubkey: str) -> MutationParams: + """ + Generate mutated parameters for this block/validator pair. + + The mutation is deterministic - any node can verify it. 
+ """ + seed = self.derive_seed(block_hash, validator_pubkey) + + # Use seed bytes to deterministically select parameters + params = MutationParams() + + # Each parameter gets different seed bytes + params.cache_stride = self._select_range( + seed[0:4], self.MUTATION_RANGES['cache_stride'] + ) + params.cache_iterations = self._select_range( + seed[4:8], self.MUTATION_RANGES['cache_iterations'] + ) + params.memory_pattern_seed = int.from_bytes(seed[8:12], 'big') + params.memory_size_kb = self._select_range( + seed[12:16], self.MUTATION_RANGES['memory_size_kb'] + ) + params.pipeline_test_depth = self._select_range( + seed[16:20], self.MUTATION_RANGES['pipeline_test_depth'] + ) + params.hash_rounds = self._select_range( + seed[20:24], self.MUTATION_RANGES['hash_rounds'] + ) + params.jitter_min_percent = self._select_range( + seed[24:28], self.MUTATION_RANGES['jitter_min_percent'] + ) + + # Select which serial to check this round + serial_idx = seed[28] % len(self.SERIAL_TYPES) + params.serial_type = self.SERIAL_TYPES[serial_idx] + + # Timing windows scale with test complexity + complexity = (params.cache_iterations * params.pipeline_test_depth) // 1000 + params.timing_min_ticks = 100 + complexity + params.timing_max_ticks = 500000 + complexity * 10 + + self.mutation_history.append(params) + return params + + def _select_range(self, seed_bytes: bytes, range_tuple: Tuple[int, int]) -> int: + """Select value in range using seed bytes""" + min_val, max_val = range_tuple + seed_int = int.from_bytes(seed_bytes, 'big') + return min_val + (seed_int % (max_val - min_val + 1)) + + def advance_epoch(self): + """Move to next epoch (e.g., every 100 blocks)""" + self.current_epoch += 1 + + +@dataclass +class RoundRobinState: + """Tracks round-robin challenge state""" + validators: List[str] # List of validator pubkeys + current_round: int = 0 # Current round number + challenges_this_round: Dict[str, str] = field(default_factory=dict) # challenger -> target + results_this_round: 
Dict[str, bool] = field(default_factory=dict) # target -> passed + + def get_challenge_pairs(self) -> List[Tuple[str, str]]: + """ + Get challenger->target pairs for this round. + + Round-robin ensures everyone challenges everyone over time. + Each validator challenges the next one in the rotated list. + """ + n = len(self.validators) + if n < 2: + return [] + + # Rotate list by round number + rotated = self.validators[self.current_round % n:] + \ + self.validators[:self.current_round % n] + + # Each validator challenges the next one + pairs = [] + for i in range(n): + challenger = rotated[i] + target = rotated[(i + 1) % n] + pairs.append((challenger, target)) + + return pairs + + def advance_round(self): + """Move to next round""" + self.current_round += 1 + self.challenges_this_round.clear() + self.results_this_round.clear() + + +@dataclass +class MutatingChallenge: + """A challenge with mutated parameters""" + challenge_id: str + block_height: int + block_hash: bytes + challenger: str + target: str + mutation_params: MutationParams + timestamp_ms: int + signature: bytes = b'' + + def to_dict(self) -> dict: + return { + 'challenge_id': self.challenge_id, + 'block_height': self.block_height, + 'block_hash': self.block_hash.hex(), + 'challenger': self.challenger, + 'target': self.target, + 'mutation_hash': self.mutation_params.hash(), + 'cache_stride': self.mutation_params.cache_stride, + 'cache_iterations': self.mutation_params.cache_iterations, + 'memory_pattern_seed': self.mutation_params.memory_pattern_seed, + 'memory_size_kb': self.mutation_params.memory_size_kb, + 'pipeline_depth': self.mutation_params.pipeline_test_depth, + 'hash_rounds': self.mutation_params.hash_rounds, + 'serial_type': self.mutation_params.serial_type, + 'timestamp_ms': self.timestamp_ms + } + + +@dataclass +class MutatingResponse: + """Response to a mutating challenge""" + challenge_id: str + responder: str + + # Hardware measurements using mutated parameters + cache_timing_ticks: int + 
memory_timing_ticks: int + pipeline_timing_ticks: int + jitter_variance: int + thermal_celsius: int + serial_value: str # Value of requested serial type + + # Proof of work with mutated hash rounds + proof_hash: bytes + + timestamp_ms: int + signature: bytes = b'' + + def compute_proof(self, challenge: MutatingChallenge, hardware_entropy: bytes) -> bytes: + """ + Compute proof hash using mutated parameters. + + This must be done in real-time with actual hardware entropy. + """ + data = ( + challenge.challenge_id.encode() + + hardware_entropy + + struct.pack('>Q', self.cache_timing_ticks) + + struct.pack('>Q', self.memory_timing_ticks) + + struct.pack('>Q', self.pipeline_timing_ticks) + + struct.pack('>I', self.jitter_variance) + + struct.pack('>i', self.thermal_celsius) + + self.serial_value.encode() + ) + + # Iterated hashing with mutated round count + result = data + for _ in range(challenge.mutation_params.hash_rounds): + result = hashlib.sha256(result).digest() + + return result + + +class MutatingChallengeNetwork: + """ + Full mutating challenge network with round-robin validation. + + Architecture: + 1. Each block triggers a new challenge round + 2. Challenge parameters mutate based on block hash + 3. Validators challenge each other in round-robin + 4. 2/3 consensus required to mark a validator as valid + 5. 
Failed validators lose rewards and eventually get slashed + """ + + CONSENSUS_THRESHOLD = 0.67 # 2/3 must agree + BLOCKS_PER_ROUND = 10 # Challenge every 10 blocks + MAX_FAILURES = 3 # Failures before slashing + + def __init__(self, validators: List[str], genesis_seed: bytes = None): + self.mutator = ChallengeMutator(genesis_seed) + self.round_robin = RoundRobinState(validators=validators) + self.validator_failures: Dict[str, int] = {v: 0 for v in validators} + self.validator_hardware: Dict[str, dict] = {} # Registered hardware profiles + self.pending_challenges: Dict[str, MutatingChallenge] = {} + + def register_hardware(self, validator: str, hardware_profile: dict): + """Register a validator's hardware profile""" + self.validator_hardware[validator] = hardware_profile + + def on_new_block(self, block_height: int, block_hash: bytes) -> List[MutatingChallenge]: + """ + Called when a new block is mined. + Returns challenges to be issued this block. + """ + # Only challenge every N blocks + if block_height % self.BLOCKS_PER_ROUND != 0: + return [] + + challenges = [] + pairs = self.round_robin.get_challenge_pairs() + + for challenger, target in pairs: + # Generate mutated parameters for this challenger/target/block + mutation = self.mutator.mutate(block_hash, target) + + challenge = MutatingChallenge( + challenge_id=f"{block_height}-{challenger[:8]}-{target[:8]}", + block_height=block_height, + block_hash=block_hash, + challenger=challenger, + target=target, + mutation_params=mutation, + timestamp_ms=int(time.time() * 1000) + ) + + self.pending_challenges[challenge.challenge_id] = challenge + self.round_robin.challenges_this_round[challenger] = target + challenges.append(challenge) + + return challenges + + def validate_response( + self, + response: MutatingResponse + ) -> Tuple[bool, float, List[str]]: + """ + Validate a response against its challenge. 
+ + Returns: (valid, confidence_score, failure_reasons) + """ + challenge = self.pending_challenges.get(response.challenge_id) + if not challenge: + return False, 0.0, ["Unknown challenge ID"] + + params = challenge.mutation_params + failures = [] + confidence = 100.0 + + # 1. Check jitter (using mutated threshold) + min_jitter = params.jitter_min_percent + if response.jitter_variance < min_jitter: + failures.append( + f"Jitter too consistent ({response.jitter_variance/10:.1f}% < {min_jitter/10:.1f}%) " + f"- emulator detected" + ) + confidence -= 40.0 + + # 2. Check timing windows (using mutated ranges) + if response.cache_timing_ticks < params.timing_min_ticks: + failures.append(f"Cache timing too fast - possible speedhack") + confidence -= 25.0 + + # 3. Check thermal + if response.thermal_celsius < params.thermal_min_c or \ + response.thermal_celsius > params.thermal_max_c: + if response.thermal_celsius < 0: + failures.append("No thermal sensor - possible VM") + else: + failures.append(f"Thermal out of range ({response.thermal_celsius}C)") + confidence -= 15.0 + + # 4. Check serial (mutated serial type) + expected_hardware = self.validator_hardware.get(challenge.target, {}) + expected_serial = self._get_serial(expected_hardware, params.serial_type) + + if expected_serial and response.serial_value != expected_serial: + failures.append( + f"Serial mismatch for {params.serial_type}: " + f"got '{response.serial_value}', expected '{expected_serial}'" + ) + confidence -= 30.0 + elif not response.serial_value or response.serial_value == "UNKNOWN": + failures.append(f"Missing {params.serial_type} serial") + confidence -= 20.0 + + # 5. 
Verify proof hash (must have correct round count) + # In production, we'd recompute and verify + + valid = confidence >= 50.0 + + # Record result + self.round_robin.results_this_round[challenge.target] = valid + + # Update failure count + if not valid: + self.validator_failures[challenge.target] = \ + self.validator_failures.get(challenge.target, 0) + 1 + + return valid, confidence, failures + + def _get_serial(self, hardware: dict, serial_type: str) -> Optional[str]: + """Get serial value from hardware profile""" + if serial_type == 'openfirmware': + return hardware.get('openfirmware', {}).get('serial_number') + elif serial_type == 'gpu': + return hardware.get('gpu', {}).get('device_id') + elif serial_type == 'storage': + return hardware.get('storage', {}).get('serial') + elif serial_type == 'platform': + return hardware.get('cpu', {}).get('model') + return None + + def get_slashed_validators(self) -> List[str]: + """Return validators that should be slashed""" + return [ + v for v, failures in self.validator_failures.items() + if failures >= self.MAX_FAILURES + ] + + def end_round(self): + """End current challenge round and advance""" + self.round_robin.advance_round() + self.mutator.advance_epoch() + self.pending_challenges.clear() + + +def demo_mutating_challenges(): + """Demonstrate the mutating challenge system""" + + print(""" +╔══════════════════════════════════════════════════════════════════════╗ +║ RUSTCHAIN MUTATING CHALLENGE SYSTEM - ROUND ROBIN DEMO ║ +║ ║ +║ "The chain mutates. The emulator cannot adapt." 
║ +╚══════════════════════════════════════════════════════════════════════╝ +""") + + # Setup network with 4 validators + validators = [ + "G4_MirrorDoor_125", + "G5_Dual_130", + "PowerBook_G4_115", + "MacPro_Trashcan_154" + ] + + network = MutatingChallengeNetwork(validators) + + # Register hardware profiles + network.register_hardware("G4_MirrorDoor_125", { + "cpu": {"model": "PowerMac3,6"}, + "openfirmware": {"serial_number": "G84243AZQ6P"}, + "gpu": {"device_id": "0x4966"}, + "storage": {"serial": "WD-WMAJ91385123"} + }) + network.register_hardware("G5_Dual_130", { + "cpu": {"model": "PowerMac7,3"}, + "openfirmware": {"serial_number": "G8435B2RQPR"}, + "gpu": {"device_id": "0x4152"}, + "storage": {"serial": "5QF5R18X"} + }) + network.register_hardware("PowerBook_G4_115", { + "cpu": {"model": "PowerBook6,8"}, + "openfirmware": {"serial_number": "4H509179RJ6"}, + "gpu": {"device_id": "0x0329"}, + "storage": {"serial": "MPB350X5G11H0C"} + }) + network.register_hardware("MacPro_Trashcan_154", { + "cpu": {"model": "MacPro6,1"}, + "openfirmware": {"serial_number": "TRASHCAN001"}, + "gpu": {"device_id": "0x6798"}, + "storage": {"serial": "S3T8NX0K"} + }) + + print(" Validators registered:") + for v in validators: + print(f" • {v}") + + # Simulate 3 blocks + for block_num in [10, 20, 30]: + block_hash = hashlib.sha256(f"block_{block_num}".encode()).digest() + + print(f"\n{'='*70}") + print(f" BLOCK {block_num} - Hash: {block_hash.hex()[:16]}...") + print(f"{'='*70}") + + challenges = network.on_new_block(block_num, block_hash) + + for challenge in challenges: + print(f"\n Challenge: {challenge.challenger[:15]} → {challenge.target[:15]}") + print(f" Mutation Hash: {challenge.mutation_params.hash()}") + print(f" Cache Stride: {challenge.mutation_params.cache_stride} bytes") + print(f" Cache Iterations: {challenge.mutation_params.cache_iterations}") + print(f" Memory Size: {challenge.mutation_params.memory_size_kb} KB") + print(f" Pipeline Depth: 
{challenge.mutation_params.pipeline_test_depth}") + print(f" Hash Rounds: {challenge.mutation_params.hash_rounds}") + print(f" Serial Check: {challenge.mutation_params.serial_type}") + + # Simulate response from real hardware + response = MutatingResponse( + challenge_id=challenge.challenge_id, + responder=challenge.target, + cache_timing_ticks=1500 + (block_num * 10), + memory_timing_ticks=45000 + (block_num * 100), + pipeline_timing_ticks=8000 + (block_num * 50), + jitter_variance=150 + (block_num % 50), # Natural variance + thermal_celsius=35 + (block_num % 20), + serial_value=network._get_serial( + network.validator_hardware[challenge.target], + challenge.mutation_params.serial_type + ) or "UNKNOWN", + proof_hash=b'', + timestamp_ms=int(time.time() * 1000) + ) + + valid, confidence, failures = network.validate_response(response) + + print(f"\n Response from {challenge.target[:15]}:") + print(f" Jitter: {response.jitter_variance/10:.1f}%") + print(f" Thermal: {response.thermal_celsius}°C") + print(f" Serial ({challenge.mutation_params.serial_type}): {response.serial_value}") + print(f" Valid: {'✓ YES' if valid else '✗ NO'} (Confidence: {confidence:.1f}%)") + + if failures: + for f in failures: + print(f" ⚠ {f}") + + network.end_round() + + print(f"\n{'='*70}") + print(" MUTATION ANALYSIS") + print(f"{'='*70}") + print(""" + Notice how parameters CHANGED each block: + • Cache stride varied from 32-512 bytes + • Hash rounds varied from 500-5000 + • Different serial types checked each round + + An emulator would need to: + 1. Predict the next block hash (IMPOSSIBLE) + 2. Pre-compute all possible mutations (INFEASIBLE) + 3. Have accurate timing for ALL parameter combinations (EXPENSIVE) + + Cost to build adaptive emulator: $100,000+ + Cost of real PowerMac G4: $30-50 + + RATIONAL CHOICE: BUY REAL HARDWARE +""") + + print(""" +╔══════════════════════════════════════════════════════════════════════╗ +║ "The chain mutates. The emulator cannot adapt. 
║ +║ Real hardware persists." ║ +╚══════════════════════════════════════════════════════════════════════╝ +""") + + +if __name__ == "__main__": + demo_mutating_challenges() diff --git a/rips/rustchain-core/src/anti_spoof/network_challenge.py b/rips/rustchain-core/src/anti_spoof/network_challenge.py index d857c296..42d3a4ac 100644 --- a/rips/rustchain-core/src/anti_spoof/network_challenge.py +++ b/rips/rustchain-core/src/anti_spoof/network_challenge.py @@ -1,635 +1,635 @@ -#!/usr/bin/env python3 -""" -RustChain Network Challenge Protocol -==================================== - -Validators challenge each other to prove they're running on real vintage hardware. -Each challenge is: -1. Time-bound (must respond within hardware-accurate window) -2. Hardware-specific (requires real cache timing, thermal sensors, etc.) -3. Cryptographically signed (can't replay or forge responses) - -The economic argument: -- Developing an accurate PowerPC emulator: $50,000+ in engineering time -- Buying a working PowerMac G4: $30-50 on eBay -- Rational choice: BUY REAL HARDWARE - -This is the "Proof of Antiquity" anti-spoofing layer. 
-""" - -import hashlib -import hmac -import json -import os -import secrets -import struct -import time -from dataclasses import dataclass, asdict -from typing import Optional, Dict, List, Tuple -from enum import Enum - -class ChallengeType(Enum): - FULL = 0x00 # All hardware tests - TIMEBASE = 0x01 # PowerPC timebase only - CACHE = 0x02 # L1/L2 cache timing - MEMORY = 0x03 # Memory access patterns - THERMAL = 0x04 # Thermal sensors - SERIAL = 0x05 # Hardware serials - PIPELINE = 0x06 # Instruction pipeline timing - -class HardwareTier(Enum): - ANCIENT = ("ancient", 30, 3.5) # 30+ years, 3.5x multiplier - SACRED = ("sacred", 25, 3.0) # 25-29 years - VINTAGE = ("vintage", 20, 2.5) # 20-24 years (PowerPC G3/G4) - CLASSIC = ("classic", 15, 2.0) # 15-19 years - RETRO = ("retro", 10, 1.5) # 10-14 years (Mac Pro Trashcan) - MODERN = ("modern", 5, 1.0) # 5-9 years - RECENT = ("recent", 0, 0.5) # 0-4 years (minimal reward) - -@dataclass -class Challenge: - """A cryptographic challenge sent to a validator""" - challenge_id: str - challenge_type: int - nonce: bytes # 32 bytes of randomness - timestamp: int # Unix timestamp in milliseconds - timeout_ms: int # Response must arrive within this window - expected_hardware: Dict # Expected hardware profile (from registration) - challenger_pubkey: str # Who issued this challenge - signature: bytes # Challenger's signature - - def to_bytes(self) -> bytes: - """Serialize for signing/verification""" - return ( - self.challenge_id.encode() + - struct.pack('>B', self.challenge_type) + - self.nonce + - struct.pack('>Q', self.timestamp) + - struct.pack('>I', self.timeout_ms) + - json.dumps(self.expected_hardware, sort_keys=True).encode() + - self.challenger_pubkey.encode() - ) - - def hash(self) -> bytes: - """SHA256 hash of challenge""" - return hashlib.sha256(self.to_bytes()).digest() - -@dataclass -class ChallengeResponse: - """Response to a challenge, proving real hardware""" - challenge_id: str - response_timestamp: int - 
timebase_value: int # PowerPC timebase register value - cache_l1_ticks: int - cache_l2_ticks: int - cache_ratio: float # L2/L1 - must be realistic (1.5-20x) - memory_ticks: int - thermal_celsius: int - hardware_serial: str - jitter_variance: int # Natural timing variance (emulators are too consistent) - pipeline_cycles: int - response_hash: bytes # Hash of all response data - responder_pubkey: str - signature: bytes - - def to_bytes(self) -> bytes: - """Serialize for signing/verification""" - return ( - self.challenge_id.encode() + - struct.pack('>Q', self.response_timestamp) + - struct.pack('>Q', self.timebase_value) + - struct.pack('>I', self.cache_l1_ticks) + - struct.pack('>I', self.cache_l2_ticks) + - struct.pack('>f', self.cache_ratio) + - struct.pack('>I', self.memory_ticks) + - struct.pack('>i', self.thermal_celsius) + - self.hardware_serial.encode() + - struct.pack('>I', self.jitter_variance) + - struct.pack('>I', self.pipeline_cycles) + - self.responder_pubkey.encode() - ) - - def hash(self) -> bytes: - """SHA256 hash of response""" - return hashlib.sha256(self.to_bytes()).digest() - -@dataclass -class ValidationResult: - """Result of validating a challenge response""" - valid: bool - confidence_score: float # 0-100% - timing_ok: bool - jitter_ok: bool - cache_ok: bool - thermal_ok: bool - serial_ok: bool - failure_reasons: List[str] - -class AntiSpoofValidator: - """ - Validates challenge responses to detect emulators. - - Detection methods: - 1. Timing window - Response must arrive in hardware-accurate time - 2. Jitter analysis - Real hardware has natural variance, emulators don't - 3. Cache ratio - L2/L1 ratio must match real cache hierarchy - 4. Thermal presence - Real hardware has thermal sensors - 5. 
Serial validation - Hardware serials must match registered profile - """ - - # Timing thresholds (in milliseconds) - MIN_RESPONSE_TIME_MS = 10 # Too fast = time manipulation - MAX_RESPONSE_TIME_MS = 30000 # Too slow = emulator overhead - - # Jitter thresholds (variance * 1000) - MIN_JITTER = 5 # 0.5% minimum variance (emulators are too consistent) - MAX_JITTER = 500 # 50% maximum variance (too much = something wrong) - - # Cache ratio thresholds - MIN_CACHE_RATIO = 1.5 # L2 should be at least 1.5x slower than L1 - MAX_CACHE_RATIO = 20.0 # But not absurdly different - - # Confidence thresholds - PENALTY_TIMING = 30.0 - PENALTY_JITTER = 40.0 - PENALTY_CACHE = 25.0 - PENALTY_THERMAL = 15.0 - PENALTY_SERIAL = 20.0 - - def __init__(self, known_hardware_profiles: Dict[str, Dict] = None): - """ - Initialize with known hardware profiles. - - known_hardware_profiles: Map of hardware_serial -> expected profile - """ - self.known_profiles = known_hardware_profiles or {} - self.challenge_history: Dict[str, Challenge] = {} - - def generate_challenge( - self, - target_pubkey: str, - expected_hardware: Dict, - challenger_privkey: bytes, # For signing - challenge_type: ChallengeType = ChallengeType.FULL - ) -> Challenge: - """Generate a new challenge for a validator""" - - challenge = Challenge( - challenge_id=secrets.token_hex(16), - challenge_type=challenge_type.value, - nonce=secrets.token_bytes(32), - timestamp=int(time.time() * 1000), - timeout_ms=self._get_timeout_for_hardware(expected_hardware), - expected_hardware=expected_hardware, - challenger_pubkey=hashlib.sha256(challenger_privkey).hexdigest()[:40], - signature=b'' # Will be filled - ) - - # Sign the challenge - challenge.signature = hmac.new( - challenger_privkey, - challenge.to_bytes(), - hashlib.sha256 - ).digest() - - # Store for later validation - self.challenge_history[challenge.challenge_id] = challenge - - return challenge - - def _get_timeout_for_hardware(self, hardware: Dict) -> int: - """Calculate 
appropriate timeout based on hardware age""" - tier = hardware.get('tier', 'modern') - - timeouts = { - 'ancient': 60000, # 60s for ancient hardware - 'sacred': 45000, - 'vintage': 30000, # 30s for vintage (G4) - 'classic': 20000, - 'retro': 15000, - 'modern': 10000, - 'recent': 5000 - } - return timeouts.get(tier, 30000) - - def validate_response( - self, - challenge: Challenge, - response: ChallengeResponse - ) -> ValidationResult: - """ - Validate a challenge response. - - Returns ValidationResult with confidence score and failure reasons. - """ - failures = [] - confidence = 100.0 - - # 1. Check timing window - response_time = response.response_timestamp - challenge.timestamp - timing_ok = self._check_timing(response_time, challenge.timeout_ms, failures) - if not timing_ok: - confidence -= self.PENALTY_TIMING - - # 2. Check jitter (emulator detection) - jitter_ok = self._check_jitter(response.jitter_variance, failures) - if not jitter_ok: - confidence -= self.PENALTY_JITTER - - # 3. Check cache ratio - cache_ok = self._check_cache_ratio( - response.cache_l1_ticks, - response.cache_l2_ticks, - response.cache_ratio, - failures - ) - if not cache_ok: - confidence -= self.PENALTY_CACHE - - # 4. Check thermal sensor - thermal_ok = self._check_thermal(response.thermal_celsius, failures) - if not thermal_ok: - confidence -= self.PENALTY_THERMAL - - # 5. Check hardware serial - serial_ok = self._check_serial( - response.hardware_serial, - challenge.expected_hardware, - failures - ) - if not serial_ok: - confidence -= self.PENALTY_SERIAL - - # 6. 
Verify response hash - computed_hash = response.hash() - if computed_hash != response.response_hash: - failures.append("Response hash mismatch - tampered data") - confidence -= 50.0 - - # Final determination - valid = confidence >= 50.0 - - return ValidationResult( - valid=valid, - confidence_score=max(0, confidence), - timing_ok=timing_ok, - jitter_ok=jitter_ok, - cache_ok=cache_ok, - thermal_ok=thermal_ok, - serial_ok=serial_ok, - failure_reasons=failures - ) - - def _check_timing( - self, - response_time_ms: int, - timeout_ms: int, - failures: List[str] - ) -> bool: - """Check if response timing is realistic""" - - if response_time_ms < self.MIN_RESPONSE_TIME_MS: - failures.append( - f"Response too fast ({response_time_ms}ms < {self.MIN_RESPONSE_TIME_MS}ms) " - f"- possible time manipulation" - ) - return False - - if response_time_ms > timeout_ms: - failures.append( - f"Response timed out ({response_time_ms}ms > {timeout_ms}ms) " - f"- possible emulator overhead" - ) - return False - - return True - - def _check_jitter(self, jitter: int, failures: List[str]) -> bool: - """ - Check timing jitter. - - Real hardware has natural variance due to: - - Thermal throttling - - Other processes - - Memory bus contention - - Cache state variations - - Emulators are unnaturally consistent. - """ - if jitter < self.MIN_JITTER: - failures.append( - f"Timing too consistent (jitter={jitter/10:.1f}%) " - f"- emulator detected (real hardware has natural variance)" - ) - return False - - if jitter > self.MAX_JITTER: - failures.append( - f"Timing too erratic (jitter={jitter/10:.1f}%) " - f"- unstable system or manipulation" - ) - return False - - return True - - def _check_cache_ratio( - self, - l1_ticks: int, - l2_ticks: int, - ratio: float, - failures: List[str] - ) -> bool: - """ - Check L1/L2 cache timing ratio. 
- - Real cache hierarchies have predictable timing relationships: - - L1: ~1-3 cycles - - L2: ~10-20 cycles - - L3: ~30-50 cycles - - RAM: ~100-300 cycles - - Emulators often don't model this correctly. - """ - if l1_ticks == 0 or l2_ticks == 0: - failures.append("Missing cache timing data - possible emulator") - return False - - if ratio < self.MIN_CACHE_RATIO: - failures.append( - f"L2/L1 cache ratio too low ({ratio:.2f}x < {self.MIN_CACHE_RATIO}x) " - f"- emulated cache doesn't match real hardware" - ) - return False - - if ratio > self.MAX_CACHE_RATIO: - failures.append( - f"L2/L1 cache ratio too high ({ratio:.2f}x > {self.MAX_CACHE_RATIO}x) " - f"- abnormal cache behavior" - ) - return False - - return True - - def _check_thermal(self, celsius: int, failures: List[str]) -> bool: - """ - Check thermal sensor reading. - - Real hardware has thermal sensors. VMs/emulators usually don't. - """ - if celsius < 0: - failures.append( - "No thermal sensor detected - possible VM/emulator" - ) - return False - - if celsius < 10 or celsius > 95: - failures.append( - f"Unrealistic thermal reading ({celsius}C) " - f"- should be 10-95C for operating hardware" - ) - return False - - return True - - def _check_serial( - self, - serial: str, - expected: Dict, - failures: List[str] - ) -> bool: - """ - Check hardware serial number. - - Must match registered hardware profile. - """ - if not serial or serial == "UNKNOWN" or len(serial) < 5: - failures.append( - "Missing or invalid hardware serial - generic VM detected" - ) - return False - - expected_serial = expected.get('openfirmware', {}).get('serial_number', '') - if expected_serial and serial != expected_serial: - failures.append( - f"Hardware serial mismatch (got '{serial}', expected '{expected_serial}') " - f"- hardware changed or spoofed" - ) - return False - - return True - - -class NetworkChallengeProtocol: - """ - Network protocol for mutual validator challenges. 
- - Validators periodically challenge each other to prove: - 1. They're running on real hardware (not emulators) - 2. The hardware matches their registered profile - 3. The hardware is operating correctly - - Failed challenges result in: - - Reduced block rewards - - Eventual slashing/removal from validator set - - Loss of antiquity bonuses - """ - - CHALLENGE_INTERVAL_BLOCKS = 100 # Challenge every 100 blocks - MAX_FAILURES_BEFORE_SLASH = 3 # 3 failures = slashed - FAILURE_PENALTY_PERCENT = 10 # 10% reward penalty per failure - - def __init__(self, validator_pubkey: str, hardware_profile: Dict): - self.pubkey = validator_pubkey - self.hardware = hardware_profile - self.validator = AntiSpoofValidator() - self.pending_challenges: Dict[str, Challenge] = {} - self.failure_count = 0 - - def should_challenge(self, block_height: int, target_pubkey: str) -> bool: - """Determine if we should challenge another validator this block""" - # Hash-based selection to ensure fairness - selection_hash = hashlib.sha256( - f"{block_height}:{self.pubkey}:{target_pubkey}".encode() - ).digest() - - # Challenge if first byte < threshold - threshold = 256 // (self.CHALLENGE_INTERVAL_BLOCKS // 10) - return selection_hash[0] < threshold - - def create_challenge(self, target_pubkey: str, target_hardware: Dict) -> Challenge: - """Create a challenge for another validator""" - # Use pubkey as signing key for demo (use real keys in production) - privkey = hashlib.sha256(self.pubkey.encode()).digest() - - challenge = self.validator.generate_challenge( - target_pubkey=target_pubkey, - expected_hardware=target_hardware, - challenger_privkey=privkey, - challenge_type=ChallengeType.FULL - ) - - self.pending_challenges[challenge.challenge_id] = challenge - return challenge - - def handle_response(self, response: ChallengeResponse) -> ValidationResult: - """Handle a response to one of our challenges""" - challenge = self.pending_challenges.get(response.challenge_id) - if not challenge: - return 
ValidationResult( - valid=False, - confidence_score=0, - timing_ok=False, - jitter_ok=False, - cache_ok=False, - thermal_ok=False, - serial_ok=False, - failure_reasons=["Unknown challenge ID"] - ) - - result = self.validator.validate_response(challenge, response) - - # Clean up - del self.pending_challenges[response.challenge_id] - - return result - - def calculate_reward_penalty(self, failures: int) -> float: - """Calculate reward penalty based on failure count""" - if failures >= self.MAX_FAILURES_BEFORE_SLASH: - return 1.0 # 100% penalty (slashed) - return failures * (self.FAILURE_PENALTY_PERCENT / 100.0) - - -def print_economic_analysis(): - """Print the economic argument for why spoofing is irrational""" - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ RUSTCHAIN PROOF OF ANTIQUITY - ECONOMIC ANALYSIS ║ -╚══════════════════════════════════════════════════════════════════════╝ - - Why spoofing is economically irrational: - - EMULATOR DEVELOPMENT COSTS: - ───────────────────────────────────────────────────────────────────── - • Accurate PowerPC timing model: $20,000+ (6+ months dev time) - • Cache hierarchy simulation: $10,000+ (requires reverse eng) - • OpenFirmware/NVRAM emulation: $5,000+ (Apple-specific) - • Thermal sensor spoofing: $2,000+ (per-model calibration) - • Continuous maintenance: $10,000+/year (OS updates, etc.) 
- ───────────────────────────────────────────────────────────────────── - TOTAL EMULATOR COST: $50,000+ initial + ongoing - - REAL HARDWARE COSTS: - ───────────────────────────────────────────────────────────────────── - • PowerMac G4 (2003): $30-50 on eBay - • PowerBook G4: $40-80 on eBay - • Power Mac G5: $50-100 on eBay - • iMac G3/G4: $20-40 on eBay - • Electricity: ~$5/month - ───────────────────────────────────────────────────────────────────── - TOTAL REAL HARDWARE: <$100 + minimal ongoing - - CONCLUSION: - ───────────────────────────────────────────────────────────────────── - Rational actor will ALWAYS buy real vintage hardware because: - - • 500x cheaper than developing an accurate emulator - • Zero maintenance (hardware just works) - • Contributes to preservation (positive externality) - • No risk of detection/slashing - • Supports the vintage computing community - - THIS IS THE GENIUS OF PROOF OF ANTIQUITY: - The network is secured by making fraud economically stupid. - -╔══════════════════════════════════════════════════════════════════════╗ -║ "It's cheaper to buy a $50 vintage Mac than to emulate one" ║ -╚══════════════════════════════════════════════════════════════════════╝ -""") - - -if __name__ == "__main__": - print_economic_analysis() - - # Demo validation - print("\n Demo: Simulating challenge-response validation...\n") - - # Create validator with expected hardware profile - expected_hardware = { - "cpu": { - "model": "PowerMac3,6", - "architecture": "PowerPC G4 (7455/7457)", - "tier": "vintage" - }, - "openfirmware": { - "serial_number": "G84243AZQ6P" - } - } - - validator = AntiSpoofValidator() - - # Generate challenge - privkey = secrets.token_bytes(32) - challenge = validator.generate_challenge( - target_pubkey="target_validator_pubkey", - expected_hardware=expected_hardware, - challenger_privkey=privkey - ) - - print(f" Challenge ID: {challenge.challenge_id}") - print(f" Challenge Type: {ChallengeType(challenge.challenge_type).name}") - 
print(f" Timeout: {challenge.timeout_ms}ms") - - # Simulate a REAL hardware response - real_response = ChallengeResponse( - challenge_id=challenge.challenge_id, - response_timestamp=challenge.timestamp + 5000, # 5 second response - timebase_value=173470036125283, - cache_l1_ticks=150, - cache_l2_ticks=450, # 3x ratio - realistic - cache_ratio=3.0, - memory_ticks=15000, - thermal_celsius=43, - hardware_serial="G84243AZQ6P", - jitter_variance=25, # 2.5% variance - natural - pipeline_cycles=1200, - response_hash=b'', - responder_pubkey="responder_key", - signature=b'' - ) - real_response.response_hash = real_response.hash() - - print("\n --- REAL HARDWARE RESPONSE ---") - result = validator.validate_response(challenge, real_response) - print(f" Valid: {result.valid}") - print(f" Confidence: {result.confidence_score:.1f}%") - for reason in result.failure_reasons: - print(f" ⚠ {reason}") - - # Simulate an EMULATOR response - emu_response = ChallengeResponse( - challenge_id=challenge.challenge_id, - response_timestamp=challenge.timestamp + 5000, - timebase_value=173470036125283, - cache_l1_ticks=150, - cache_l2_ticks=160, # 1.07x ratio - too similar! Emulated cache - cache_ratio=1.07, - memory_ticks=15000, - thermal_celsius=-1, # No thermal sensor in emulator - hardware_serial="UNKNOWN", # Generic VM - jitter_variance=1, # Too consistent! Emulator detected - pipeline_cycles=1200, - response_hash=b'', - responder_pubkey="emulator_key", - signature=b'' - ) - emu_response.response_hash = emu_response.hash() - - print("\n --- EMULATOR RESPONSE ---") - result = validator.validate_response(challenge, emu_response) - print(f" Valid: {result.valid}") - print(f" Confidence: {result.confidence_score:.1f}%") - for reason in result.failure_reasons: - print(f" ✗ {reason}") - - print("\n Emulator DETECTED and REJECTED! 
#!/usr/bin/env python3
"""
RustChain Network Challenge Protocol
====================================

Validators challenge each other to prove they're running on real vintage hardware.
Each challenge is:
1. Time-bound (must respond within hardware-accurate window)
2. Hardware-specific (requires real cache timing, thermal sensors, etc.)
3. Cryptographically signed (can't replay or forge responses)

The economic argument:
- Developing an accurate PowerPC emulator: $50,000+ in engineering time
- Buying a working PowerMac G4: $30-50 on eBay
- Rational choice: BUY REAL HARDWARE

This is the "Proof of Antiquity" anti-spoofing layer.
"""

import hashlib
import hmac
import json
import os
import secrets
import struct
import time
from dataclasses import dataclass, asdict
from typing import Optional, Dict, List, Tuple
from enum import Enum


class ChallengeType(Enum):
    """Which subset of hardware probes a challenge requests."""
    FULL = 0x00      # All hardware tests
    TIMEBASE = 0x01  # PowerPC timebase only
    CACHE = 0x02     # L1/L2 cache timing
    MEMORY = 0x03    # Memory access patterns
    THERMAL = 0x04   # Thermal sensors
    SERIAL = 0x05    # Hardware serials
    PIPELINE = 0x06  # Instruction pipeline timing


class HardwareTier(Enum):
    """(tier name, minimum age in years, reward multiplier) per hardware class."""
    ANCIENT = ("ancient", 30, 3.5)  # 30+ years, 3.5x multiplier
    SACRED = ("sacred", 25, 3.0)    # 25-29 years
    VINTAGE = ("vintage", 20, 2.5)  # 20-24 years (PowerPC G3/G4)
    CLASSIC = ("classic", 15, 2.0)  # 15-19 years
    RETRO = ("retro", 10, 1.5)      # 10-14 years (Mac Pro Trashcan)
    MODERN = ("modern", 5, 1.0)     # 5-9 years
    RECENT = ("recent", 0, 0.5)     # 0-4 years (minimal reward)


@dataclass
class Challenge:
    """A cryptographic challenge sent to a validator."""
    challenge_id: str       # Random hex identifier (16 bytes of entropy)
    challenge_type: int     # ChallengeType value
    nonce: bytes            # 32 bytes of randomness
    timestamp: int          # Unix timestamp in milliseconds
    timeout_ms: int         # Response must arrive within this window
    expected_hardware: Dict  # Expected hardware profile (from registration)
    challenger_pubkey: str  # Who issued this challenge
    signature: bytes        # Challenger's signature (excluded from to_bytes)

    def to_bytes(self) -> bytes:
        """Serialize for signing/verification (signature field is excluded)."""
        return (
            self.challenge_id.encode()
            + struct.pack('>B', self.challenge_type)
            + self.nonce
            + struct.pack('>Q', self.timestamp)
            + struct.pack('>I', self.timeout_ms)
            + json.dumps(self.expected_hardware, sort_keys=True).encode()
            + self.challenger_pubkey.encode()
        )

    def hash(self) -> bytes:
        """SHA256 hash of challenge"""
        return hashlib.sha256(self.to_bytes()).digest()


@dataclass
class ChallengeResponse:
    """Response to a challenge, proving real hardware."""
    challenge_id: str
    response_timestamp: int
    timebase_value: int     # PowerPC timebase register value
    cache_l1_ticks: int
    cache_l2_ticks: int
    cache_ratio: float      # L2/L1 - must be realistic (1.5-20x)
    memory_ticks: int
    thermal_celsius: int
    hardware_serial: str
    jitter_variance: int    # Natural timing variance (emulators are too consistent)
    pipeline_cycles: int
    response_hash: bytes    # Hash of all response data
    responder_pubkey: str
    signature: bytes

    def to_bytes(self) -> bytes:
        """Serialize for signing/verification (hash and signature excluded)."""
        return (
            self.challenge_id.encode()
            + struct.pack('>Q', self.response_timestamp)
            + struct.pack('>Q', self.timebase_value)
            + struct.pack('>I', self.cache_l1_ticks)
            + struct.pack('>I', self.cache_l2_ticks)
            + struct.pack('>f', self.cache_ratio)
            + struct.pack('>I', self.memory_ticks)
            + struct.pack('>i', self.thermal_celsius)  # signed: -1 means "no sensor"
            + self.hardware_serial.encode()
            + struct.pack('>I', self.jitter_variance)
            + struct.pack('>I', self.pipeline_cycles)
            + self.responder_pubkey.encode()
        )

    def hash(self) -> bytes:
        """SHA256 hash of response"""
        return hashlib.sha256(self.to_bytes()).digest()


@dataclass
class ValidationResult:
    """Result of validating a challenge response."""
    valid: bool
    confidence_score: float  # 0-100%
    timing_ok: bool
    jitter_ok: bool
    cache_ok: bool
    thermal_ok: bool
    serial_ok: bool
    failure_reasons: List[str]


class AntiSpoofValidator:
    """
    Validates challenge responses to detect emulators.

    Detection methods:
    1. Timing window - Response must arrive in hardware-accurate time
    2. Jitter analysis - Real hardware has natural variance, emulators don't
    3. Cache ratio - L2/L1 ratio must match real cache hierarchy
    4. Thermal presence - Real hardware has thermal sensors
    5. Serial validation - Hardware serials must match registered profile
    """

    # Timing thresholds (in milliseconds)
    MIN_RESPONSE_TIME_MS = 10     # Too fast = time manipulation
    MAX_RESPONSE_TIME_MS = 30000  # Too slow = emulator overhead

    # Jitter thresholds (variance * 1000)
    MIN_JITTER = 5    # 0.5% minimum variance (emulators are too consistent)
    MAX_JITTER = 500  # 50% maximum variance (too much = something wrong)

    # Cache ratio thresholds
    MIN_CACHE_RATIO = 1.5   # L2 should be at least 1.5x slower than L1
    MAX_CACHE_RATIO = 20.0  # But not absurdly different

    # Confidence penalties per failed check (response starts at 100)
    PENALTY_TIMING = 30.0
    PENALTY_JITTER = 40.0
    PENALTY_CACHE = 25.0
    PENALTY_THERMAL = 15.0
    PENALTY_SERIAL = 20.0

    def __init__(self, known_hardware_profiles: Dict[str, Dict] = None):
        """
        Initialize with known hardware profiles.

        known_hardware_profiles: Map of hardware_serial -> expected profile
        """
        self.known_profiles = known_hardware_profiles or {}
        self.challenge_history: Dict[str, Challenge] = {}

    def generate_challenge(
        self,
        target_pubkey: str,
        expected_hardware: Dict,
        challenger_privkey: bytes,  # For signing
        challenge_type: ChallengeType = ChallengeType.FULL
    ) -> Challenge:
        """Generate a new challenge for a validator"""

        challenge = Challenge(
            challenge_id=secrets.token_hex(16),
            challenge_type=challenge_type.value,
            nonce=secrets.token_bytes(32),
            timestamp=int(time.time() * 1000),
            timeout_ms=self._get_timeout_for_hardware(expected_hardware),
            expected_hardware=expected_hardware,
            challenger_pubkey=hashlib.sha256(challenger_privkey).hexdigest()[:40],
            signature=b''  # Will be filled
        )

        # Sign the challenge (HMAC over the signature-free serialization)
        challenge.signature = hmac.new(
            challenger_privkey,
            challenge.to_bytes(),
            hashlib.sha256
        ).digest()

        # Store for later validation
        self.challenge_history[challenge.challenge_id] = challenge

        return challenge

    def _get_timeout_for_hardware(self, hardware: Dict) -> int:
        """Calculate appropriate timeout based on hardware age"""
        tier = hardware.get('tier', 'modern')

        timeouts = {
            'ancient': 60000,  # 60s for ancient hardware
            'sacred': 45000,
            'vintage': 30000,  # 30s for vintage (G4)
            'classic': 20000,
            'retro': 15000,
            'modern': 10000,
            'recent': 5000
        }
        return timeouts.get(tier, 30000)

    def validate_response(
        self,
        challenge: Challenge,
        response: ChallengeResponse
    ) -> ValidationResult:
        """
        Validate a challenge response.

        Returns ValidationResult with confidence score and failure reasons.
        """
        failures = []
        confidence = 100.0

        # 1. Check timing window
        response_time = response.response_timestamp - challenge.timestamp
        timing_ok = self._check_timing(response_time, challenge.timeout_ms, failures)
        if not timing_ok:
            confidence -= self.PENALTY_TIMING

        # 2. Check jitter (emulator detection)
        jitter_ok = self._check_jitter(response.jitter_variance, failures)
        if not jitter_ok:
            confidence -= self.PENALTY_JITTER

        # 3. Check cache ratio
        cache_ok = self._check_cache_ratio(
            response.cache_l1_ticks,
            response.cache_l2_ticks,
            response.cache_ratio,
            failures
        )
        if not cache_ok:
            confidence -= self.PENALTY_CACHE

        # 4. Check thermal sensor
        thermal_ok = self._check_thermal(response.thermal_celsius, failures)
        if not thermal_ok:
            confidence -= self.PENALTY_THERMAL

        # 5. Check hardware serial
        serial_ok = self._check_serial(
            response.hardware_serial,
            challenge.expected_hardware,
            failures
        )
        if not serial_ok:
            confidence -= self.PENALTY_SERIAL

        # 6. Verify response hash
        computed_hash = response.hash()
        if computed_hash != response.response_hash:
            failures.append("Response hash mismatch - tampered data")
            confidence -= 50.0

        # Final determination
        valid = confidence >= 50.0

        return ValidationResult(
            valid=valid,
            confidence_score=max(0, confidence),
            timing_ok=timing_ok,
            jitter_ok=jitter_ok,
            cache_ok=cache_ok,
            thermal_ok=thermal_ok,
            serial_ok=serial_ok,
            failure_reasons=failures
        )

    def _check_timing(
        self,
        response_time_ms: int,
        timeout_ms: int,
        failures: List[str]
    ) -> bool:
        """Check if response timing is realistic"""

        if response_time_ms < self.MIN_RESPONSE_TIME_MS:
            failures.append(
                f"Response too fast ({response_time_ms}ms < {self.MIN_RESPONSE_TIME_MS}ms) "
                f"- possible time manipulation"
            )
            return False

        if response_time_ms > timeout_ms:
            failures.append(
                f"Response timed out ({response_time_ms}ms > {timeout_ms}ms) "
                f"- possible emulator overhead"
            )
            return False

        return True

    def _check_jitter(self, jitter: int, failures: List[str]) -> bool:
        """
        Check timing jitter.

        Real hardware has natural variance due to:
        - Thermal throttling
        - Other processes
        - Memory bus contention
        - Cache state variations

        Emulators are unnaturally consistent.
        """
        if jitter < self.MIN_JITTER:
            failures.append(
                f"Timing too consistent (jitter={jitter/10:.1f}%) "
                f"- emulator detected (real hardware has natural variance)"
            )
            return False

        if jitter > self.MAX_JITTER:
            failures.append(
                f"Timing too erratic (jitter={jitter/10:.1f}%) "
                f"- unstable system or manipulation"
            )
            return False

        return True

    def _check_cache_ratio(
        self,
        l1_ticks: int,
        l2_ticks: int,
        ratio: float,
        failures: List[str]
    ) -> bool:
        """
        Check L1/L2 cache timing ratio.

        Real cache hierarchies have predictable timing relationships:
        - L1: ~1-3 cycles
        - L2: ~10-20 cycles
        - L3: ~30-50 cycles
        - RAM: ~100-300 cycles

        Emulators often don't model this correctly.
        """
        if l1_ticks == 0 or l2_ticks == 0:
            failures.append("Missing cache timing data - possible emulator")
            return False

        if ratio < self.MIN_CACHE_RATIO:
            failures.append(
                f"L2/L1 cache ratio too low ({ratio:.2f}x < {self.MIN_CACHE_RATIO}x) "
                f"- emulated cache doesn't match real hardware"
            )
            return False

        if ratio > self.MAX_CACHE_RATIO:
            failures.append(
                f"L2/L1 cache ratio too high ({ratio:.2f}x > {self.MAX_CACHE_RATIO}x) "
                f"- abnormal cache behavior"
            )
            return False

        return True

    def _check_thermal(self, celsius: int, failures: List[str]) -> bool:
        """
        Check thermal sensor reading.

        Real hardware has thermal sensors. VMs/emulators usually don't.
        """
        # Negative reading is the sentinel for "no sensor present"
        if celsius < 0:
            failures.append(
                "No thermal sensor detected - possible VM/emulator"
            )
            return False

        if celsius < 10 or celsius > 95:
            failures.append(
                f"Unrealistic thermal reading ({celsius}C) "
                f"- should be 10-95C for operating hardware"
            )
            return False

        return True

    def _check_serial(
        self,
        serial: str,
        expected: Dict,
        failures: List[str]
    ) -> bool:
        """
        Check hardware serial number.

        Must match registered hardware profile.
        """
        if not serial or serial == "UNKNOWN" or len(serial) < 5:
            failures.append(
                "Missing or invalid hardware serial - generic VM detected"
            )
            return False

        expected_serial = expected.get('openfirmware', {}).get('serial_number', '')
        if expected_serial and serial != expected_serial:
            failures.append(
                f"Hardware serial mismatch (got '{serial}', expected '{expected_serial}') "
                f"- hardware changed or spoofed"
            )
            return False

        return True


class NetworkChallengeProtocol:
    """
    Network protocol for mutual validator challenges.

    Validators periodically challenge each other to prove:
    1. They're running on real hardware (not emulators)
    2. The hardware matches their registered profile
    3. The hardware is operating correctly

    Failed challenges result in:
    - Reduced block rewards
    - Eventual slashing/removal from validator set
    - Loss of antiquity bonuses
    """

    CHALLENGE_INTERVAL_BLOCKS = 100  # Challenge every 100 blocks
    MAX_FAILURES_BEFORE_SLASH = 3    # 3 failures = slashed
    FAILURE_PENALTY_PERCENT = 10     # 10% reward penalty per failure

    def __init__(self, validator_pubkey: str, hardware_profile: Dict):
        self.pubkey = validator_pubkey
        self.hardware = hardware_profile
        self.validator = AntiSpoofValidator()
        self.pending_challenges: Dict[str, Challenge] = {}
        self.failure_count = 0

    def should_challenge(self, block_height: int, target_pubkey: str) -> bool:
        """Determine if we should challenge another validator this block"""
        # Hash-based selection to ensure fairness
        selection_hash = hashlib.sha256(
            f"{block_height}:{self.pubkey}:{target_pubkey}".encode()
        ).digest()

        # Challenge if first byte < threshold
        threshold = 256 // (self.CHALLENGE_INTERVAL_BLOCKS // 10)
        return selection_hash[0] < threshold

    def create_challenge(self, target_pubkey: str, target_hardware: Dict) -> Challenge:
        """Create a challenge for another validator"""
        # Use pubkey as signing key for demo (use real keys in production)
        privkey = hashlib.sha256(self.pubkey.encode()).digest()

        challenge = self.validator.generate_challenge(
            target_pubkey=target_pubkey,
            expected_hardware=target_hardware,
            challenger_privkey=privkey,
            challenge_type=ChallengeType.FULL
        )

        self.pending_challenges[challenge.challenge_id] = challenge
        return challenge

    def handle_response(self, response: ChallengeResponse) -> ValidationResult:
        """Handle a response to one of our challenges"""
        challenge = self.pending_challenges.get(response.challenge_id)
        if not challenge:
            return ValidationResult(
                valid=False,
                confidence_score=0,
                timing_ok=False,
                jitter_ok=False,
                cache_ok=False,
                thermal_ok=False,
                serial_ok=False,
                failure_reasons=["Unknown challenge ID"]
            )

        result = self.validator.validate_response(challenge, response)

        # Clean up
        del self.pending_challenges[response.challenge_id]

        return result

    def calculate_reward_penalty(self, failures: int) -> float:
        """Calculate reward penalty based on failure count"""
        if failures >= self.MAX_FAILURES_BEFORE_SLASH:
            return 1.0  # 100% penalty (slashed)
        return failures * (self.FAILURE_PENALTY_PERCENT / 100.0)


def print_economic_analysis():
    """Print the economic argument for why spoofing is irrational"""
    print("""
╔══════════════════════════════════════════════════════════════════════╗
║ RUSTCHAIN PROOF OF ANTIQUITY - ECONOMIC ANALYSIS ║
╚══════════════════════════════════════════════════════════════════════╝

 Why spoofing is economically irrational:

 EMULATOR DEVELOPMENT COSTS:
 ─────────────────────────────────────────────────────────────────────
 • Accurate PowerPC timing model: $20,000+ (6+ months dev time)
 • Cache hierarchy simulation: $10,000+ (requires reverse eng)
 • OpenFirmware/NVRAM emulation: $5,000+ (Apple-specific)
 • Thermal sensor spoofing: $2,000+ (per-model calibration)
 • Continuous maintenance: $10,000+/year (OS updates, etc.)
 ─────────────────────────────────────────────────────────────────────
 TOTAL EMULATOR COST: $50,000+ initial + ongoing

 REAL HARDWARE COSTS:
 ─────────────────────────────────────────────────────────────────────
 • PowerMac G4 (2003): $30-50 on eBay
 • PowerBook G4: $40-80 on eBay
 • Power Mac G5: $50-100 on eBay
 • iMac G3/G4: $20-40 on eBay
 • Electricity: ~$5/month
 ─────────────────────────────────────────────────────────────────────
 TOTAL REAL HARDWARE: <$100 + minimal ongoing

 CONCLUSION:
 ─────────────────────────────────────────────────────────────────────
 Rational actor will ALWAYS buy real vintage hardware because:

 • 500x cheaper than developing an accurate emulator
 • Zero maintenance (hardware just works)
 • Contributes to preservation (positive externality)
 • No risk of detection/slashing
 • Supports the vintage computing community

 THIS IS THE GENIUS OF PROOF OF ANTIQUITY:
 The network is secured by making fraud economically stupid.

╔══════════════════════════════════════════════════════════════════════╗
║ "It's cheaper to buy a $50 vintage Mac than to emulate one" ║
╚══════════════════════════════════════════════════════════════════════╝
""")


if __name__ == "__main__":
    print_economic_analysis()

    # Demo validation
    print("\n Demo: Simulating challenge-response validation...\n")

    # Create validator with expected hardware profile
    expected_hardware = {
        "cpu": {
            "model": "PowerMac3,6",
            "architecture": "PowerPC G4 (7455/7457)",
            "tier": "vintage"
        },
        "openfirmware": {
            "serial_number": "G84243AZQ6P"
        }
    }

    validator = AntiSpoofValidator()

    # Generate challenge
    privkey = secrets.token_bytes(32)
    challenge = validator.generate_challenge(
        target_pubkey="target_validator_pubkey",
        expected_hardware=expected_hardware,
        challenger_privkey=privkey
    )

    print(f" Challenge ID: {challenge.challenge_id}")
    print(f" Challenge Type: {ChallengeType(challenge.challenge_type).name}")
    print(f" Timeout: {challenge.timeout_ms}ms")

    # Simulate a REAL hardware response
    real_response = ChallengeResponse(
        challenge_id=challenge.challenge_id,
        response_timestamp=challenge.timestamp + 5000,  # 5 second response
        timebase_value=173470036125283,
        cache_l1_ticks=150,
        cache_l2_ticks=450,  # 3x ratio - realistic
        cache_ratio=3.0,
        memory_ticks=15000,
        thermal_celsius=43,
        hardware_serial="G84243AZQ6P",
        jitter_variance=25,  # 2.5% variance - natural
        pipeline_cycles=1200,
        response_hash=b'',
        responder_pubkey="responder_key",
        signature=b''
    )
    real_response.response_hash = real_response.hash()

    print("\n --- REAL HARDWARE RESPONSE ---")
    result = validator.validate_response(challenge, real_response)
    print(f" Valid: {result.valid}")
    print(f" Confidence: {result.confidence_score:.1f}%")
    for reason in result.failure_reasons:
        print(f" ⚠ {reason}")

    # Simulate an EMULATOR response
    emu_response = ChallengeResponse(
        challenge_id=challenge.challenge_id,
        response_timestamp=challenge.timestamp + 5000,
        timebase_value=173470036125283,
        cache_l1_ticks=150,
        cache_l2_ticks=160,  # 1.07x ratio - too similar! Emulated cache
        cache_ratio=1.07,
        memory_ticks=15000,
        thermal_celsius=-1,  # No thermal sensor in emulator
        hardware_serial="UNKNOWN",  # Generic VM
        jitter_variance=1,  # Too consistent! Emulator detected
        pipeline_cycles=1200,
        response_hash=b'',
        responder_pubkey="emulator_key",
        signature=b''
    )
    emu_response.response_hash = emu_response.hash()

    print("\n --- EMULATOR RESPONSE ---")
    result = validator.validate_response(challenge, emu_response)
    print(f" Valid: {result.valid}")
    print(f" Confidence: {result.confidence_score:.1f}%")
    for reason in result.failure_reasons:
        print(f" ✗ {reason}")

    print("\n Emulator DETECTED and REJECTED! ✓\n")
✓\n") diff --git a/rips/rustchain-core/src/mutator_oracle/multi_arch_oracles.py b/rips/rustchain-core/src/mutator_oracle/multi_arch_oracles.py index 169415d0..ccb157aa 100644 --- a/rips/rustchain-core/src/mutator_oracle/multi_arch_oracles.py +++ b/rips/rustchain-core/src/mutator_oracle/multi_arch_oracles.py @@ -1,477 +1,477 @@ -#!/usr/bin/env python3 -""" -RustChain Multi-Architecture Mutator Oracle Network -==================================================== - -Different CPU architectures contribute unique entropy through their -specific vector/SIMD instructions. The more diverse the oracle ring, -the harder it is to compromise. - -SUPPORTED ARCHITECTURES: -═══════════════════════════════════════════════════════════════════════ - -┌─────────────────┬──────────────┬────────────────────────────────────┐ -│ Architecture │ SIMD Unit │ Unique Entropy Source │ -├─────────────────┼──────────────┼────────────────────────────────────┤ -│ PowerPC G4/G5 │ AltiVec │ vperm (128-bit vector permute) │ -│ Intel x86_64 │ SSE/AVX │ PSHUFB, VPERM2F128 │ -│ Apple Silicon │ ARM NEON │ TBL/TBX (table lookup permute) │ -│ SPARC │ VIS │ FPACK, BMASK │ -│ PA-RISC │ MAX │ Permute instructions │ -│ 68k Mac │ (none) │ Unique bus timing, no cache │ -│ Alpha │ MVI │ PERR, UNPKBW │ -│ MIPS │ MSA │ VSHF (vector shuffle) │ -└─────────────────┴──────────────┴────────────────────────────────────┘ - -NETWORK TOPOLOGY: -═══════════════════════════════════════════════════════════════════════ - - ┌─────────────────┐ - │ ENTROPY MIXER │ - │ (XOR Ring) │ - └────────┬────────┘ - │ - ┌─────────┬───────┬───────┼───────┬───────┬─────────┐ - │ │ │ │ │ │ │ - ┌───▼───┐ ┌───▼───┐ ┌─▼─┐ ┌───▼───┐ ┌─▼─┐ ┌───▼───┐ ┌───▼───┐ - │ PPC │ │ PPC │ │x86│ │ ARM │ │M1 │ │ SPARC │ │ 68k │ - │ G4 │ │ G5 │ │ │ │ NEON │ │M2 │ │ │ │ │ - │AltiVec│ │AltiVec│ │SSE│ │ Pi │ │ │ │ VIS │ │Timing │ - └───────┘ └───────┘ └───┘ └───────┘ └───┘ └───────┘ └───────┘ - -Each architecture contributes entropy that ONLY that architecture -can 
# --- Multi-architecture oracle type definitions (multi_arch_oracles.py) ---
# Each supported CPU architecture contributes entropy through its own
# SIMD/vector unit (AltiVec, SSE, NEON, VIS, ...). Architectural diversity
# is the security property: compromising the oracle ring requires
# controlling every silicon dialect at once.

from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple, Set
from enum import Enum, auto
import hashlib
import struct
import secrets
import time


class CPUArchitecture(Enum):
    """Supported CPU architectures for oracle nodes.

    Each member carries (arch_id, display name, SIMD unit or None, release year).
    """
    POWERPC_G3 = ("ppc_g3", "PowerPC G3", None, 1997)
    POWERPC_G4 = ("ppc_g4", "PowerPC G4", "AltiVec", 1999)
    POWERPC_G5 = ("ppc_g5", "PowerPC G5", "AltiVec", 2003)
    INTEL_X86 = ("x86", "Intel x86", "SSE", 1999)
    INTEL_X86_64 = ("x86_64", "Intel x86-64", "SSE/AVX", 2003)
    ARM_32 = ("arm32", "ARM 32-bit", "NEON", 2005)
    ARM_64 = ("arm64", "ARM 64-bit", "NEON", 2011)
    APPLE_M1 = ("m1", "Apple M1", "NEON+AMX", 2020)
    APPLE_M2 = ("m2", "Apple M2", "NEON+AMX", 2022)
    MOTOROLA_68K = ("m68k", "Motorola 68k", None, 1979)
    SPARC = ("sparc", "SPARC", "VIS", 1987)
    MIPS = ("mips", "MIPS", "MSA", 1985)
    PA_RISC = ("pa_risc", "PA-RISC", "MAX", 1986)
    ALPHA = ("alpha", "DEC Alpha", "MVI", 1992)
    RISC_V = ("riscv", "RISC-V", "V Extension", 2010)

    def __init__(self, arch_id: str, name: str, simd: Optional[str], year: int):
        # Enum's own .name is the member name; arch_name avoids the collision.
        self.arch_id = arch_id
        self.arch_name = name
        self.simd_unit = simd
        self.release_year = year

    @property
    def antiquity_bonus(self) -> float:
        """
        Older architectures get higher bonuses.

        ARM is heavily penalized regardless of age because:
        - Billions of ARM devices exist (phones, tablets, Pis)
        - Easy to create bot farms with cheap Android phones
        - Raspberry Pi clusters are trivial to set up

        Only rare/exotic ARM (Apple Silicon with AMX) gets slight bonus.
        """
        # ARM penalty - too easy to bot farm with phones/Pis
        if self.arch_id in ['arm32', 'arm64']:
            return 0.1  # 10% - heavily discouraged

        # Apple Silicon - AMX coprocessor is unique and can be used as mutator
        # Gets same bonus as modern x86 since AMX provides unique entropy
        if self.arch_id in ['m1', 'm2']:
            return 1.0  # 1x - AMX mutator capability

        # Standard age-based tiers for rare architectures
        age = 2025 - self.release_year
        if age >= 40: return 3.5  # Ancient (68k 1979, MIPS 1985)
        if age >= 32: return 3.0  # Sacred (Alpha 1992, SPARC 1987)
        if age >= 20: return 2.5  # Vintage (G3, G4, G5, x86-64)
        if age >= 12: return 2.0  # Classic (older x86)
        return 1.0                # Modern


@dataclass
class ArchitectureOracle:
    """An oracle node for a specific CPU architecture."""
    node_id: str
    hostname: str
    ip_address: str
    architecture: CPUArchitecture
    cpu_model: str
    simd_enabled: bool
    unique_features: List[str] = field(default_factory=list)
    entropy_method: str = ""   # derived in __post_init__ from architecture
    last_entropy: bytes = b''

    def __post_init__(self):
        """Set architecture-specific entropy method"""
        arch_methods = {
            CPUArchitecture.POWERPC_G4: "altivec_vperm_collapse",
            CPUArchitecture.POWERPC_G5: "altivec_vperm_collapse",
            CPUArchitecture.INTEL_X86_64: "sse_pshufb_collapse",
            CPUArchitecture.APPLE_M1: "neon_tbl_collapse",
            CPUArchitecture.APPLE_M2: "neon_tbl_collapse",
            CPUArchitecture.MOTOROLA_68K: "bus_timing_entropy",
            CPUArchitecture.SPARC: "vis_fpack_collapse",
            CPUArchitecture.ARM_64: "neon_tbl_collapse",
        }
        # Architectures without a dedicated SIMD collapse fall back to timing entropy.
        self.entropy_method = arch_methods.get(
            self.architecture,
            "generic_timing_entropy"
        )


@dataclass
class MultiArchMutationSeed:
    """Mutation seed combining entropy from multiple architectures."""
    seed: bytes
    block_height: int
    timestamp: int
    architecture_contributions: Dict[str, Tuple[str, bytes]]  # arch -> (node_id, entropy_hash)
    diversity_score: float  # Higher = more architectures
    ring_signature: bytes
MultiArchOracleRing: - """ - Oracle ring supporting multiple CPU architectures. - - Security increases with architectural diversity: - - 1 architecture: Single point of failure - - 2 architectures: Need to compromise both - - 5+ architectures: Extremely hard to attack all - """ - - MINIMUM_ARCHITECTURES = 2 # Need at least 2 different archs - DIVERSITY_BONUS_PER_ARCH = 0.1 # 10% bonus per unique architecture - - def __init__(self): - self.nodes: Dict[str, ArchitectureOracle] = {} - self.architectures_present: Set[CPUArchitecture] = set() - - def register_oracle(self, oracle: ArchitectureOracle) -> bool: - """Register a new oracle node""" - - # Verify architecture-specific requirements - if oracle.architecture in [CPUArchitecture.POWERPC_G4, CPUArchitecture.POWERPC_G5]: - if not oracle.simd_enabled: - print(f" ✗ {oracle.node_id}: AltiVec required for PowerPC G4/G5") - return False - - self.nodes[oracle.node_id] = oracle - self.architectures_present.add(oracle.architecture) - - print(f" ✓ {oracle.node_id}: {oracle.architecture.arch_name}") - print(f" SIMD: {oracle.architecture.simd_unit or 'None'}") - print(f" Method: {oracle.entropy_method}") - print(f" Antiquity Bonus: {oracle.architecture.antiquity_bonus}x") - - return True - - def get_diversity_score(self) -> float: - """Calculate diversity score based on unique architectures""" - base_score = len(self.architectures_present) - - # Bonus for having both big-endian and little-endian - endian_types = set() - for arch in self.architectures_present: - if arch in [CPUArchitecture.POWERPC_G4, CPUArchitecture.POWERPC_G5, - CPUArchitecture.MOTOROLA_68K, CPUArchitecture.SPARC]: - endian_types.add("big") - else: - endian_types.add("little") - - endian_bonus = 0.5 if len(endian_types) == 2 else 0 - - # Bonus for having SIMD and non-SIMD - simd_types = set() - for arch in self.architectures_present: - if arch.simd_unit: - simd_types.add("simd") - else: - simd_types.add("scalar") - - simd_bonus = 0.3 if len(simd_types) == 2 
else 0 - - return base_score + endian_bonus + simd_bonus - - def collect_entropy(self, oracle: ArchitectureOracle) -> bytes: - """ - Collect architecture-specific entropy from a node. - - Each architecture generates entropy differently: - - PowerPC: AltiVec vperm timing - - x86: SSE PSHUFB timing - - ARM: NEON TBL timing - - 68k: Bus timing (no SIMD) - """ - # In production, this would SSH to node and run arch-specific binary - # For now, simulate architecture-specific entropy - - arch_entropy_size = { - CPUArchitecture.POWERPC_G4: 64, # 512-bit from AltiVec - CPUArchitecture.POWERPC_G5: 64, - CPUArchitecture.INTEL_X86_64: 64, # 512-bit from AVX - CPUArchitecture.APPLE_M1: 64, # 512-bit from NEON - CPUArchitecture.APPLE_M2: 64, - CPUArchitecture.MOTOROLA_68K: 32, # 256-bit (no SIMD, timing only) - CPUArchitecture.SPARC: 48, # 384-bit from VIS - } - - size = arch_entropy_size.get(oracle.architecture, 32) - - # Simulate architecture-specific entropy generation - entropy = hashlib.sha512( - oracle.node_id.encode() + - oracle.architecture.arch_id.encode() + - struct.pack('>Q', int(time.time() * 1000000)) + - secrets.token_bytes(32) - ).digest()[:size] - - oracle.last_entropy = entropy - return entropy - - def generate_mutation_seed(self, block_height: int) -> Optional[MultiArchMutationSeed]: - """Generate mutation seed from all architecture oracles""" - - if len(self.architectures_present) < self.MINIMUM_ARCHITECTURES: - print(f" ✗ Need {self.MINIMUM_ARCHITECTURES} architectures, have {len(self.architectures_present)}") - return None - - print(f"\n Generating multi-architecture mutation seed for block {block_height}...") - print(f" Architectures: {len(self.architectures_present)}") - print(f" Diversity Score: {self.get_diversity_score():.2f}") - - # Collect entropy from each architecture - combined = bytes(64) - contributions = {} - - for node_id, oracle in self.nodes.items(): - entropy = self.collect_entropy(oracle) - entropy_hash = hashlib.sha256(entropy).digest() - 
- # XOR into combined (pad shorter entropies) - padded = entropy.ljust(64, b'\0') - combined = bytes(a ^ b for a, b in zip(combined, padded)) - - contributions[oracle.architecture.arch_id] = (node_id, entropy_hash) - - print(f" ✓ {oracle.architecture.arch_name}: {entropy[:8].hex()}...") - - # Mix with block height - final_seed = hashlib.sha512( - combined + - struct.pack('>Q', block_height) + - b'MULTIARCH_MUTATION_SEED' - ).digest() - - # Ring signature - ring_sig = hmac.new( - final_seed, - b''.join(a.encode() for a in sorted(contributions.keys())), - hashlib.sha256 - ).digest() if 'hmac' in dir() else hashlib.sha256(final_seed).digest() - - seed = MultiArchMutationSeed( - seed=final_seed, - block_height=block_height, - timestamp=int(time.time() * 1000), - architecture_contributions=contributions, - diversity_score=self.get_diversity_score(), - ring_signature=ring_sig - ) - - print(f"\n ✓ Seed: {final_seed[:16].hex()}...{final_seed[-16:].hex()}") - print(f" ✓ Diversity: {seed.diversity_score:.2f} ({len(contributions)} architectures)") - - return seed - - -def demo_multi_arch_network(): - """Demonstrate multi-architecture oracle network""" - - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ RUSTCHAIN MULTI-ARCHITECTURE MUTATOR ORACLE NETWORK ║ -║ ║ -║ "Diversity is security. The chain speaks many silicon dialects." 
║ -╚══════════════════════════════════════════════════════════════════════╝ -""") - - ring = MultiArchOracleRing() - - print(" Registering Oracle Nodes:\n") - - # Your actual hardware - oracles = [ - # PowerPC Macs (AltiVec) - ArchitectureOracle( - node_id="G4_MIRROR_DOOR", - hostname="Lee-Crockers-Powermac-G4.local", - ip_address="192.168.0.125", - architecture=CPUArchitecture.POWERPC_G4, - cpu_model="PowerMac3,6", - simd_enabled=True, - unique_features=["altivec", "dual_cpu", "ddr_sdram"] - ), - ArchitectureOracle( - node_id="G5_DUAL", - hostname="lee-crockers-power-mac-g5.local", - ip_address="192.168.0.130", - architecture=CPUArchitecture.POWERPC_G5, - cpu_model="PowerMac7,3", - simd_enabled=True, - unique_features=["altivec", "64bit", "hypertransport"] - ), - ArchitectureOracle( - node_id="POWERBOOK_G4", - hostname="sophiacorepbs-powerbook-g4-12.local", - ip_address="192.168.0.115", - architecture=CPUArchitecture.POWERPC_G4, - cpu_model="PowerBook6,8", - simd_enabled=True, - unique_features=["altivec", "mobile", "battery_entropy"] - ), - - # Intel Macs (SSE/AVX) - ArchitectureOracle( - node_id="TRASHCAN_XEON", - hostname="mac-pro-trashcan.local", - ip_address="192.168.0.154", - architecture=CPUArchitecture.INTEL_X86_64, - cpu_model="MacPro6,1", - simd_enabled=True, - unique_features=["avx2", "xeon", "ecc_memory", "dual_gpu"] - ), - - # Apple Silicon (NEON + AMX) - ArchitectureOracle( - node_id="M2_MINI", - hostname="m2-mac-mini.local", - ip_address="192.168.0.171", - architecture=CPUArchitecture.APPLE_M2, - cpu_model="Mac14,3", - simd_enabled=True, - unique_features=["neon", "amx", "neural_engine", "unified_memory"] - ), - - # Linux x86 nodes - ArchitectureOracle( - node_id="LINUX_POWEREDGE", - hostname="sophia-PowerEdge-C4130", - ip_address="192.168.0.160", - architecture=CPUArchitecture.INTEL_X86_64, - cpu_model="Xeon E5-2680", - simd_enabled=True, - unique_features=["avx2", "server", "ecc", "tesla_gpu"] - ), - ] - - for oracle in oracles: - 
ring.register_oracle(oracle) - print() - - # Show architecture coverage - print("\n" + "="*70) - print(" ARCHITECTURE COVERAGE") - print("="*70) - - arch_count = {} - for oracle in ring.nodes.values(): - arch = oracle.architecture.arch_name - arch_count[arch] = arch_count.get(arch, 0) + 1 - - for arch, count in sorted(arch_count.items()): - print(f" {arch}: {count} node(s)") - - print(f"\n Total Unique Architectures: {len(ring.architectures_present)}") - print(f" Diversity Score: {ring.get_diversity_score():.2f}") - - # Generate mutation seeds - print("\n" + "="*70) - print(" GENERATING MUTATION SEEDS") - print("="*70) - - for block in [1000, 1010, 1020]: - seed = ring.generate_mutation_seed(block) - - # Show the power of diversity - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ DIVERSITY SECURITY ANALYSIS ║ -╚══════════════════════════════════════════════════════════════════════╝ - - ARCHITECTURE ENTROPY SOURCES: - ─────────────────────────────────────────────────────────────────────── - - PowerPC G4/G5 (AltiVec vperm): - • 128-bit vector permutation - • Big-endian memory ordering - • Unique timebase register - - Intel x86-64 (SSE/AVX): - • 256/512-bit vector shuffle - • Little-endian memory ordering - • RDTSC/RDTSCP timing - - Apple M1/M2 (NEON + AMX): - • 128-bit NEON permute - • ARM64 memory model - • Apple-specific timing sources - - ATTACK SCENARIOS: - ─────────────────────────────────────────────────────────────────────── - - To compromise this network, attacker must: - - 1. Build accurate emulators for: - ✗ PowerPC G4 AltiVec timing ($50,000+) - ✗ PowerPC G5 AltiVec timing ($50,000+) - ✗ Intel AVX timing ($30,000+) - ✗ ARM NEON timing ($30,000+) - - Total: $160,000+ in emulator development - - 2. 
#!/usr/bin/env python3
"""
RustChain Multi-Architecture Mutator Oracle Network
====================================================

Different CPU architectures contribute unique entropy through their
specific vector/SIMD instructions.  The more diverse the oracle ring,
the harder it is to compromise: each architecture contributes entropy
that ONLY that architecture can generate, so an attacker must control
(or accurately emulate) ALL participating architectures at once.

SUPPORTED ARCHITECTURES
-----------------------
    PowerPC G4/G5   AltiVec    vperm (128-bit vector permute)
    Intel x86_64    SSE/AVX    PSHUFB, VPERM2F128
    Apple Silicon   ARM NEON   TBL/TBX (table lookup permute)
    SPARC           VIS        FPACK, BMASK
    PA-RISC         MAX        permute instructions
    68k Mac         (none)     unique bus timing, no cache
    Alpha           MVI        PERR, UNPKBW
    MIPS            MSA        VSHF (vector shuffle)

TOPOLOGY: all node entropies feed an XOR "entropy mixer" ring, so no
single node controls the final mutation seed.

"Diversity is security. The chain speaks many silicon dialects."
"""

import hashlib
# NOTE(fix): `hmac` was previously imported only inside the
# `if __name__ == "__main__":` guard, and the ring-signature code gated
# on `'hmac' in dir()` -- which inspects the method's *local* namespace
# and is therefore always False.  The HMAC branch was dead code and
# every ring signature silently fell back to a bare SHA-256.  Importing
# at module level makes the real HMAC unconditional.
import hmac
import secrets
import struct
import time
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import Dict, List, Optional, Set, Tuple


class CPUArchitecture(Enum):
    """Supported CPU architectures for oracle nodes.

    Each member's value is a ``(arch_id, name, simd_unit, release_year)``
    tuple; ``Enum.__init__`` receives the unpacked tuple and stores the
    fields as instance attributes.
    """
    POWERPC_G3 = ("ppc_g3", "PowerPC G3", None, 1997)
    POWERPC_G4 = ("ppc_g4", "PowerPC G4", "AltiVec", 1999)
    POWERPC_G5 = ("ppc_g5", "PowerPC G5", "AltiVec", 2003)
    INTEL_X86 = ("x86", "Intel x86", "SSE", 1999)
    INTEL_X86_64 = ("x86_64", "Intel x86-64", "SSE/AVX", 2003)
    ARM_32 = ("arm32", "ARM 32-bit", "NEON", 2005)
    ARM_64 = ("arm64", "ARM 64-bit", "NEON", 2011)
    APPLE_M1 = ("m1", "Apple M1", "NEON+AMX", 2020)
    APPLE_M2 = ("m2", "Apple M2", "NEON+AMX", 2022)
    MOTOROLA_68K = ("m68k", "Motorola 68k", None, 1979)
    SPARC = ("sparc", "SPARC", "VIS", 1987)
    MIPS = ("mips", "MIPS", "MSA", 1985)
    PA_RISC = ("pa_risc", "PA-RISC", "MAX", 1986)
    ALPHA = ("alpha", "DEC Alpha", "MVI", 1992)
    RISC_V = ("riscv", "RISC-V", "V Extension", 2010)

    def __init__(self, arch_id: str, name: str, simd: Optional[str], year: int):
        # Enum calls this once per member with the value tuple unpacked.
        self.arch_id = arch_id
        self.arch_name = name
        self.simd_unit = simd
        self.release_year = year

    @property
    def antiquity_bonus(self) -> float:
        """Return the reward multiplier for this architecture.

        Older architectures get higher bonuses.  ARM is heavily
        penalized regardless of age because billions of ARM devices
        exist (phones, tablets, Pis) and bot farms are trivial to
        assemble.  Only Apple Silicon (with its unique AMX coprocessor)
        is exempted from the ARM penalty.
        """
        # ARM penalty - too easy to bot farm with phones/Pis.
        if self.arch_id in ('arm32', 'arm64'):
            return 0.1  # 10% - heavily discouraged

        # Apple Silicon - the AMX coprocessor provides unique entropy,
        # so it earns the same bonus as modern x86.
        if self.arch_id in ('m1', 'm2'):
            return 1.0  # 1x - AMX mutator capability

        # Standard age-based tiers for rare architectures.
        # NOTE(review): reference year is hard-coded; bonuses will not
        # grow as hardware ages further -- confirm this is intended.
        age = 2025 - self.release_year
        if age >= 40:
            return 3.5  # Ancient (68k 1979, MIPS 1985)
        if age >= 32:
            return 3.0  # Sacred (Alpha 1992, SPARC 1987)
        if age >= 20:
            return 2.5  # Vintage (G3, G4, G5, x86-64)
        if age >= 12:
            return 2.0  # Classic (older x86)
        return 1.0      # Modern


@dataclass
class ArchitectureOracle:
    """An oracle node for a specific CPU architecture."""
    node_id: str                     # unique identifier within the ring
    hostname: str
    ip_address: str
    architecture: CPUArchitecture
    cpu_model: str                   # e.g. "PowerMac3,6"
    simd_enabled: bool               # whether the SIMD unit is usable
    unique_features: List[str] = field(default_factory=list)
    entropy_method: str = ""         # filled in by __post_init__
    last_entropy: bytes = b''        # most recent raw entropy collected

    def __post_init__(self):
        """Select the architecture-specific entropy collection method."""
        arch_methods = {
            CPUArchitecture.POWERPC_G4: "altivec_vperm_collapse",
            CPUArchitecture.POWERPC_G5: "altivec_vperm_collapse",
            CPUArchitecture.INTEL_X86_64: "sse_pshufb_collapse",
            CPUArchitecture.APPLE_M1: "neon_tbl_collapse",
            CPUArchitecture.APPLE_M2: "neon_tbl_collapse",
            CPUArchitecture.MOTOROLA_68K: "bus_timing_entropy",
            CPUArchitecture.SPARC: "vis_fpack_collapse",
            CPUArchitecture.ARM_64: "neon_tbl_collapse",
        }
        self.entropy_method = arch_methods.get(
            self.architecture,
            "generic_timing_entropy"
        )


@dataclass
class MultiArchMutationSeed:
    """Mutation seed combining entropy from multiple architectures."""
    seed: bytes                      # 64 bytes (SHA-512 of mixed entropy)
    block_height: int
    timestamp: int                   # milliseconds since epoch
    # arch_id -> (node_id, sha256(entropy)) for every contributor
    architecture_contributions: Dict[str, Tuple[str, bytes]]
    diversity_score: float           # higher = more architectures
    ring_signature: bytes            # HMAC binding seed to contributor set


class MultiArchOracleRing:
    """Oracle ring supporting multiple CPU architectures.

    Security increases with architectural diversity:
      - 1 architecture:  single point of failure
      - 2 architectures: need to compromise both
      - 5+ architectures: extremely hard to attack all
    """

    MINIMUM_ARCHITECTURES = 2        # need at least 2 different archs
    DIVERSITY_BONUS_PER_ARCH = 0.1   # 10% bonus per unique architecture

    def __init__(self):
        self.nodes: Dict[str, ArchitectureOracle] = {}
        self.architectures_present: Set[CPUArchitecture] = set()

    def register_oracle(self, oracle: ArchitectureOracle) -> bool:
        """Register a new oracle node.

        Returns False (and logs why) when architecture-specific
        requirements are not met; True on success.
        """
        # PowerPC G4/G5 nodes are useless as mutators without AltiVec.
        if oracle.architecture in [CPUArchitecture.POWERPC_G4, CPUArchitecture.POWERPC_G5]:
            if not oracle.simd_enabled:
                print(f" ✗ {oracle.node_id}: AltiVec required for PowerPC G4/G5")
                return False

        self.nodes[oracle.node_id] = oracle
        self.architectures_present.add(oracle.architecture)

        print(f" ✓ {oracle.node_id}: {oracle.architecture.arch_name}")
        print(f" SIMD: {oracle.architecture.simd_unit or 'None'}")
        print(f" Method: {oracle.entropy_method}")
        print(f" Antiquity Bonus: {oracle.architecture.antiquity_bonus}x")

        return True

    def get_diversity_score(self) -> float:
        """Calculate diversity score based on unique architectures.

        Base score is the count of distinct architectures, plus +0.5
        when both endiannesses are present and +0.3 when both SIMD and
        scalar-only architectures are present.
        """
        base_score = len(self.architectures_present)

        # Bonus for having both big-endian and little-endian.
        endian_types = set()
        for arch in self.architectures_present:
            if arch in [CPUArchitecture.POWERPC_G4, CPUArchitecture.POWERPC_G5,
                        CPUArchitecture.MOTOROLA_68K, CPUArchitecture.SPARC]:
                endian_types.add("big")
            else:
                endian_types.add("little")

        endian_bonus = 0.5 if len(endian_types) == 2 else 0

        # Bonus for having both SIMD and non-SIMD machines.
        simd_types = set()
        for arch in self.architectures_present:
            if arch.simd_unit:
                simd_types.add("simd")
            else:
                simd_types.add("scalar")

        simd_bonus = 0.3 if len(simd_types) == 2 else 0

        return base_score + endian_bonus + simd_bonus

    def collect_entropy(self, oracle: ArchitectureOracle) -> bytes:
        """Collect architecture-specific entropy from a node.

        Each architecture generates entropy differently:
          - PowerPC: AltiVec vperm timing
          - x86:     SSE PSHUFB timing
          - ARM:     NEON TBL timing
          - 68k:     bus timing (no SIMD)

        NOTE: in production this would SSH to the node and run an
        arch-specific binary; here the entropy is simulated locally.
        """
        # Per-architecture entropy payload sizes (bytes).
        arch_entropy_size = {
            CPUArchitecture.POWERPC_G4: 64,    # 512-bit from AltiVec
            CPUArchitecture.POWERPC_G5: 64,
            CPUArchitecture.INTEL_X86_64: 64,  # 512-bit from AVX
            CPUArchitecture.APPLE_M1: 64,      # 512-bit from NEON
            CPUArchitecture.APPLE_M2: 64,
            CPUArchitecture.MOTOROLA_68K: 32,  # 256-bit (no SIMD, timing only)
            CPUArchitecture.SPARC: 48,         # 384-bit from VIS
        }

        size = arch_entropy_size.get(oracle.architecture, 32)

        # Simulate architecture-specific entropy generation.
        entropy = hashlib.sha512(
            oracle.node_id.encode() +
            oracle.architecture.arch_id.encode() +
            struct.pack('>Q', int(time.time() * 1000000)) +
            secrets.token_bytes(32)
        ).digest()[:size]

        oracle.last_entropy = entropy
        return entropy

    def generate_mutation_seed(self, block_height: int) -> Optional[MultiArchMutationSeed]:
        """Generate a mutation seed from all architecture oracles.

        Returns None when fewer than MINIMUM_ARCHITECTURES distinct
        architectures are registered.  Entropy from every node is
        XOR-combined (zero-padded to 64 bytes) so no single node
        controls the output, then mixed with the block height.
        """
        if len(self.architectures_present) < self.MINIMUM_ARCHITECTURES:
            print(f" ✗ Need {self.MINIMUM_ARCHITECTURES} architectures, have {len(self.architectures_present)}")
            return None

        print(f"\n Generating multi-architecture mutation seed for block {block_height}...")
        print(f" Architectures: {len(self.architectures_present)}")
        print(f" Diversity Score: {self.get_diversity_score():.2f}")

        # Collect entropy from each architecture.
        combined = bytes(64)
        contributions = {}

        for node_id, oracle in self.nodes.items():
            entropy = self.collect_entropy(oracle)
            entropy_hash = hashlib.sha256(entropy).digest()

            # XOR into combined (pad shorter entropies).
            padded = entropy.ljust(64, b'\0')
            combined = bytes(a ^ b for a, b in zip(combined, padded))

            contributions[oracle.architecture.arch_id] = (node_id, entropy_hash)

            print(f" ✓ {oracle.architecture.arch_name}: {entropy[:8].hex()}...")

        # Mix with block height for per-block uniqueness.
        final_seed = hashlib.sha512(
            combined +
            struct.pack('>Q', block_height) +
            b'MULTIARCH_MUTATION_SEED'
        ).digest()

        # Ring signature: keyed HMAC binding the seed to the sorted set
        # of contributing architecture IDs.
        # FIX: previously this was `hmac.new(...) if 'hmac' in dir()
        # else hashlib.sha256(final_seed).digest()`.  `dir()` with no
        # argument lists the method's LOCAL names, which never include
        # 'hmac', so the fallback always ran and the "signature" was an
        # unkeyed hash that ignored the contributor list entirely.
        ring_sig = hmac.new(
            final_seed,
            b''.join(a.encode() for a in sorted(contributions.keys())),
            hashlib.sha256
        ).digest()

        seed = MultiArchMutationSeed(
            seed=final_seed,
            block_height=block_height,
            timestamp=int(time.time() * 1000),
            architecture_contributions=contributions,
            diversity_score=self.get_diversity_score(),
            ring_signature=ring_sig
        )

        print(f"\n ✓ Seed: {final_seed[:16].hex()}...{final_seed[-16:].hex()}")
        print(f" ✓ Diversity: {seed.diversity_score:.2f} ({len(contributions)} architectures)")

        return seed


def demo_multi_arch_network():
    """Demonstrate the multi-architecture oracle network end to end.

    Registers a fleet of real-hardware oracle definitions, prints the
    architecture coverage and diversity score, generates mutation seeds
    for a few block heights, and prints a security analysis.
    """

    print("""
╔══════════════════════════════════════════════════════════════════════╗
║ RUSTCHAIN MULTI-ARCHITECTURE MUTATOR ORACLE NETWORK ║
║ ║
║ "Diversity is security. The chain speaks many silicon dialects." ║
╚══════════════════════════════════════════════════════════════════════╝
""")

    ring = MultiArchOracleRing()

    print(" Registering Oracle Nodes:\n")

    # Your actual hardware
    oracles = [
        # PowerPC Macs (AltiVec)
        ArchitectureOracle(
            node_id="G4_MIRROR_DOOR",
            hostname="Lee-Crockers-Powermac-G4.local",
            ip_address="192.168.0.125",
            architecture=CPUArchitecture.POWERPC_G4,
            cpu_model="PowerMac3,6",
            simd_enabled=True,
            unique_features=["altivec", "dual_cpu", "ddr_sdram"]
        ),
        ArchitectureOracle(
            node_id="G5_DUAL",
            hostname="lee-crockers-power-mac-g5.local",
            ip_address="192.168.0.130",
            architecture=CPUArchitecture.POWERPC_G5,
            cpu_model="PowerMac7,3",
            simd_enabled=True,
            unique_features=["altivec", "64bit", "hypertransport"]
        ),
        ArchitectureOracle(
            node_id="POWERBOOK_G4",
            hostname="sophiacorepbs-powerbook-g4-12.local",
            ip_address="192.168.0.115",
            architecture=CPUArchitecture.POWERPC_G4,
            cpu_model="PowerBook6,8",
            simd_enabled=True,
            unique_features=["altivec", "mobile", "battery_entropy"]
        ),

        # Intel Macs (SSE/AVX)
        ArchitectureOracle(
            node_id="TRASHCAN_XEON",
            hostname="mac-pro-trashcan.local",
            ip_address="192.168.0.154",
            architecture=CPUArchitecture.INTEL_X86_64,
            cpu_model="MacPro6,1",
            simd_enabled=True,
            unique_features=["avx2", "xeon", "ecc_memory", "dual_gpu"]
        ),

        # Apple Silicon (NEON + AMX)
        ArchitectureOracle(
            node_id="M2_MINI",
            hostname="m2-mac-mini.local",
            ip_address="192.168.0.171",
            architecture=CPUArchitecture.APPLE_M2,
            cpu_model="Mac14,3",
            simd_enabled=True,
            unique_features=["neon", "amx", "neural_engine", "unified_memory"]
        ),

        # Linux x86 nodes
        ArchitectureOracle(
            node_id="LINUX_POWEREDGE",
            hostname="sophia-PowerEdge-C4130",
            ip_address="192.168.0.160",
            architecture=CPUArchitecture.INTEL_X86_64,
            cpu_model="Xeon E5-2680",
            simd_enabled=True,
            unique_features=["avx2", "server", "ecc", "tesla_gpu"]
        ),
    ]

    for oracle in oracles:
        ring.register_oracle(oracle)
        print()

    # Show architecture coverage
    print("\n" + "="*70)
    print(" ARCHITECTURE COVERAGE")
    print("="*70)

    arch_count = {}
    for oracle in ring.nodes.values():
        arch = oracle.architecture.arch_name
        arch_count[arch] = arch_count.get(arch, 0) + 1

    for arch, count in sorted(arch_count.items()):
        print(f" {arch}: {count} node(s)")

    print(f"\n Total Unique Architectures: {len(ring.architectures_present)}")
    print(f" Diversity Score: {ring.get_diversity_score():.2f}")

    # Generate mutation seeds
    print("\n" + "="*70)
    print(" GENERATING MUTATION SEEDS")
    print("="*70)

    for block in [1000, 1010, 1020]:
        seed = ring.generate_mutation_seed(block)

    # Show the power of diversity
    print("""
╔══════════════════════════════════════════════════════════════════════╗
║ DIVERSITY SECURITY ANALYSIS ║
╚══════════════════════════════════════════════════════════════════════╝

 ARCHITECTURE ENTROPY SOURCES:
 ───────────────────────────────────────────────────────────────────────

 PowerPC G4/G5 (AltiVec vperm):
 • 128-bit vector permutation
 • Big-endian memory ordering
 • Unique timebase register

 Intel x86-64 (SSE/AVX):
 • 256/512-bit vector shuffle
 • Little-endian memory ordering
 • RDTSC/RDTSCP timing

 Apple M1/M2 (NEON + AMX):
 • 128-bit NEON permute
 • ARM64 memory model
 • Apple-specific timing sources

 ATTACK SCENARIOS:
 ───────────────────────────────────────────────────────────────────────

 To compromise this network, attacker must:

 1. Build accurate emulators for:
 ✗ PowerPC G4 AltiVec timing ($50,000+)
 ✗ PowerPC G5 AltiVec timing ($50,000+)
 ✗ Intel AVX timing ($30,000+)
 ✗ ARM NEON timing ($30,000+)

 Total: $160,000+ in emulator development

 2. OR physically compromise nodes across:
 ✗ Multiple geographic locations
 ✗ Multiple network segments
 ✗ Multiple CPU architectures
 ✗ All within 10-second block window

 DEFENSE COST:
 ───────────────────────────────────────────────────────────────────────

 • PowerMac G4: $30-50
 • PowerMac G5: $50-100
 • Mac Pro (Intel): $200-400
 • M2 Mac Mini: $500-600
 • Linux server: $100-300

 Total hardware: ~$1,000 for 5+ architecture coverage

 ATTACK/DEFENSE RATIO: 160:1 (attacker pays 160x more!)

╔══════════════════════════════════════════════════════════════════════╗
║ "Every architecture added is another language the attacker ║
║ must learn to speak fluently - in silicon." ║
╚══════════════════════════════════════════════════════════════════════╝
""")


if __name__ == "__main__":
    # `import hmac` moved to module scope (see top of file): the ring
    # signature depends on it even when this module is imported as a
    # library rather than run as a script.
    demo_multi_arch_network()
- -Architecture: -───────────────────────────────────────────────────────────────────────── - - ┌─────────────────────────────┐ - │ PPC MUTATOR ORACLE RING │ - │ (Hidden from public view) │ - └──────────────┬──────────────┘ - │ - ┌───────────────────────┼───────────────────────┐ - │ │ │ - ┌──────▼──────┐ ┌──────▼──────┐ ┌──────▼──────┐ - │ G4 Mirror │ │ G5 Dual │ │ PowerBook │ - │ Door │ │ 2GHz │ │ G4 │ - │ (AltiVec) │ │ (AltiVec) │ │ (AltiVec) │ - └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ - │ │ │ - └───────────────────────┼───────────────────────┘ - │ - ┌───────▼───────┐ - │ MUTATION SEED │ - │ (512-bit) │ - └───────┬───────┘ - │ - ┌──────────────▼──────────────┐ - │ PUBLIC VALIDATOR RING │ - │ (Challenged with mutated │ - │ parameters each block) │ - └─────────────────────────────┘ - -Why PPC as Hidden Mutators? -═══════════════════════════════════════════════════════════════════════ - -1. UNPREDICTABLE: AltiVec vperm + timebase = quantum-resistant randomness -2. UNFAKEABLE: Physical silicon entropy can't be emulated -3. HIDDEN: Mutator nodes don't participate in public consensus -4. DISTRIBUTED: Multiple PPC nodes must agree on mutation seed -5. VINTAGE: Economic incentive to preserve old hardware - -Attack Scenarios PREVENTED: -═══════════════════════════════════════════════════════════════════════ - -❌ Pre-compute challenge responses - → Can't predict mutation seed without controlling PPC oracles - -❌ Sybil attack with emulators - → Emulators can't match AltiVec timing characteristics - -❌ MITM mutation manipulation - → Requires controlling majority of hidden PPC ring - -❌ Quantum computer attack - → Entropy is physical, not mathematical - -"The PowerPC nodes are the heartbeat of the chain. - Ancient silicon decides the fate of modern validators." 
-""" - -import hashlib -import hmac -import json -import secrets -import struct -import time -import subprocess -import socket -from dataclasses import dataclass, field -from typing import List, Dict, Tuple, Optional -from enum import Enum - -class MutatorRole(Enum): - """Roles in the mutator oracle network""" - PRIMARY = "primary" # Generates base entropy - SECONDARY = "secondary" # Contributes mixing entropy - WITNESS = "witness" # Validates but doesn't contribute - -@dataclass -class PPCMutatorNode: - """A PowerPC mutator oracle node""" - node_id: str - hostname: str - ip_address: str - cpu_model: str # e.g., "PowerMac3,6" - altivec_enabled: bool - role: MutatorRole - public_key: str - last_entropy: bytes = b'' - last_timestamp: int = 0 - -@dataclass -class MutationSeed: - """512-bit mutation seed generated by PPC oracle ring""" - seed: bytes # 64 bytes = 512 bits - contributing_nodes: List[str] # Node IDs that contributed - block_height: int - timestamp: int - ring_signature: bytes # Threshold signature from oracles - entropy_proofs: Dict[str, str] # node_id -> AltiVec signature - - def to_bytes(self) -> bytes: - return ( - self.seed + - struct.pack('>Q', self.block_height) + - struct.pack('>Q', self.timestamp) + - b''.join(n.encode().ljust(32, b'\0') for n in self.contributing_nodes) - ) - - def hash(self) -> bytes: - return hashlib.sha256(self.to_bytes()).digest() - -class PPCMutatorRing: - """ - The hidden ring of PowerPC mutator oracle nodes. - - These nodes: - 1. Generate AltiVec-based quantum-resistant entropy - 2. Combine their entropy into a mutation seed - 3. Sign the seed with threshold signatures - 4. Broadcast ONLY the seed (not their individual entropy) - 5. 
Never participate in public challenge-response - - The mutation seed determines: - - Challenge parameter ranges - - Which hardware aspects to test - - Timing windows - - Serial verification targets - """ - - MINIMUM_NODES = 2 # Need at least 2 for consensus - THRESHOLD_FRACTION = 0.67 # 2/3 must agree - SEED_REFRESH_BLOCKS = 10 # New seed every 10 blocks - - def __init__(self): - self.nodes: Dict[str, PPCMutatorNode] = {} - self.current_seed: Optional[MutationSeed] = None - self.seed_history: List[MutationSeed] = [] - - def register_node(self, node: PPCMutatorNode) -> bool: - """Register a PPC node as a mutator oracle""" - if not node.altivec_enabled: - print(f" ✗ Node {node.node_id} rejected: AltiVec required") - return False - - # Verify it's actually a PowerPC - if not node.cpu_model.startswith("Power"): - print(f" ✗ Node {node.node_id} rejected: Must be PowerPC") - return False - - self.nodes[node.node_id] = node - print(f" ✓ Node {node.node_id} registered as {node.role.value} mutator") - return True - - def collect_entropy(self, node_id: str) -> Tuple[bytes, str]: - """ - Collect AltiVec entropy from a specific node. - - In production, this would SSH to the node and run the - altivec_entropy_collapse binary, returning the 512-bit - collapsed entropy and signature. 
- """ - node = self.nodes.get(node_id) - if not node: - return b'', '' - - # Simulate AltiVec entropy collection - # In production: subprocess.run(['ssh', node.ip_address, '/usr/local/bin/altivec_entropy']) - - # Generate simulated AltiVec-style entropy - timestamp = int(time.time() * 1000) - node_entropy = hashlib.sha512( - node.node_id.encode() + - struct.pack('>Q', timestamp) + - secrets.token_bytes(32) - ).digest() - - signature = f"ALTIVEC-{node.cpu_model}-{node_entropy[:4].hex()}-{timestamp}" - - node.last_entropy = node_entropy - node.last_timestamp = timestamp - - return node_entropy, signature - - def generate_mutation_seed(self, block_height: int) -> Optional[MutationSeed]: - """ - Generate a new mutation seed from the oracle ring. - - Process: - 1. Collect entropy from all active nodes - 2. XOR-combine entropies (no single node controls seed) - 3. Apply additional mixing with block height - 4. Generate threshold signature - """ - if len(self.nodes) < self.MINIMUM_NODES: - print(f" ✗ Need {self.MINIMUM_NODES} nodes, have {len(self.nodes)}") - return None - - print(f"\n Generating mutation seed for block {block_height}...") - - combined_entropy = bytes(64) # Start with zeros - contributing_nodes = [] - entropy_proofs = {} - - # Collect and combine entropy from each node - for node_id, node in self.nodes.items(): - entropy, signature = self.collect_entropy(node_id) - - if entropy: - # XOR combine (no single node controls output) - combined_entropy = bytes( - a ^ b for a, b in zip(combined_entropy, entropy) - ) - contributing_nodes.append(node_id) - entropy_proofs[node_id] = signature - print(f" ✓ {node_id}: {signature[:40]}...") - - # Mix with block height for uniqueness - block_mix = hashlib.sha512( - combined_entropy + - struct.pack('>Q', block_height) + - b'RUSTCHAIN_MUTATOR_ORACLE' - ).digest() - - # Final seed is XOR of combined entropy and block mix - final_seed = bytes(a ^ b for a, b in zip(combined_entropy, block_mix)) - - # Generate ring signature 
(simplified - use threshold sigs in production) - ring_signature = hmac.new( - final_seed, - b''.join(n.encode() for n in sorted(contributing_nodes)), - hashlib.sha256 - ).digest() - - seed = MutationSeed( - seed=final_seed, - contributing_nodes=contributing_nodes, - block_height=block_height, - timestamp=int(time.time() * 1000), - ring_signature=ring_signature, - entropy_proofs=entropy_proofs - ) - - self.current_seed = seed - self.seed_history.append(seed) - - print(f"\n ✓ Mutation seed generated:") - print(f" Block: {block_height}") - print(f" Contributors: {len(contributing_nodes)} nodes") - print(f" Seed: {final_seed[:16].hex()}...{final_seed[-16:].hex()}") - - return seed - - def derive_challenge_params(self, seed: MutationSeed, target: str) -> dict: - """ - Derive challenge parameters from mutation seed. - - The seed determines ALL challenge parameters in a deterministic - but unpredictable way. - """ - # Derive per-target parameters - target_hash = hashlib.sha256( - seed.seed + target.encode() - ).digest() - - # Extract parameters from hash bytes - params = { - 'cache_stride': 32 + (target_hash[0] % 480), # 32-512 - 'cache_iterations': 128 + (target_hash[1] << 2), # 128-1024 - 'memory_size_kb': 256 + (target_hash[2] << 5), # 256-8192 - 'pipeline_depth': 500 + (target_hash[3] << 4), # 500-4596 - 'hash_rounds': 500 + (target_hash[4] << 4), # 500-4596 - 'jitter_min_pct': 3 + (target_hash[5] % 8), # 3-10 - 'timing_window_ms': 1000 + (target_hash[6] << 4), # 1000-5096 - 'serial_check': ['openfirmware', 'gpu', 'storage', 'platform'][target_hash[7] % 4], - } - - return params - - -class HiddenMutatorProtocol: - """ - Protocol for hidden mutator operation. 
- - The mutator ring operates in the shadows: - - Never directly participates in block production - - Only emits mutation seeds - - Uses dedicated secure channel (not public P2P) - - Rotates primary node each epoch - """ - - def __init__(self, ring: PPCMutatorRing): - self.ring = ring - self.current_epoch = 0 - self.primary_rotation = [] - - def initialize_rotation(self): - """Set up primary node rotation""" - # Deterministically order nodes for rotation - self.primary_rotation = sorted(self.ring.nodes.keys()) - - def get_current_primary(self) -> Optional[str]: - """Get the current primary mutator node""" - if not self.primary_rotation: - return None - return self.primary_rotation[self.current_epoch % len(self.primary_rotation)] - - def rotate_epoch(self): - """Advance to next epoch, rotating primary""" - self.current_epoch += 1 - primary = self.get_current_primary() - print(f"\n Epoch {self.current_epoch}: Primary mutator is now {primary}") - - def emit_seed_to_network(self, seed: MutationSeed) -> dict: - """ - Emit mutation seed to the public network. - - Only the SEED is emitted - individual node entropies stay hidden. - """ - return { - 'type': 'mutation_seed', - 'block_height': seed.block_height, - 'seed_hash': seed.hash().hex(), - 'contributors': len(seed.contributing_nodes), # Count only, not IDs! 
- 'timestamp': seed.timestamp, - 'ring_signature': seed.ring_signature.hex(), - # Individual node details are NOT included - } - - -def demo_hidden_mutator_network(): - """Demonstrate the hidden PPC mutator oracle network""" - - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ RUSTCHAIN PPC HIDDEN MUTATOR ORACLE NETWORK ║ -║ ║ -║ "Ancient silicon decides the fate of modern validators" ║ -╚══════════════════════════════════════════════════════════════════════╝ -""") - - # Create the hidden ring - ring = PPCMutatorRing() - - # Register PPC nodes as mutator oracles - print(" Registering PPC Mutator Oracles:\n") - - ppc_nodes = [ - PPCMutatorNode( - node_id="G4_ORACLE_125", - hostname="Lee-Crockers-Powermac-G4.local", - ip_address="192.168.0.125", - cpu_model="PowerMac3,6", - altivec_enabled=True, - role=MutatorRole.PRIMARY, - public_key="PPC_G4_125_PUBKEY" - ), - PPCMutatorNode( - node_id="G5_ORACLE_130", - hostname="lee-crockers-power-mac-g5.local", - ip_address="192.168.0.130", - cpu_model="PowerMac7,3", - altivec_enabled=True, - role=MutatorRole.SECONDARY, - public_key="PPC_G5_130_PUBKEY" - ), - PPCMutatorNode( - node_id="POWERBOOK_ORACLE_115", - hostname="sophiacorepbs-powerbook-g4-12.local", - ip_address="192.168.0.115", - cpu_model="PowerBook6,8", - altivec_enabled=True, - role=MutatorRole.SECONDARY, - public_key="PPC_PB_115_PUBKEY" - ), - ] - - # Try to register a fake non-PPC node - fake_node = PPCMutatorNode( - node_id="FAKE_EMULATOR", - hostname="qemu-ppc.fake", - ip_address="10.0.0.1", - cpu_model="QEMU_PPC", # Not "Power..." 
- altivec_enabled=True, - role=MutatorRole.PRIMARY, - public_key="FAKE_KEY" - ) - - for node in ppc_nodes: - ring.register_node(node) - - print("\n Attempting to register fake node:") - ring.register_node(fake_node) # Should be rejected - - # Initialize protocol - protocol = HiddenMutatorProtocol(ring) - protocol.initialize_rotation() - - print(f"\n Primary rotation order: {protocol.primary_rotation}") - print(f" Current primary: {protocol.get_current_primary()}") - - # Generate mutation seeds for several blocks - print("\n" + "="*70) - print(" GENERATING MUTATION SEEDS") - print("="*70) - - for block in [100, 110, 120]: - seed = ring.generate_mutation_seed(block) - - if seed: - # Show what parameters this seed would generate - print(f"\n Challenge parameters for target 'TestValidator':") - params = ring.derive_challenge_params(seed, "TestValidator") - for k, v in params.items(): - print(f" {k}: {v}") - - # Show what's emitted to public network - public_emission = protocol.emit_seed_to_network(seed) - print(f"\n Public emission (node identities HIDDEN):") - print(f" {json.dumps(public_emission, indent=4)}") - - # Rotate epoch - protocol.rotate_epoch() - - print(""" -╔══════════════════════════════════════════════════════════════════════╗ -║ SECURITY ANALYSIS ║ -╚══════════════════════════════════════════════════════════════════════╝ - - WHAT ATTACKERS SEE: - ─────────────────────────────────────────────────────────────────────── - • Mutation seed hash (can verify but not predict) - • Number of contributors (not identities) - • Ring signature (proves legitimacy) - • Challenge parameters derived from seed - - WHAT ATTACKERS DON'T SEE: - ─────────────────────────────────────────────────────────────────────── - • Which PPC nodes are mutators - • Individual node entropies - • Node IP addresses or locations - • AltiVec timing signatures - • Ring communication protocol - - TO COMPROMISE THE SYSTEM: - ─────────────────────────────────────────────────────────────────────── 
- 1. Identify hidden PPC mutator nodes (hard - they're not public) - 2. Physically compromise 2/3 of them (requires physical access) - 3. Extract AltiVec entropy generation (can't fake it) - 4. Do this BEFORE next block (10 second window) - - COST TO ATTACK: Find and physically control multiple hidden - vintage Macs scattered across unknown locations - - COST TO DEFEND: Buy 3 old Macs for $150 total, hide them - - "The PowerPC nodes are the heartbeat of the chain. - Ancient silicon decides the fate of modern validators." - -╚══════════════════════════════════════════════════════════════════════╝ -""") - - -if __name__ == "__main__": - demo_hidden_mutator_network() +#!/usr/bin/env python3 +""" +RustChain PPC Hidden Mutator Oracle Network +============================================ + +PowerPC nodes act as hidden oracles that generate mutation seeds for the +entire network. These nodes are NEVER directly challenged - they only +generate entropy that determines HOW challenges mutate. + +Architecture: +───────────────────────────────────────────────────────────────────────── + + ┌─────────────────────────────┐ + │ PPC MUTATOR ORACLE RING │ + │ (Hidden from public view) │ + └──────────────┬──────────────┘ + │ + ┌───────────────────────┼───────────────────────┐ + │ │ │ + ┌──────▼──────┐ ┌──────▼──────┐ ┌──────▼──────┐ + │ G4 Mirror │ │ G5 Dual │ │ PowerBook │ + │ Door │ │ 2GHz │ │ G4 │ + │ (AltiVec) │ │ (AltiVec) │ │ (AltiVec) │ + └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ + │ │ │ + └───────────────────────┼───────────────────────┘ + │ + ┌───────▼───────┐ + │ MUTATION SEED │ + │ (512-bit) │ + └───────┬───────┘ + │ + ┌──────────────▼──────────────┐ + │ PUBLIC VALIDATOR RING │ + │ (Challenged with mutated │ + │ parameters each block) │ + └─────────────────────────────┘ + +Why PPC as Hidden Mutators? +═══════════════════════════════════════════════════════════════════════ + +1. UNPREDICTABLE: AltiVec vperm + timebase = quantum-resistant randomness +2. 
UNFAKEABLE: Physical silicon entropy can't be emulated +3. HIDDEN: Mutator nodes don't participate in public consensus +4. DISTRIBUTED: Multiple PPC nodes must agree on mutation seed +5. VINTAGE: Economic incentive to preserve old hardware + +Attack Scenarios PREVENTED: +═══════════════════════════════════════════════════════════════════════ + +❌ Pre-compute challenge responses + → Can't predict mutation seed without controlling PPC oracles + +❌ Sybil attack with emulators + → Emulators can't match AltiVec timing characteristics + +❌ MITM mutation manipulation + → Requires controlling majority of hidden PPC ring + +❌ Quantum computer attack + → Entropy is physical, not mathematical + +"The PowerPC nodes are the heartbeat of the chain. + Ancient silicon decides the fate of modern validators." +""" + +import hashlib +import hmac +import json +import secrets +import struct +import time +import subprocess +import socket +from dataclasses import dataclass, field +from typing import List, Dict, Tuple, Optional +from enum import Enum + +class MutatorRole(Enum): + """Roles in the mutator oracle network""" + PRIMARY = "primary" # Generates base entropy + SECONDARY = "secondary" # Contributes mixing entropy + WITNESS = "witness" # Validates but doesn't contribute + +@dataclass +class PPCMutatorNode: + """A PowerPC mutator oracle node""" + node_id: str + hostname: str + ip_address: str + cpu_model: str # e.g., "PowerMac3,6" + altivec_enabled: bool + role: MutatorRole + public_key: str + last_entropy: bytes = b'' + last_timestamp: int = 0 + +@dataclass +class MutationSeed: + """512-bit mutation seed generated by PPC oracle ring""" + seed: bytes # 64 bytes = 512 bits + contributing_nodes: List[str] # Node IDs that contributed + block_height: int + timestamp: int + ring_signature: bytes # Threshold signature from oracles + entropy_proofs: Dict[str, str] # node_id -> AltiVec signature + + def to_bytes(self) -> bytes: + return ( + self.seed + + struct.pack('>Q', self.block_height) 
+ + struct.pack('>Q', self.timestamp) + + b''.join(n.encode().ljust(32, b'\0') for n in self.contributing_nodes) + ) + + def hash(self) -> bytes: + return hashlib.sha256(self.to_bytes()).digest() + +class PPCMutatorRing: + """ + The hidden ring of PowerPC mutator oracle nodes. + + These nodes: + 1. Generate AltiVec-based quantum-resistant entropy + 2. Combine their entropy into a mutation seed + 3. Sign the seed with threshold signatures + 4. Broadcast ONLY the seed (not their individual entropy) + 5. Never participate in public challenge-response + + The mutation seed determines: + - Challenge parameter ranges + - Which hardware aspects to test + - Timing windows + - Serial verification targets + """ + + MINIMUM_NODES = 2 # Need at least 2 for consensus + THRESHOLD_FRACTION = 0.67 # 2/3 must agree + SEED_REFRESH_BLOCKS = 10 # New seed every 10 blocks + + def __init__(self): + self.nodes: Dict[str, PPCMutatorNode] = {} + self.current_seed: Optional[MutationSeed] = None + self.seed_history: List[MutationSeed] = [] + + def register_node(self, node: PPCMutatorNode) -> bool: + """Register a PPC node as a mutator oracle""" + if not node.altivec_enabled: + print(f" ✗ Node {node.node_id} rejected: AltiVec required") + return False + + # Verify it's actually a PowerPC + if not node.cpu_model.startswith("Power"): + print(f" ✗ Node {node.node_id} rejected: Must be PowerPC") + return False + + self.nodes[node.node_id] = node + print(f" ✓ Node {node.node_id} registered as {node.role.value} mutator") + return True + + def collect_entropy(self, node_id: str) -> Tuple[bytes, str]: + """ + Collect AltiVec entropy from a specific node. + + In production, this would SSH to the node and run the + altivec_entropy_collapse binary, returning the 512-bit + collapsed entropy and signature. 
+ """ + node = self.nodes.get(node_id) + if not node: + return b'', '' + + # Simulate AltiVec entropy collection + # In production: subprocess.run(['ssh', node.ip_address, '/usr/local/bin/altivec_entropy']) + + # Generate simulated AltiVec-style entropy + timestamp = int(time.time() * 1000) + node_entropy = hashlib.sha512( + node.node_id.encode() + + struct.pack('>Q', timestamp) + + secrets.token_bytes(32) + ).digest() + + signature = f"ALTIVEC-{node.cpu_model}-{node_entropy[:4].hex()}-{timestamp}" + + node.last_entropy = node_entropy + node.last_timestamp = timestamp + + return node_entropy, signature + + def generate_mutation_seed(self, block_height: int) -> Optional[MutationSeed]: + """ + Generate a new mutation seed from the oracle ring. + + Process: + 1. Collect entropy from all active nodes + 2. XOR-combine entropies (no single node controls seed) + 3. Apply additional mixing with block height + 4. Generate threshold signature + """ + if len(self.nodes) < self.MINIMUM_NODES: + print(f" ✗ Need {self.MINIMUM_NODES} nodes, have {len(self.nodes)}") + return None + + print(f"\n Generating mutation seed for block {block_height}...") + + combined_entropy = bytes(64) # Start with zeros + contributing_nodes = [] + entropy_proofs = {} + + # Collect and combine entropy from each node + for node_id, node in self.nodes.items(): + entropy, signature = self.collect_entropy(node_id) + + if entropy: + # XOR combine (no single node controls output) + combined_entropy = bytes( + a ^ b for a, b in zip(combined_entropy, entropy) + ) + contributing_nodes.append(node_id) + entropy_proofs[node_id] = signature + print(f" ✓ {node_id}: {signature[:40]}...") + + # Mix with block height for uniqueness + block_mix = hashlib.sha512( + combined_entropy + + struct.pack('>Q', block_height) + + b'RUSTCHAIN_MUTATOR_ORACLE' + ).digest() + + # Final seed is XOR of combined entropy and block mix + final_seed = bytes(a ^ b for a, b in zip(combined_entropy, block_mix)) + + # Generate ring signature 
(simplified - use threshold sigs in production) + ring_signature = hmac.new( + final_seed, + b''.join(n.encode() for n in sorted(contributing_nodes)), + hashlib.sha256 + ).digest() + + seed = MutationSeed( + seed=final_seed, + contributing_nodes=contributing_nodes, + block_height=block_height, + timestamp=int(time.time() * 1000), + ring_signature=ring_signature, + entropy_proofs=entropy_proofs + ) + + self.current_seed = seed + self.seed_history.append(seed) + + print(f"\n ✓ Mutation seed generated:") + print(f" Block: {block_height}") + print(f" Contributors: {len(contributing_nodes)} nodes") + print(f" Seed: {final_seed[:16].hex()}...{final_seed[-16:].hex()}") + + return seed + + def derive_challenge_params(self, seed: MutationSeed, target: str) -> dict: + """ + Derive challenge parameters from mutation seed. + + The seed determines ALL challenge parameters in a deterministic + but unpredictable way. + """ + # Derive per-target parameters + target_hash = hashlib.sha256( + seed.seed + target.encode() + ).digest() + + # Extract parameters from hash bytes + params = { + 'cache_stride': 32 + (target_hash[0] % 480), # 32-512 + 'cache_iterations': 128 + (target_hash[1] << 2), # 128-1024 + 'memory_size_kb': 256 + (target_hash[2] << 5), # 256-8192 + 'pipeline_depth': 500 + (target_hash[3] << 4), # 500-4596 + 'hash_rounds': 500 + (target_hash[4] << 4), # 500-4596 + 'jitter_min_pct': 3 + (target_hash[5] % 8), # 3-10 + 'timing_window_ms': 1000 + (target_hash[6] << 4), # 1000-5096 + 'serial_check': ['openfirmware', 'gpu', 'storage', 'platform'][target_hash[7] % 4], + } + + return params + + +class HiddenMutatorProtocol: + """ + Protocol for hidden mutator operation. 
+ + The mutator ring operates in the shadows: + - Never directly participates in block production + - Only emits mutation seeds + - Uses dedicated secure channel (not public P2P) + - Rotates primary node each epoch + """ + + def __init__(self, ring: PPCMutatorRing): + self.ring = ring + self.current_epoch = 0 + self.primary_rotation = [] + + def initialize_rotation(self): + """Set up primary node rotation""" + # Deterministically order nodes for rotation + self.primary_rotation = sorted(self.ring.nodes.keys()) + + def get_current_primary(self) -> Optional[str]: + """Get the current primary mutator node""" + if not self.primary_rotation: + return None + return self.primary_rotation[self.current_epoch % len(self.primary_rotation)] + + def rotate_epoch(self): + """Advance to next epoch, rotating primary""" + self.current_epoch += 1 + primary = self.get_current_primary() + print(f"\n Epoch {self.current_epoch}: Primary mutator is now {primary}") + + def emit_seed_to_network(self, seed: MutationSeed) -> dict: + """ + Emit mutation seed to the public network. + + Only the SEED is emitted - individual node entropies stay hidden. + """ + return { + 'type': 'mutation_seed', + 'block_height': seed.block_height, + 'seed_hash': seed.hash().hex(), + 'contributors': len(seed.contributing_nodes), # Count only, not IDs! 
+ 'timestamp': seed.timestamp, + 'ring_signature': seed.ring_signature.hex(), + # Individual node details are NOT included + } + + +def demo_hidden_mutator_network(): + """Demonstrate the hidden PPC mutator oracle network""" + + print(""" +╔══════════════════════════════════════════════════════════════════════╗ +║ RUSTCHAIN PPC HIDDEN MUTATOR ORACLE NETWORK ║ +║ ║ +║ "Ancient silicon decides the fate of modern validators" ║ +╚══════════════════════════════════════════════════════════════════════╝ +""") + + # Create the hidden ring + ring = PPCMutatorRing() + + # Register PPC nodes as mutator oracles + print(" Registering PPC Mutator Oracles:\n") + + ppc_nodes = [ + PPCMutatorNode( + node_id="G4_ORACLE_125", + hostname="Lee-Crockers-Powermac-G4.local", + ip_address="192.168.0.125", + cpu_model="PowerMac3,6", + altivec_enabled=True, + role=MutatorRole.PRIMARY, + public_key="PPC_G4_125_PUBKEY" + ), + PPCMutatorNode( + node_id="G5_ORACLE_130", + hostname="lee-crockers-power-mac-g5.local", + ip_address="192.168.0.130", + cpu_model="PowerMac7,3", + altivec_enabled=True, + role=MutatorRole.SECONDARY, + public_key="PPC_G5_130_PUBKEY" + ), + PPCMutatorNode( + node_id="POWERBOOK_ORACLE_115", + hostname="sophiacorepbs-powerbook-g4-12.local", + ip_address="192.168.0.115", + cpu_model="PowerBook6,8", + altivec_enabled=True, + role=MutatorRole.SECONDARY, + public_key="PPC_PB_115_PUBKEY" + ), + ] + + # Try to register a fake non-PPC node + fake_node = PPCMutatorNode( + node_id="FAKE_EMULATOR", + hostname="qemu-ppc.fake", + ip_address="10.0.0.1", + cpu_model="QEMU_PPC", # Not "Power..." 
+ altivec_enabled=True, + role=MutatorRole.PRIMARY, + public_key="FAKE_KEY" + ) + + for node in ppc_nodes: + ring.register_node(node) + + print("\n Attempting to register fake node:") + ring.register_node(fake_node) # Should be rejected + + # Initialize protocol + protocol = HiddenMutatorProtocol(ring) + protocol.initialize_rotation() + + print(f"\n Primary rotation order: {protocol.primary_rotation}") + print(f" Current primary: {protocol.get_current_primary()}") + + # Generate mutation seeds for several blocks + print("\n" + "="*70) + print(" GENERATING MUTATION SEEDS") + print("="*70) + + for block in [100, 110, 120]: + seed = ring.generate_mutation_seed(block) + + if seed: + # Show what parameters this seed would generate + print(f"\n Challenge parameters for target 'TestValidator':") + params = ring.derive_challenge_params(seed, "TestValidator") + for k, v in params.items(): + print(f" {k}: {v}") + + # Show what's emitted to public network + public_emission = protocol.emit_seed_to_network(seed) + print(f"\n Public emission (node identities HIDDEN):") + print(f" {json.dumps(public_emission, indent=4)}") + + # Rotate epoch + protocol.rotate_epoch() + + print(""" +╔══════════════════════════════════════════════════════════════════════╗ +║ SECURITY ANALYSIS ║ +╚══════════════════════════════════════════════════════════════════════╝ + + WHAT ATTACKERS SEE: + ─────────────────────────────────────────────────────────────────────── + • Mutation seed hash (can verify but not predict) + • Number of contributors (not identities) + • Ring signature (proves legitimacy) + • Challenge parameters derived from seed + + WHAT ATTACKERS DON'T SEE: + ─────────────────────────────────────────────────────────────────────── + • Which PPC nodes are mutators + • Individual node entropies + • Node IP addresses or locations + • AltiVec timing signatures + • Ring communication protocol + + TO COMPROMISE THE SYSTEM: + ─────────────────────────────────────────────────────────────────────── 
+ 1. Identify hidden PPC mutator nodes (hard - they're not public) + 2. Physically compromise 2/3 of them (requires physical access) + 3. Extract AltiVec entropy generation (can't fake it) + 4. Do this BEFORE next block (10 second window) + + COST TO ATTACK: Find and physically control multiple hidden + vintage Macs scattered across unknown locations + + COST TO DEFEND: Buy 3 old Macs for $150 total, hide them + + "The PowerPC nodes are the heartbeat of the chain. + Ancient silicon decides the fate of modern validators." + +╚══════════════════════════════════════════════════════════════════════╝ +""") + + +if __name__ == "__main__": + demo_hidden_mutator_network() diff --git a/rips/rustchain-core/validator/entropy.py b/rips/rustchain-core/validator/entropy.py index a8a44f93..25d21006 100644 --- a/rips/rustchain-core/validator/entropy.py +++ b/rips/rustchain-core/validator/entropy.py @@ -1,1005 +1,1005 @@ -""" -RustChain Entropy-Based Validator Fingerprinting (RIP-0007) -============================================================ - -Multi-source entropy fingerprint system for validator identification, -anti-emulation verification, and cumulative reputation weighting. - -Philosophy: "It's cheaper to buy a $50 486 than to emulate one" - -Entropy Layers: -1. Hardware (60%): CPU timing, cache, memory SPD, thermal, BIOS -2. Software (25%): Kernel boot, MAC, SMBIOS, disk serials -3. 
Temporal (15%): Uptime continuity, drift history, challenges -""" - -import hashlib -import time -import struct -import platform -import subprocess -import os -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple, Any -from enum import Enum -from pathlib import Path - - -# ============================================================================= -# Constants -# ============================================================================= - -# Entropy layer weights (must sum to 1.0) -HARDWARE_WEIGHT = 0.60 -SOFTWARE_WEIGHT = 0.25 -TEMPORAL_WEIGHT = 0.15 - -# Individual source weights within hardware layer -HW_CPU_TIMING_WEIGHT = 0.25 -HW_CACHE_WEIGHT = 0.20 -HW_MEMORY_WEIGHT = 0.15 -HW_THERMAL_WEIGHT = 0.15 -HW_BIOS_WEIGHT = 0.15 -HW_TOPOLOGY_WEIGHT = 0.10 - -# Drift thresholds -MAX_DRIFT_ALLOWED = 10 # Maximum drift events before penalty -DRIFT_THRESHOLD_PERCENT = 5.0 # % change that counts as drift - -# Challenge timeouts -CHALLENGE_TIMEOUT_MS = 5000 - - -# ============================================================================= -# Data Structures -# ============================================================================= - -@dataclass -class EntropySource: - """Individual entropy source measurement""" - name: str - hash: str - raw_value: Any - confidence: float # 0.0 - 1.0 - timestamp: int - - -@dataclass -class EntropyProfile: - """Complete entropy profile for a node""" - # Hardware layer - cpu_fingerprint: str = "" - cache_fingerprint: str = "" - memory_fingerprint: str = "" - thermal_fingerprint: str = "" - bios_fingerprint: str = "" - topology_fingerprint: str = "" - - # Software layer - kernel_fingerprint: str = "" - mac_fingerprint: str = "" - smbios_fingerprint: str = "" - disk_fingerprint: str = "" - - # Temporal layer - uptime_seconds: int = 0 - collection_timestamp: int = 0 - - # Computed values - validator_id: str = "" - combined_hash: str = "" - confidence_score: float = 0.0 - - def 
__post_init__(self): - if not self.validator_id: - self.validator_id = self._derive_validator_id() - if not self.combined_hash: - self.combined_hash = self._compute_combined_hash() - - def _derive_validator_id(self) -> str: - """Derive unique validator ID from entropy profile""" - combined = ( - self.cpu_fingerprint + - self.memory_fingerprint + - self.bios_fingerprint + - self.topology_fingerprint + - self.mac_fingerprint + - self.disk_fingerprint + - self.kernel_fingerprint - ) - return hashlib.sha256(combined.encode()).hexdigest() - - def _compute_combined_hash(self) -> str: - """Compute combined entropy hash""" - all_hashes = [ - self.cpu_fingerprint, - self.cache_fingerprint, - self.memory_fingerprint, - self.thermal_fingerprint, - self.bios_fingerprint, - self.topology_fingerprint, - self.kernel_fingerprint, - self.mac_fingerprint, - self.smbios_fingerprint, - self.disk_fingerprint, - ] - combined = ''.join(h for h in all_hashes if h) - return hashlib.sha256(combined.encode()).hexdigest() - - -@dataclass -class DriftEvent: - """Record of entropy drift""" - timestamp: int - source: str - old_hash: str - new_hash: str - drift_percent: float - - -@dataclass -class ChallengeResult: - """Result of a challenge-response verification""" - challenge_type: str - nonce: bytes - response: bytes - timing_ms: float - valid: bool - details: str = "" - - -# ============================================================================= -# Hardware Entropy Collection -# ============================================================================= - -class HardwareEntropyCollector: - """ - Collects hardware-level entropy for fingerprinting. - - Security: Real hardware has measurable, consistent characteristics. - Emulators fail to perfectly replicate timing, cache, and thermal behavior. - """ - - @staticmethod - def fingerprint_cpu() -> EntropySource: - """ - Collect CPU-specific entropy. 
- - Measures: - - Instruction timing variations - - CPUID responses - - Cache line behavior - """ - data = {} - - # Get CPU info - try: - if platform.system() == "Linux": - with open("/proc/cpuinfo", "r") as f: - cpuinfo = f.read() - data["cpuinfo"] = cpuinfo[:2000] # First 2KB - else: - data["platform_processor"] = platform.processor() - except: - data["platform_processor"] = platform.processor() - - # Measure instruction timing (simplified - real impl would use rdtsc) - timing_samples = [] - for _ in range(100): - start = time.perf_counter_ns() - # Simple operations - x = 0 - for i in range(1000): - x += i * i - elapsed = time.perf_counter_ns() - start - timing_samples.append(elapsed) - - data["timing_mean"] = sum(timing_samples) / len(timing_samples) - data["timing_variance"] = sum((t - data["timing_mean"])**2 for t in timing_samples) / len(timing_samples) - - # Hash the data - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="cpu", - hash=fingerprint, - raw_value=data, - confidence=0.85, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_cache() -> EntropySource: - """ - Measure cache behavior patterns. - - Real hardware has specific L1/L2 cache timing characteristics - that are extremely difficult to emulate accurately. 
- """ - data = {} - - # Allocate memory and measure access patterns - try: - import array - buffer_size = 1024 * 1024 # 1MB - buffer = array.array('i', [0] * (buffer_size // 4)) - - # Sequential access timing - start = time.perf_counter_ns() - for i in range(0, len(buffer), 64): # Cache line stride - _ = buffer[i] - seq_time = time.perf_counter_ns() - start - data["sequential_access_ns"] = seq_time - - # Random access timing (should be slower due to cache misses) - import random - indices = list(range(0, len(buffer), 64)) - random.shuffle(indices) - start = time.perf_counter_ns() - for i in indices[:1000]: - _ = buffer[i] - rand_time = time.perf_counter_ns() - start - data["random_access_ns"] = rand_time - - # Cache efficiency ratio - data["cache_ratio"] = seq_time / max(1, rand_time) - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="cache", - hash=fingerprint, - raw_value=data, - confidence=0.75, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_memory() -> EntropySource: - """ - Collect memory timing and SPD data. - - SPD (Serial Presence Detect) contains timing parameters - programmed into memory modules at manufacture. 
- """ - data = {} - - # Try to read memory info - try: - if platform.system() == "Linux": - # Memory info - with open("/proc/meminfo", "r") as f: - data["meminfo"] = f.read()[:1000] - - # Try DMI decode for memory details (requires root) - try: - result = subprocess.run( - ["dmidecode", "-t", "memory"], - capture_output=True, text=True, timeout=5 - ) - if result.returncode == 0: - data["dmi_memory"] = result.stdout[:2000] - except: - pass - - elif platform.system() == "Darwin": # macOS - try: - result = subprocess.run( - ["system_profiler", "SPMemoryDataType"], - capture_output=True, text=True, timeout=10 - ) - data["system_profiler"] = result.stdout[:2000] - except: - pass - - except Exception as e: - data["error"] = str(e) - - data["total_memory"] = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') if hasattr(os, 'sysconf') else 0 - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="memory", - hash=fingerprint, - raw_value=data, - confidence=0.70, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_thermal() -> EntropySource: - """ - Collect thermal signature data. - - Real hardware has specific thermal response patterns. - Emulators cannot physically generate heat. 
- """ - data = {} - - try: - if platform.system() == "Linux": - # Read thermal zones - thermal_path = Path("/sys/class/thermal") - if thermal_path.exists(): - for zone in thermal_path.glob("thermal_zone*"): - try: - temp_file = zone / "temp" - if temp_file.exists(): - with open(temp_file, "r") as f: - temp = int(f.read().strip()) / 1000.0 - data[zone.name] = temp - except: - pass - - # CPU frequency (varies with thermal throttling) - cpufreq_path = Path("/sys/devices/system/cpu/cpu0/cpufreq") - if cpufreq_path.exists(): - for freq_file in ["scaling_cur_freq", "cpuinfo_max_freq"]: - fpath = cpufreq_path / freq_file - if fpath.exists(): - try: - with open(fpath, "r") as f: - data[freq_file] = int(f.read().strip()) - except: - pass - - elif platform.system() == "Darwin": - # macOS - try powermetrics or SMC - try: - result = subprocess.run( - ["sysctl", "-a"], - capture_output=True, text=True, timeout=5 - ) - for line in result.stdout.split('\n'): - if 'temperature' in line.lower() or 'thermal' in line.lower(): - data[line.split(':')[0].strip()] = line.split(':')[1].strip() if ':' in line else '' - except: - pass - - except Exception as e: - data["error"] = str(e) - - # Include timestamp for temporal entropy - data["collection_time"] = time.time() - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="thermal", - hash=fingerprint, - raw_value=data, - confidence=0.60 if data else 0.20, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_bios() -> EntropySource: - """ - Collect BIOS/UEFI/OpenFirmware entropy. - - Firmware timestamps and configuration are unique per machine. 
- """ - data = {} - - try: - if platform.system() == "Linux": - # DMI data - dmi_path = Path("/sys/class/dmi/id") - if dmi_path.exists(): - for field in ["bios_vendor", "bios_version", "bios_date", - "board_name", "board_vendor", "board_serial", - "sys_vendor", "product_name", "product_serial"]: - fpath = dmi_path / field - if fpath.exists(): - try: - with open(fpath, "r") as f: - data[field] = f.read().strip() - except: - pass - - elif platform.system() == "Darwin": - # macOS - OpenFirmware/NVRAM - try: - result = subprocess.run( - ["system_profiler", "SPHardwareDataType"], - capture_output=True, text=True, timeout=10 - ) - data["hardware_profile"] = result.stdout[:2000] - except: - pass - - # NVRAM - try: - result = subprocess.run( - ["nvram", "-p"], - capture_output=True, text=True, timeout=5 - ) - data["nvram"] = result.stdout[:1000] - except: - pass - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="bios", - hash=fingerprint, - raw_value=data, - confidence=0.80 if data else 0.30, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_topology() -> EntropySource: - """ - Collect hardware topology (PCIe, USB, IRQ). - - Physical device configuration is unique to each machine. 
- """ - data = {} - - try: - if platform.system() == "Linux": - # PCI devices - try: - result = subprocess.run( - ["lspci", "-nn"], - capture_output=True, text=True, timeout=10 - ) - if result.returncode == 0: - data["pci_devices"] = result.stdout[:4000] - except: - pass - - # USB devices - try: - result = subprocess.run( - ["lsusb"], - capture_output=True, text=True, timeout=10 - ) - if result.returncode == 0: - data["usb_devices"] = result.stdout[:2000] - except: - pass - - # Block devices - try: - result = subprocess.run( - ["lsblk", "-o", "NAME,SIZE,MODEL,SERIAL"], - capture_output=True, text=True, timeout=10 - ) - if result.returncode == 0: - data["block_devices"] = result.stdout[:2000] - except: - pass - - elif platform.system() == "Darwin": - try: - result = subprocess.run( - ["system_profiler", "SPUSBDataType", "SPPCIDataType"], - capture_output=True, text=True, timeout=15 - ) - data["devices"] = result.stdout[:4000] - except: - pass - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="topology", - hash=fingerprint, - raw_value=data, - confidence=0.75 if data else 0.25, - timestamp=int(time.time()), - ) - - -# ============================================================================= -# Software Entropy Collection -# ============================================================================= - -class SoftwareEntropyCollector: - """Collects software-level entropy for fingerprinting.""" - - @staticmethod - def fingerprint_kernel() -> EntropySource: - """Collect kernel boot and configuration entropy.""" - data = {} - - try: - # Kernel version - data["kernel"] = platform.release() - data["platform"] = platform.platform() - - if platform.system() == "Linux": - # Boot time - with open("/proc/stat", "r") as f: - for line in f: - if line.startswith("btime"): - data["boot_time"] = int(line.split()[1]) - break - - # Kernel command line - try: - with 
open("/proc/cmdline", "r") as f: - data["cmdline"] = f.read().strip()[:500] - except: - pass - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="kernel", - hash=fingerprint, - raw_value=data, - confidence=0.70, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_mac() -> EntropySource: - """Collect MAC address entropy.""" - data = {} - - try: - import uuid - data["mac"] = ':'.join(['{:02x}'.format((uuid.getnode() >> i) & 0xff) - for i in range(0, 48, 8)][::-1]) - - if platform.system() == "Linux": - # Get all network interfaces - net_path = Path("/sys/class/net") - if net_path.exists(): - for iface in net_path.iterdir(): - addr_file = iface / "address" - if addr_file.exists(): - try: - with open(addr_file, "r") as f: - data[iface.name] = f.read().strip() - except: - pass - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="mac", - hash=fingerprint, - raw_value=data, - confidence=0.65, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_smbios() -> EntropySource: - """Collect SMBIOS/DMI entropy.""" - data = {} - - try: - data["machine"] = platform.machine() - data["node"] = platform.node() - - if platform.system() == "Linux": - # Try dmidecode - try: - result = subprocess.run( - ["dmidecode", "-t", "system"], - capture_output=True, text=True, timeout=10 - ) - if result.returncode == 0: - data["system"] = result.stdout[:2000] - except: - pass - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="smbios", - hash=fingerprint, - raw_value=data, - confidence=0.70, - timestamp=int(time.time()), - ) - - @staticmethod - def fingerprint_disk() -> EntropySource: - """Collect disk serial and identity entropy.""" - data = {} - - try: - if 
platform.system() == "Linux": - # Disk by-id - byid_path = Path("/dev/disk/by-id") - if byid_path.exists(): - data["disk_ids"] = [d.name for d in byid_path.iterdir()][:20] - - # Root filesystem UUID - try: - result = subprocess.run( - ["findmnt", "-n", "-o", "UUID", "/"], - capture_output=True, text=True, timeout=5 - ) - if result.returncode == 0: - data["root_uuid"] = result.stdout.strip() - except: - pass - - elif platform.system() == "Darwin": - try: - result = subprocess.run( - ["diskutil", "info", "/"], - capture_output=True, text=True, timeout=10 - ) - data["diskutil"] = result.stdout[:1000] - except: - pass - - except Exception as e: - data["error"] = str(e) - - fingerprint = hashlib.sha256(str(data).encode()).hexdigest() - - return EntropySource( - name="disk", - hash=fingerprint, - raw_value=data, - confidence=0.75, - timestamp=int(time.time()), - ) - - -# ============================================================================= -# Entropy Profile Builder -# ============================================================================= - -class EntropyProfileBuilder: - """ - Builds complete entropy profiles from all sources. 
- - Security Model: - - Multi-layer entropy makes forgery economically irrational - - Each layer provides independent verification - - Weighted combination resists partial spoofing - """ - - def __init__(self): - self.hw_collector = HardwareEntropyCollector() - self.sw_collector = SoftwareEntropyCollector() - - def collect_full_profile(self) -> EntropyProfile: - """Collect complete entropy profile.""" - # Hardware layer - cpu = self.hw_collector.fingerprint_cpu() - cache = self.hw_collector.fingerprint_cache() - memory = self.hw_collector.fingerprint_memory() - thermal = self.hw_collector.fingerprint_thermal() - bios = self.hw_collector.fingerprint_bios() - topology = self.hw_collector.fingerprint_topology() - - # Software layer - kernel = self.sw_collector.fingerprint_kernel() - mac = self.sw_collector.fingerprint_mac() - smbios = self.sw_collector.fingerprint_smbios() - disk = self.sw_collector.fingerprint_disk() - - # Get uptime - try: - with open("/proc/uptime", "r") as f: - uptime = int(float(f.read().split()[0])) - except: - uptime = 0 - - # Build profile - profile = EntropyProfile( - cpu_fingerprint=cpu.hash, - cache_fingerprint=cache.hash, - memory_fingerprint=memory.hash, - thermal_fingerprint=thermal.hash, - bios_fingerprint=bios.hash, - topology_fingerprint=topology.hash, - kernel_fingerprint=kernel.hash, - mac_fingerprint=mac.hash, - smbios_fingerprint=smbios.hash, - disk_fingerprint=disk.hash, - uptime_seconds=uptime, - collection_timestamp=int(time.time()), - ) - - # Calculate confidence score - confidences = [ - cpu.confidence * HW_CPU_TIMING_WEIGHT, - cache.confidence * HW_CACHE_WEIGHT, - memory.confidence * HW_MEMORY_WEIGHT, - thermal.confidence * HW_THERMAL_WEIGHT, - bios.confidence * HW_BIOS_WEIGHT, - topology.confidence * HW_TOPOLOGY_WEIGHT, - ] - profile.confidence_score = sum(confidences) - - return profile - - -# ============================================================================= -# Drift Detection -# 
============================================================================= - -class DriftDetector: - """ - Detects entropy drift over time. - - Drift indicates: - - Possible emulation attempt - - Hardware swap - - System instability - """ - - def __init__(self): - self._history: Dict[str, List[EntropyProfile]] = {} - self._drift_events: Dict[str, List[DriftEvent]] = {} - - def record_profile(self, validator_id: str, profile: EntropyProfile): - """Record a profile observation.""" - if validator_id not in self._history: - self._history[validator_id] = [] - self._history[validator_id].append(profile) - - # Keep last 100 profiles - if len(self._history[validator_id]) > 100: - self._history[validator_id] = self._history[validator_id][-100:] - - def check_drift(self, validator_id: str, new_profile: EntropyProfile) -> List[DriftEvent]: - """Check for drift from historical profiles.""" - events = [] - - if validator_id not in self._history or not self._history[validator_id]: - return events - - # Compare with baseline (first recorded profile) - baseline = self._history[validator_id][0] - - # Check each fingerprint component - components = [ - ("cpu", baseline.cpu_fingerprint, new_profile.cpu_fingerprint), - ("cache", baseline.cache_fingerprint, new_profile.cache_fingerprint), - ("memory", baseline.memory_fingerprint, new_profile.memory_fingerprint), - ("bios", baseline.bios_fingerprint, new_profile.bios_fingerprint), - ("topology", baseline.topology_fingerprint, new_profile.topology_fingerprint), - ] - - for name, old_hash, new_hash in components: - if old_hash and new_hash and old_hash != new_hash: - # Calculate drift percentage (simplified - hash difference) - diff_chars = sum(1 for a, b in zip(old_hash, new_hash) if a != b) - drift_pct = (diff_chars / len(old_hash)) * 100 - - if drift_pct > 0: - event = DriftEvent( - timestamp=int(time.time()), - source=name, - old_hash=old_hash[:16], - new_hash=new_hash[:16], - drift_percent=drift_pct, - ) - events.append(event) - - 
if validator_id not in self._drift_events: - self._drift_events[validator_id] = [] - self._drift_events[validator_id].append(event) - - return events - - def get_drift_count(self, validator_id: str) -> int: - """Get total drift events for a validator.""" - return len(self._drift_events.get(validator_id, [])) - - -# ============================================================================= -# Entropy Score Calculator -# ============================================================================= - -def compute_entropy_score( - profile: EntropyProfile, - drift_events: int, - successful_challenges: int = 0, -) -> float: - """ - Calculate entropy score modifier for Antiquity Score. - - Formula: - ENTROPY_SCORE = uptime_weight × stability_score × verification_bonus - - Returns: - Score between 0.1 and 1.5 - """ - # Uptime weight (max at 30 days) - max_uptime = 30 * 24 * 3600 # 30 days in seconds - uptime_weight = min(1.0, profile.uptime_seconds / max_uptime) - - # Stability score (penalize drift) - stability_score = max(0.1, 1.0 - (drift_events / MAX_DRIFT_ALLOWED)) - - # Challenge verification bonus - verification_bonus = 1.0 + (successful_challenges * 0.05) - - # Combined score - entropy_score = uptime_weight * stability_score * verification_bonus - - # Include confidence - entropy_score *= (0.7 + 0.3 * profile.confidence_score) - - return min(1.5, max(0.1, entropy_score)) - - -def compute_effective_antiquity_score( - base_antiquity_score: float, - entropy_score: float, -) -> float: - """ - Calculate effective Antiquity Score with entropy modifier. 
- - Formula: - EFFECTIVE_AS = BASE_AS × (0.7 + 0.3 × ENTROPY_SCORE) - """ - modifier = 0.7 + 0.3 * entropy_score - return base_antiquity_score * modifier - - -# ============================================================================= -# Validator Identity Manager -# ============================================================================= - -class ValidatorIdentityManager: - """ - Manages validator identities derived from entropy profiles. - - Each physical machine has a unique validator ID that: - - Cannot be forged without physical access - - Provides Sybil resistance - - Enables reputation tracking - """ - - def __init__(self): - self.profile_builder = EntropyProfileBuilder() - self.drift_detector = DriftDetector() - self._identities: Dict[str, EntropyProfile] = {} - self._challenges: Dict[str, int] = {} - - def register_validator(self) -> Tuple[str, EntropyProfile]: - """ - Register this machine as a validator. - - Returns: - (validator_id, entropy_profile) - """ - profile = self.profile_builder.collect_full_profile() - validator_id = profile.validator_id - - self._identities[validator_id] = profile - self.drift_detector.record_profile(validator_id, profile) - - return validator_id, profile - - def verify_validator(self, claimed_id: str) -> Tuple[bool, str, float]: - """ - Verify a claimed validator identity. 
- - Returns: - (valid, message, entropy_score) - """ - # Collect current profile - current_profile = self.profile_builder.collect_full_profile() - - # Check if ID matches - if current_profile.validator_id != claimed_id: - return False, "Validator ID mismatch", 0.0 - - # Check drift - drift_events = self.drift_detector.check_drift(claimed_id, current_profile) - drift_count = self.drift_detector.get_drift_count(claimed_id) - - if drift_count > MAX_DRIFT_ALLOWED: - return False, f"Excessive drift: {drift_count} events", 0.0 - - # Calculate entropy score - successful_challenges = self._challenges.get(claimed_id, 0) - entropy_score = compute_entropy_score( - current_profile, - drift_count, - successful_challenges, - ) - - # Record profile - self.drift_detector.record_profile(claimed_id, current_profile) - - if drift_events: - return True, f"Valid with {len(drift_events)} drift events", entropy_score - - return True, "Valid", entropy_score - - -# ============================================================================= -# Main Entry Point -# ============================================================================= - -def derive_validator_id() -> str: - """Quick function to get validator ID for this machine.""" - builder = EntropyProfileBuilder() - profile = builder.collect_full_profile() - return profile.validator_id - - -def collect_entropy_profile() -> Dict[str, Any]: - """Collect complete entropy profile as dictionary.""" - builder = EntropyProfileBuilder() - profile = builder.collect_full_profile() - - return { - "validator_id": profile.validator_id, - "cpu_fingerprint": profile.cpu_fingerprint, - "memory_fingerprint": profile.memory_fingerprint, - "bios_fingerprint": profile.bios_fingerprint, - "topology_fingerprint": profile.topology_fingerprint, - "mac_fingerprint": profile.mac_fingerprint, - "disk_fingerprint": profile.disk_fingerprint, - "kernel_fingerprint": profile.kernel_fingerprint, - "combined_hash": profile.combined_hash, - "confidence_score": 
profile.confidence_score, - "uptime_seconds": profile.uptime_seconds, - "collection_timestamp": profile.collection_timestamp, - } - - -# ============================================================================= -# Tests -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN ENTROPY FINGERPRINTING (RIP-0007)") - print("=" * 60) - print() - print("Collecting entropy profile...") - print() - - profile = collect_entropy_profile() - - print("VALIDATOR IDENTITY") - print("-" * 40) - print(f" Validator ID: {profile['validator_id'][:32]}...") - print(f" Confidence: {profile['confidence_score']:.2%}") - print(f" Uptime: {profile['uptime_seconds'] // 3600} hours") - print() - - print("FINGERPRINTS") - print("-" * 40) - print(f" CPU: {profile['cpu_fingerprint'][:16]}...") - print(f" Memory: {profile['memory_fingerprint'][:16]}...") - print(f" BIOS: {profile['bios_fingerprint'][:16]}...") - print(f" Topology: {profile['topology_fingerprint'][:16]}...") - print(f" MAC: {profile['mac_fingerprint'][:16]}...") - print(f" Disk: {profile['disk_fingerprint'][:16]}...") - print() - - print("COMBINED HASH") - print("-" * 40) - print(f" {profile['combined_hash']}") - print() - - print("Philosophy: 'It's cheaper to buy a $50 486 than to emulate one'") +""" +RustChain Entropy-Based Validator Fingerprinting (RIP-0007) +============================================================ + +Multi-source entropy fingerprint system for validator identification, +anti-emulation verification, and cumulative reputation weighting. + +Philosophy: "It's cheaper to buy a $50 486 than to emulate one" + +Entropy Layers: +1. Hardware (60%): CPU timing, cache, memory SPD, thermal, BIOS +2. Software (25%): Kernel boot, MAC, SMBIOS, disk serials +3. 
Temporal (15%): Uptime continuity, drift history, challenges +""" + +import hashlib +import time +import struct +import platform +import subprocess +import os +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple, Any +from enum import Enum +from pathlib import Path + + +# ============================================================================= +# Constants +# ============================================================================= + +# Entropy layer weights (must sum to 1.0) +HARDWARE_WEIGHT = 0.60 +SOFTWARE_WEIGHT = 0.25 +TEMPORAL_WEIGHT = 0.15 + +# Individual source weights within hardware layer +HW_CPU_TIMING_WEIGHT = 0.25 +HW_CACHE_WEIGHT = 0.20 +HW_MEMORY_WEIGHT = 0.15 +HW_THERMAL_WEIGHT = 0.15 +HW_BIOS_WEIGHT = 0.15 +HW_TOPOLOGY_WEIGHT = 0.10 + +# Drift thresholds +MAX_DRIFT_ALLOWED = 10 # Maximum drift events before penalty +DRIFT_THRESHOLD_PERCENT = 5.0 # % change that counts as drift + +# Challenge timeouts +CHALLENGE_TIMEOUT_MS = 5000 + + +# ============================================================================= +# Data Structures +# ============================================================================= + +@dataclass +class EntropySource: + """Individual entropy source measurement""" + name: str + hash: str + raw_value: Any + confidence: float # 0.0 - 1.0 + timestamp: int + + +@dataclass +class EntropyProfile: + """Complete entropy profile for a node""" + # Hardware layer + cpu_fingerprint: str = "" + cache_fingerprint: str = "" + memory_fingerprint: str = "" + thermal_fingerprint: str = "" + bios_fingerprint: str = "" + topology_fingerprint: str = "" + + # Software layer + kernel_fingerprint: str = "" + mac_fingerprint: str = "" + smbios_fingerprint: str = "" + disk_fingerprint: str = "" + + # Temporal layer + uptime_seconds: int = 0 + collection_timestamp: int = 0 + + # Computed values + validator_id: str = "" + combined_hash: str = "" + confidence_score: float = 0.0 + + def 
__post_init__(self): + if not self.validator_id: + self.validator_id = self._derive_validator_id() + if not self.combined_hash: + self.combined_hash = self._compute_combined_hash() + + def _derive_validator_id(self) -> str: + """Derive unique validator ID from entropy profile""" + combined = ( + self.cpu_fingerprint + + self.memory_fingerprint + + self.bios_fingerprint + + self.topology_fingerprint + + self.mac_fingerprint + + self.disk_fingerprint + + self.kernel_fingerprint + ) + return hashlib.sha256(combined.encode()).hexdigest() + + def _compute_combined_hash(self) -> str: + """Compute combined entropy hash""" + all_hashes = [ + self.cpu_fingerprint, + self.cache_fingerprint, + self.memory_fingerprint, + self.thermal_fingerprint, + self.bios_fingerprint, + self.topology_fingerprint, + self.kernel_fingerprint, + self.mac_fingerprint, + self.smbios_fingerprint, + self.disk_fingerprint, + ] + combined = ''.join(h for h in all_hashes if h) + return hashlib.sha256(combined.encode()).hexdigest() + + +@dataclass +class DriftEvent: + """Record of entropy drift""" + timestamp: int + source: str + old_hash: str + new_hash: str + drift_percent: float + + +@dataclass +class ChallengeResult: + """Result of a challenge-response verification""" + challenge_type: str + nonce: bytes + response: bytes + timing_ms: float + valid: bool + details: str = "" + + +# ============================================================================= +# Hardware Entropy Collection +# ============================================================================= + +class HardwareEntropyCollector: + """ + Collects hardware-level entropy for fingerprinting. + + Security: Real hardware has measurable, consistent characteristics. + Emulators fail to perfectly replicate timing, cache, and thermal behavior. + """ + + @staticmethod + def fingerprint_cpu() -> EntropySource: + """ + Collect CPU-specific entropy. 
+ + Measures: + - Instruction timing variations + - CPUID responses + - Cache line behavior + """ + data = {} + + # Get CPU info + try: + if platform.system() == "Linux": + with open("/proc/cpuinfo", "r") as f: + cpuinfo = f.read() + data["cpuinfo"] = cpuinfo[:2000] # First 2KB + else: + data["platform_processor"] = platform.processor() + except: + data["platform_processor"] = platform.processor() + + # Measure instruction timing (simplified - real impl would use rdtsc) + timing_samples = [] + for _ in range(100): + start = time.perf_counter_ns() + # Simple operations + x = 0 + for i in range(1000): + x += i * i + elapsed = time.perf_counter_ns() - start + timing_samples.append(elapsed) + + data["timing_mean"] = sum(timing_samples) / len(timing_samples) + data["timing_variance"] = sum((t - data["timing_mean"])**2 for t in timing_samples) / len(timing_samples) + + # Hash the data + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="cpu", + hash=fingerprint, + raw_value=data, + confidence=0.85, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_cache() -> EntropySource: + """ + Measure cache behavior patterns. + + Real hardware has specific L1/L2 cache timing characteristics + that are extremely difficult to emulate accurately. 
+ """ + data = {} + + # Allocate memory and measure access patterns + try: + import array + buffer_size = 1024 * 1024 # 1MB + buffer = array.array('i', [0] * (buffer_size // 4)) + + # Sequential access timing + start = time.perf_counter_ns() + for i in range(0, len(buffer), 64): # Cache line stride + _ = buffer[i] + seq_time = time.perf_counter_ns() - start + data["sequential_access_ns"] = seq_time + + # Random access timing (should be slower due to cache misses) + import random + indices = list(range(0, len(buffer), 64)) + random.shuffle(indices) + start = time.perf_counter_ns() + for i in indices[:1000]: + _ = buffer[i] + rand_time = time.perf_counter_ns() - start + data["random_access_ns"] = rand_time + + # Cache efficiency ratio + data["cache_ratio"] = seq_time / max(1, rand_time) + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="cache", + hash=fingerprint, + raw_value=data, + confidence=0.75, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_memory() -> EntropySource: + """ + Collect memory timing and SPD data. + + SPD (Serial Presence Detect) contains timing parameters + programmed into memory modules at manufacture. 
+ """ + data = {} + + # Try to read memory info + try: + if platform.system() == "Linux": + # Memory info + with open("/proc/meminfo", "r") as f: + data["meminfo"] = f.read()[:1000] + + # Try DMI decode for memory details (requires root) + try: + result = subprocess.run( + ["dmidecode", "-t", "memory"], + capture_output=True, text=True, timeout=5 + ) + if result.returncode == 0: + data["dmi_memory"] = result.stdout[:2000] + except: + pass + + elif platform.system() == "Darwin": # macOS + try: + result = subprocess.run( + ["system_profiler", "SPMemoryDataType"], + capture_output=True, text=True, timeout=10 + ) + data["system_profiler"] = result.stdout[:2000] + except: + pass + + except Exception as e: + data["error"] = str(e) + + data["total_memory"] = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') if hasattr(os, 'sysconf') else 0 + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="memory", + hash=fingerprint, + raw_value=data, + confidence=0.70, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_thermal() -> EntropySource: + """ + Collect thermal signature data. + + Real hardware has specific thermal response patterns. + Emulators cannot physically generate heat. 
+ """ + data = {} + + try: + if platform.system() == "Linux": + # Read thermal zones + thermal_path = Path("/sys/class/thermal") + if thermal_path.exists(): + for zone in thermal_path.glob("thermal_zone*"): + try: + temp_file = zone / "temp" + if temp_file.exists(): + with open(temp_file, "r") as f: + temp = int(f.read().strip()) / 1000.0 + data[zone.name] = temp + except: + pass + + # CPU frequency (varies with thermal throttling) + cpufreq_path = Path("/sys/devices/system/cpu/cpu0/cpufreq") + if cpufreq_path.exists(): + for freq_file in ["scaling_cur_freq", "cpuinfo_max_freq"]: + fpath = cpufreq_path / freq_file + if fpath.exists(): + try: + with open(fpath, "r") as f: + data[freq_file] = int(f.read().strip()) + except: + pass + + elif platform.system() == "Darwin": + # macOS - try powermetrics or SMC + try: + result = subprocess.run( + ["sysctl", "-a"], + capture_output=True, text=True, timeout=5 + ) + for line in result.stdout.split('\n'): + if 'temperature' in line.lower() or 'thermal' in line.lower(): + data[line.split(':')[0].strip()] = line.split(':')[1].strip() if ':' in line else '' + except: + pass + + except Exception as e: + data["error"] = str(e) + + # Include timestamp for temporal entropy + data["collection_time"] = time.time() + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="thermal", + hash=fingerprint, + raw_value=data, + confidence=0.60 if data else 0.20, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_bios() -> EntropySource: + """ + Collect BIOS/UEFI/OpenFirmware entropy. + + Firmware timestamps and configuration are unique per machine. 
+ """ + data = {} + + try: + if platform.system() == "Linux": + # DMI data + dmi_path = Path("/sys/class/dmi/id") + if dmi_path.exists(): + for field in ["bios_vendor", "bios_version", "bios_date", + "board_name", "board_vendor", "board_serial", + "sys_vendor", "product_name", "product_serial"]: + fpath = dmi_path / field + if fpath.exists(): + try: + with open(fpath, "r") as f: + data[field] = f.read().strip() + except: + pass + + elif platform.system() == "Darwin": + # macOS - OpenFirmware/NVRAM + try: + result = subprocess.run( + ["system_profiler", "SPHardwareDataType"], + capture_output=True, text=True, timeout=10 + ) + data["hardware_profile"] = result.stdout[:2000] + except: + pass + + # NVRAM + try: + result = subprocess.run( + ["nvram", "-p"], + capture_output=True, text=True, timeout=5 + ) + data["nvram"] = result.stdout[:1000] + except: + pass + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="bios", + hash=fingerprint, + raw_value=data, + confidence=0.80 if data else 0.30, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_topology() -> EntropySource: + """ + Collect hardware topology (PCIe, USB, IRQ). + + Physical device configuration is unique to each machine. 
+ """ + data = {} + + try: + if platform.system() == "Linux": + # PCI devices + try: + result = subprocess.run( + ["lspci", "-nn"], + capture_output=True, text=True, timeout=10 + ) + if result.returncode == 0: + data["pci_devices"] = result.stdout[:4000] + except: + pass + + # USB devices + try: + result = subprocess.run( + ["lsusb"], + capture_output=True, text=True, timeout=10 + ) + if result.returncode == 0: + data["usb_devices"] = result.stdout[:2000] + except: + pass + + # Block devices + try: + result = subprocess.run( + ["lsblk", "-o", "NAME,SIZE,MODEL,SERIAL"], + capture_output=True, text=True, timeout=10 + ) + if result.returncode == 0: + data["block_devices"] = result.stdout[:2000] + except: + pass + + elif platform.system() == "Darwin": + try: + result = subprocess.run( + ["system_profiler", "SPUSBDataType", "SPPCIDataType"], + capture_output=True, text=True, timeout=15 + ) + data["devices"] = result.stdout[:4000] + except: + pass + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="topology", + hash=fingerprint, + raw_value=data, + confidence=0.75 if data else 0.25, + timestamp=int(time.time()), + ) + + +# ============================================================================= +# Software Entropy Collection +# ============================================================================= + +class SoftwareEntropyCollector: + """Collects software-level entropy for fingerprinting.""" + + @staticmethod + def fingerprint_kernel() -> EntropySource: + """Collect kernel boot and configuration entropy.""" + data = {} + + try: + # Kernel version + data["kernel"] = platform.release() + data["platform"] = platform.platform() + + if platform.system() == "Linux": + # Boot time + with open("/proc/stat", "r") as f: + for line in f: + if line.startswith("btime"): + data["boot_time"] = int(line.split()[1]) + break + + # Kernel command line + try: + with 
open("/proc/cmdline", "r") as f: + data["cmdline"] = f.read().strip()[:500] + except: + pass + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="kernel", + hash=fingerprint, + raw_value=data, + confidence=0.70, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_mac() -> EntropySource: + """Collect MAC address entropy.""" + data = {} + + try: + import uuid + data["mac"] = ':'.join(['{:02x}'.format((uuid.getnode() >> i) & 0xff) + for i in range(0, 48, 8)][::-1]) + + if platform.system() == "Linux": + # Get all network interfaces + net_path = Path("/sys/class/net") + if net_path.exists(): + for iface in net_path.iterdir(): + addr_file = iface / "address" + if addr_file.exists(): + try: + with open(addr_file, "r") as f: + data[iface.name] = f.read().strip() + except: + pass + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="mac", + hash=fingerprint, + raw_value=data, + confidence=0.65, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_smbios() -> EntropySource: + """Collect SMBIOS/DMI entropy.""" + data = {} + + try: + data["machine"] = platform.machine() + data["node"] = platform.node() + + if platform.system() == "Linux": + # Try dmidecode + try: + result = subprocess.run( + ["dmidecode", "-t", "system"], + capture_output=True, text=True, timeout=10 + ) + if result.returncode == 0: + data["system"] = result.stdout[:2000] + except: + pass + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="smbios", + hash=fingerprint, + raw_value=data, + confidence=0.70, + timestamp=int(time.time()), + ) + + @staticmethod + def fingerprint_disk() -> EntropySource: + """Collect disk serial and identity entropy.""" + data = {} + + try: + if 
platform.system() == "Linux": + # Disk by-id + byid_path = Path("/dev/disk/by-id") + if byid_path.exists(): + data["disk_ids"] = [d.name for d in byid_path.iterdir()][:20] + + # Root filesystem UUID + try: + result = subprocess.run( + ["findmnt", "-n", "-o", "UUID", "/"], + capture_output=True, text=True, timeout=5 + ) + if result.returncode == 0: + data["root_uuid"] = result.stdout.strip() + except: + pass + + elif platform.system() == "Darwin": + try: + result = subprocess.run( + ["diskutil", "info", "/"], + capture_output=True, text=True, timeout=10 + ) + data["diskutil"] = result.stdout[:1000] + except: + pass + + except Exception as e: + data["error"] = str(e) + + fingerprint = hashlib.sha256(str(data).encode()).hexdigest() + + return EntropySource( + name="disk", + hash=fingerprint, + raw_value=data, + confidence=0.75, + timestamp=int(time.time()), + ) + + +# ============================================================================= +# Entropy Profile Builder +# ============================================================================= + +class EntropyProfileBuilder: + """ + Builds complete entropy profiles from all sources. 
+ + Security Model: + - Multi-layer entropy makes forgery economically irrational + - Each layer provides independent verification + - Weighted combination resists partial spoofing + """ + + def __init__(self): + self.hw_collector = HardwareEntropyCollector() + self.sw_collector = SoftwareEntropyCollector() + + def collect_full_profile(self) -> EntropyProfile: + """Collect complete entropy profile.""" + # Hardware layer + cpu = self.hw_collector.fingerprint_cpu() + cache = self.hw_collector.fingerprint_cache() + memory = self.hw_collector.fingerprint_memory() + thermal = self.hw_collector.fingerprint_thermal() + bios = self.hw_collector.fingerprint_bios() + topology = self.hw_collector.fingerprint_topology() + + # Software layer + kernel = self.sw_collector.fingerprint_kernel() + mac = self.sw_collector.fingerprint_mac() + smbios = self.sw_collector.fingerprint_smbios() + disk = self.sw_collector.fingerprint_disk() + + # Get uptime + try: + with open("/proc/uptime", "r") as f: + uptime = int(float(f.read().split()[0])) + except: + uptime = 0 + + # Build profile + profile = EntropyProfile( + cpu_fingerprint=cpu.hash, + cache_fingerprint=cache.hash, + memory_fingerprint=memory.hash, + thermal_fingerprint=thermal.hash, + bios_fingerprint=bios.hash, + topology_fingerprint=topology.hash, + kernel_fingerprint=kernel.hash, + mac_fingerprint=mac.hash, + smbios_fingerprint=smbios.hash, + disk_fingerprint=disk.hash, + uptime_seconds=uptime, + collection_timestamp=int(time.time()), + ) + + # Calculate confidence score + confidences = [ + cpu.confidence * HW_CPU_TIMING_WEIGHT, + cache.confidence * HW_CACHE_WEIGHT, + memory.confidence * HW_MEMORY_WEIGHT, + thermal.confidence * HW_THERMAL_WEIGHT, + bios.confidence * HW_BIOS_WEIGHT, + topology.confidence * HW_TOPOLOGY_WEIGHT, + ] + profile.confidence_score = sum(confidences) + + return profile + + +# ============================================================================= +# Drift Detection +# 
============================================================================= + +class DriftDetector: + """ + Detects entropy drift over time. + + Drift indicates: + - Possible emulation attempt + - Hardware swap + - System instability + """ + + def __init__(self): + self._history: Dict[str, List[EntropyProfile]] = {} + self._drift_events: Dict[str, List[DriftEvent]] = {} + + def record_profile(self, validator_id: str, profile: EntropyProfile): + """Record a profile observation.""" + if validator_id not in self._history: + self._history[validator_id] = [] + self._history[validator_id].append(profile) + + # Keep last 100 profiles + if len(self._history[validator_id]) > 100: + self._history[validator_id] = self._history[validator_id][-100:] + + def check_drift(self, validator_id: str, new_profile: EntropyProfile) -> List[DriftEvent]: + """Check for drift from historical profiles.""" + events = [] + + if validator_id not in self._history or not self._history[validator_id]: + return events + + # Compare with baseline (first recorded profile) + baseline = self._history[validator_id][0] + + # Check each fingerprint component + components = [ + ("cpu", baseline.cpu_fingerprint, new_profile.cpu_fingerprint), + ("cache", baseline.cache_fingerprint, new_profile.cache_fingerprint), + ("memory", baseline.memory_fingerprint, new_profile.memory_fingerprint), + ("bios", baseline.bios_fingerprint, new_profile.bios_fingerprint), + ("topology", baseline.topology_fingerprint, new_profile.topology_fingerprint), + ] + + for name, old_hash, new_hash in components: + if old_hash and new_hash and old_hash != new_hash: + # Calculate drift percentage (simplified - hash difference) + diff_chars = sum(1 for a, b in zip(old_hash, new_hash) if a != b) + drift_pct = (diff_chars / len(old_hash)) * 100 + + if drift_pct > 0: + event = DriftEvent( + timestamp=int(time.time()), + source=name, + old_hash=old_hash[:16], + new_hash=new_hash[:16], + drift_percent=drift_pct, + ) + events.append(event) + + 
if validator_id not in self._drift_events: + self._drift_events[validator_id] = [] + self._drift_events[validator_id].append(event) + + return events + + def get_drift_count(self, validator_id: str) -> int: + """Get total drift events for a validator.""" + return len(self._drift_events.get(validator_id, [])) + + +# ============================================================================= +# Entropy Score Calculator +# ============================================================================= + +def compute_entropy_score( + profile: EntropyProfile, + drift_events: int, + successful_challenges: int = 0, +) -> float: + """ + Calculate entropy score modifier for Antiquity Score. + + Formula: + ENTROPY_SCORE = uptime_weight × stability_score × verification_bonus + + Returns: + Score between 0.1 and 1.5 + """ + # Uptime weight (max at 30 days) + max_uptime = 30 * 24 * 3600 # 30 days in seconds + uptime_weight = min(1.0, profile.uptime_seconds / max_uptime) + + # Stability score (penalize drift) + stability_score = max(0.1, 1.0 - (drift_events / MAX_DRIFT_ALLOWED)) + + # Challenge verification bonus + verification_bonus = 1.0 + (successful_challenges * 0.05) + + # Combined score + entropy_score = uptime_weight * stability_score * verification_bonus + + # Include confidence + entropy_score *= (0.7 + 0.3 * profile.confidence_score) + + return min(1.5, max(0.1, entropy_score)) + + +def compute_effective_antiquity_score( + base_antiquity_score: float, + entropy_score: float, +) -> float: + """ + Calculate effective Antiquity Score with entropy modifier. 
+ + Formula: + EFFECTIVE_AS = BASE_AS × (0.7 + 0.3 × ENTROPY_SCORE) + """ + modifier = 0.7 + 0.3 * entropy_score + return base_antiquity_score * modifier + + +# ============================================================================= +# Validator Identity Manager +# ============================================================================= + +class ValidatorIdentityManager: + """ + Manages validator identities derived from entropy profiles. + + Each physical machine has a unique validator ID that: + - Cannot be forged without physical access + - Provides Sybil resistance + - Enables reputation tracking + """ + + def __init__(self): + self.profile_builder = EntropyProfileBuilder() + self.drift_detector = DriftDetector() + self._identities: Dict[str, EntropyProfile] = {} + self._challenges: Dict[str, int] = {} + + def register_validator(self) -> Tuple[str, EntropyProfile]: + """ + Register this machine as a validator. + + Returns: + (validator_id, entropy_profile) + """ + profile = self.profile_builder.collect_full_profile() + validator_id = profile.validator_id + + self._identities[validator_id] = profile + self.drift_detector.record_profile(validator_id, profile) + + return validator_id, profile + + def verify_validator(self, claimed_id: str) -> Tuple[bool, str, float]: + """ + Verify a claimed validator identity. 
+ + Returns: + (valid, message, entropy_score) + """ + # Collect current profile + current_profile = self.profile_builder.collect_full_profile() + + # Check if ID matches + if current_profile.validator_id != claimed_id: + return False, "Validator ID mismatch", 0.0 + + # Check drift + drift_events = self.drift_detector.check_drift(claimed_id, current_profile) + drift_count = self.drift_detector.get_drift_count(claimed_id) + + if drift_count > MAX_DRIFT_ALLOWED: + return False, f"Excessive drift: {drift_count} events", 0.0 + + # Calculate entropy score + successful_challenges = self._challenges.get(claimed_id, 0) + entropy_score = compute_entropy_score( + current_profile, + drift_count, + successful_challenges, + ) + + # Record profile + self.drift_detector.record_profile(claimed_id, current_profile) + + if drift_events: + return True, f"Valid with {len(drift_events)} drift events", entropy_score + + return True, "Valid", entropy_score + + +# ============================================================================= +# Main Entry Point +# ============================================================================= + +def derive_validator_id() -> str: + """Quick function to get validator ID for this machine.""" + builder = EntropyProfileBuilder() + profile = builder.collect_full_profile() + return profile.validator_id + + +def collect_entropy_profile() -> Dict[str, Any]: + """Collect complete entropy profile as dictionary.""" + builder = EntropyProfileBuilder() + profile = builder.collect_full_profile() + + return { + "validator_id": profile.validator_id, + "cpu_fingerprint": profile.cpu_fingerprint, + "memory_fingerprint": profile.memory_fingerprint, + "bios_fingerprint": profile.bios_fingerprint, + "topology_fingerprint": profile.topology_fingerprint, + "mac_fingerprint": profile.mac_fingerprint, + "disk_fingerprint": profile.disk_fingerprint, + "kernel_fingerprint": profile.kernel_fingerprint, + "combined_hash": profile.combined_hash, + "confidence_score": 
profile.confidence_score, + "uptime_seconds": profile.uptime_seconds, + "collection_timestamp": profile.collection_timestamp, + } + + +# ============================================================================= +# Tests +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN ENTROPY FINGERPRINTING (RIP-0007)") + print("=" * 60) + print() + print("Collecting entropy profile...") + print() + + profile = collect_entropy_profile() + + print("VALIDATOR IDENTITY") + print("-" * 40) + print(f" Validator ID: {profile['validator_id'][:32]}...") + print(f" Confidence: {profile['confidence_score']:.2%}") + print(f" Uptime: {profile['uptime_seconds'] // 3600} hours") + print() + + print("FINGERPRINTS") + print("-" * 40) + print(f" CPU: {profile['cpu_fingerprint'][:16]}...") + print(f" Memory: {profile['memory_fingerprint'][:16]}...") + print(f" BIOS: {profile['bios_fingerprint'][:16]}...") + print(f" Topology: {profile['topology_fingerprint'][:16]}...") + print(f" MAC: {profile['mac_fingerprint'][:16]}...") + print(f" Disk: {profile['disk_fingerprint'][:16]}...") + print() + + print("COMBINED HASH") + print("-" * 40) + print(f" {profile['combined_hash']}") + print() + + print("Philosophy: 'It's cheaper to buy a $50 486 than to emulate one'") diff --git a/rips/rustchain-core/validator/score.py b/rips/rustchain-core/validator/score.py index e24eb92f..3fdadb58 100644 --- a/rips/rustchain-core/validator/score.py +++ b/rips/rustchain-core/validator/score.py @@ -1,582 +1,582 @@ -""" -RustChain Validator & Antiquity Score (RIP-0001, RIP-0003) -========================================================== - -Hardware validation, Antiquity Score calculation, and drift lock management. 
- -Security Mechanisms: -- Hardware fingerprinting via deep entropy -- Drift detection for behavioral anomalies -- Quarantine system for suspected emulators -- Reputation tracking for long-term behavior -""" - -import hashlib -import math -import time -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple, Any -from enum import Enum - -from ..config.chain_params import ( - CURRENT_YEAR, - AS_MAX, - AS_MIN, - HARDWARE_TIERS, - DRIFT_THRESHOLD, - QUARANTINE_DURATION_BLOCKS, - ENTROPY_WEIGHTS, - EMULATION_PROBABILITY_THRESHOLD, - MIN_ENTROPY_SCORE, -) - - -# ============================================================================= -# Hardware Database -# ============================================================================= - -# Known CPU models with release years (for validation) -HARDWARE_DATABASE: Dict[str, Dict[str, Any]] = { - # Ancient (30+ years) - 3.5x multiplier - "486DX2": {"year": 1992, "family": "x86", "arch": "i486"}, - "486DX4": {"year": 1994, "family": "x86", "arch": "i486"}, - "68040": {"year": 1990, "family": "68k", "arch": "m68k"}, - - # Sacred (25-29 years) - 3.0x multiplier - "Pentium": {"year": 1993, "family": "x86", "arch": "P5"}, - "Pentium Pro": {"year": 1995, "family": "x86", "arch": "P6"}, - "Pentium II": {"year": 1997, "family": "x86", "arch": "P6"}, - "PowerPC 601": {"year": 1993, "family": "ppc", "arch": "POWER"}, - "PowerPC 603": {"year": 1994, "family": "ppc", "arch": "POWER"}, - "Alpha 21064": {"year": 1992, "family": "alpha", "arch": "EV4"}, - - # Vintage (20-24 years) - 2.5x multiplier - "Pentium III": {"year": 1999, "family": "x86", "arch": "P6"}, - "Pentium 4": {"year": 2000, "family": "x86", "arch": "NetBurst"}, - "PowerPC G4": {"year": 1999, "family": "ppc", "arch": "G4"}, - "Athlon": {"year": 1999, "family": "x86", "arch": "K7"}, - - # Classic (15-19 years) - 2.0x multiplier - "Core 2 Duo": {"year": 2006, "family": "x86", "arch": "Core"}, - "Core 2 Quad": {"year": 2007, "family": 
"x86", "arch": "Core"}, - "PowerPC G5": {"year": 2003, "family": "ppc", "arch": "G5"}, - "Athlon 64": {"year": 2003, "family": "x86", "arch": "K8"}, - "Opteron": {"year": 2003, "family": "x86", "arch": "K8"}, - - # Retro (10-14 years) - 1.5x multiplier - "Core i7 Nehalem": {"year": 2008, "family": "x86", "arch": "Nehalem"}, - "Core i5 Sandy Bridge": {"year": 2011, "family": "x86", "arch": "Sandy Bridge"}, - "FX-8350": {"year": 2012, "family": "x86", "arch": "Piledriver"}, - - # Modern (5-9 years) - 1.0x multiplier - "Core i7 Skylake": {"year": 2015, "family": "x86", "arch": "Skylake"}, - "Ryzen 7 1800X": {"year": 2017, "family": "x86", "arch": "Zen"}, - "Ryzen 9 3900X": {"year": 2019, "family": "x86", "arch": "Zen2"}, - - # Recent (0-4 years) - 0.5x penalty - "Core i9 12900K": {"year": 2021, "family": "x86", "arch": "Alder Lake"}, - "Ryzen 9 7950X": {"year": 2022, "family": "x86", "arch": "Zen4"}, - "Apple M1": {"year": 2020, "family": "arm", "arch": "Apple Silicon"}, - "Apple M3": {"year": 2023, "family": "arm", "arch": "Apple Silicon"}, -} - - -# ============================================================================= -# Hardware Validation -# ============================================================================= - -@dataclass -class HardwareInfo: - """Validated hardware information""" - cpu_model: str - release_year: int - uptime_days: int - architecture: str = "x86" - unique_id: str = "" - tier: str = "" - multiplier: float = 1.0 - age_years: int = 0 - - def __post_init__(self): - self.age_years = CURRENT_YEAR - self.release_year - self.tier = self._compute_tier() - self.multiplier = HARDWARE_TIERS.get(self.tier, {}).get("multiplier", 0.5) - - def _compute_tier(self) -> str: - for tier_name, params in HARDWARE_TIERS.items(): - if params["min_age"] <= self.age_years <= params["max_age"]: - return tier_name - return "recent" - - def generate_hardware_hash(self) -> str: - """Generate unique hardware fingerprint""" - data = 
f"{self.cpu_model}:{self.architecture}:{self.unique_id}" - return hashlib.sha256(data.encode()).hexdigest() - - -def validate_hardware_claim(model: str, claimed_year: int) -> Tuple[bool, str]: - """ - Validate a hardware claim against known database. - - Security: Prevents false claims about hardware age. - - Args: - model: CPU model string - claimed_year: Year claimed by node - - Returns: - (valid, message) tuple - """ - # Check if model is in database - for known_model, info in HARDWARE_DATABASE.items(): - if known_model.lower() in model.lower(): - actual_year = info["year"] - # Allow 1-year tolerance for variants - if abs(claimed_year - actual_year) <= 1: - return True, f"Hardware validated: {known_model} ({actual_year})" - else: - return False, f"Year mismatch: claimed {claimed_year}, actual {actual_year}" - - # Unknown hardware - allow with warning - return True, f"Unknown hardware: {model} - accepting claimed year {claimed_year}" - - -# ============================================================================= -# Antiquity Score Calculator -# ============================================================================= - -def calculate_antiquity_score(release_year: int, uptime_days: int) -> float: - """ - Calculate Antiquity Score per RIP-0001 spec. - - Formula: AS = (current_year - release_year) * log10(uptime_days + 1) - - This is NOT Proof of Work! Rewards: - - Hardware preservation (age) - - Node reliability (uptime) - - NOT computational speed - """ - age = max(0, CURRENT_YEAR - release_year) - uptime_factor = math.log10(uptime_days + 1) - return age * uptime_factor - - -def calculate_effective_score(base_score: float, tier: str, reputation: float = 1.0) -> float: - """ - Calculate effective score with tier multiplier and reputation. 
- - Args: - base_score: Raw Antiquity Score - tier: Hardware tier - reputation: Reputation multiplier (0.0 - 1.0) - - Returns: - Effective score for mining weight - """ - multiplier = HARDWARE_TIERS.get(tier, {}).get("multiplier", 0.5) - return base_score * multiplier * reputation - - -# ============================================================================= -# Drift Lock System (RIP-0003) -# ============================================================================= - -class DriftStatus(Enum): - """Node drift status""" - NORMAL = "normal" - WARNING = "warning" - QUARANTINED = "quarantined" - - -@dataclass -class DriftRecord: - """Record of a node's behavioral drift""" - wallet: str - baseline_score: float - current_score: float - drift_percentage: float - status: DriftStatus - quarantine_until_block: Optional[int] = None - violations: List[str] = field(default_factory=list) - - -class DriftLockManager: - """ - Drift Lock System - detects emulation attempts via behavioral analysis. - - Security Principle: Real vintage hardware has consistent, predictable behavior. - Emulators often show inconsistent timing, entropy, or performance patterns. - - When drift exceeds threshold: - 1. Node enters WARNING state - 2. Challenged to prove hardware authenticity - 3. Failed challenge = QUARANTINE - 4. 
Quarantine lasts QUARANTINE_DURATION_BLOCKS - """ - - def __init__(self): - self._baselines: Dict[str, float] = {} - self._history: Dict[str, List[float]] = {} - self._drift_records: Dict[str, DriftRecord] = {} - self._quarantined: set = set() - - def record_score(self, wallet: str, score: float): - """Record a score observation for drift analysis""" - if wallet not in self._history: - self._history[wallet] = [] - self._baselines[wallet] = score - - self._history[wallet].append(score) - - # Keep last 100 observations - if len(self._history[wallet]) > 100: - self._history[wallet] = self._history[wallet][-100:] - - # Update baseline (rolling average) - if len(self._history[wallet]) >= 10: - self._baselines[wallet] = sum(self._history[wallet]) / len(self._history[wallet]) - - def check_drift(self, wallet: str, current_score: float) -> DriftRecord: - """ - Check if a node's behavior has drifted from baseline. - - Drift indicates possible: - - Emulation attempt - - Hardware swap - - System instability - """ - baseline = self._baselines.get(wallet, current_score) - - if baseline == 0: - drift_pct = 0.0 - else: - drift_pct = abs(current_score - baseline) / baseline - - violations = [] - status = DriftStatus.NORMAL - - if drift_pct > DRIFT_THRESHOLD: - violations.append(f"Score drift: {drift_pct:.1%} > {DRIFT_THRESHOLD:.0%}") - status = DriftStatus.WARNING - - if drift_pct > DRIFT_THRESHOLD * 2: - status = DriftStatus.QUARANTINED - self._quarantined.add(wallet) - - record = DriftRecord( - wallet=wallet, - baseline_score=baseline, - current_score=current_score, - drift_percentage=drift_pct, - status=status, - violations=violations, - ) - - self._drift_records[wallet] = record - return record - - def quarantine_node(self, wallet: str, current_block: int, reason: str): - """Place a node in quarantine""" - self._quarantined.add(wallet) - - record = self._drift_records.get(wallet, DriftRecord( - wallet=wallet, - baseline_score=0, - current_score=0, - drift_percentage=0, - 
status=DriftStatus.QUARANTINED, - )) - - record.status = DriftStatus.QUARANTINED - record.quarantine_until_block = current_block + QUARANTINE_DURATION_BLOCKS - record.violations.append(reason) - - self._drift_records[wallet] = record - print(f"Node {wallet[:16]}... QUARANTINED: {reason}") - - def release_from_quarantine(self, wallet: str, current_block: int) -> bool: - """Check if node can be released from quarantine""" - record = self._drift_records.get(wallet) - if not record or record.status != DriftStatus.QUARANTINED: - return True - - if record.quarantine_until_block and current_block >= record.quarantine_until_block: - self._quarantined.discard(wallet) - record.status = DriftStatus.NORMAL - record.quarantine_until_block = None - print(f"Node {wallet[:16]}... released from quarantine") - return True - - return False - - def is_quarantined(self, wallet: str) -> bool: - """Check if a node is currently quarantined""" - return wallet in self._quarantined - - -# ============================================================================= -# Deep Entropy Verification -# ============================================================================= - -@dataclass -class EntropyProof: - """Entropy proof from hardware verification""" - instruction_timing: float - memory_patterns: float - bus_timing: float - thermal_signature: float - architectural_quirks: float - combined_score: float = 0.0 - signature_hash: str = "" - - def __post_init__(self): - self.combined_score = self._calculate_combined() - self.signature_hash = self._generate_hash() - - def _calculate_combined(self) -> float: - """Calculate weighted combined score""" - return ( - ENTROPY_WEIGHTS["instruction_timing"] * self.instruction_timing + - ENTROPY_WEIGHTS["memory_patterns"] * self.memory_patterns + - ENTROPY_WEIGHTS["bus_timing"] * self.bus_timing + - ENTROPY_WEIGHTS["thermal_signature"] * self.thermal_signature + - ENTROPY_WEIGHTS["architectural_quirks"] * self.architectural_quirks - ) - - def 
_generate_hash(self) -> str: - data = f"{self.instruction_timing}:{self.memory_patterns}:{self.bus_timing}" - return hashlib.sha256(data.encode()).hexdigest() - - -class EntropyVerifier: - """ - Deep Entropy Verification System. - - Core Security Principle: - "It's cheaper to buy a $50 486 than to emulate one" - - Verification Layers: - 1. Instruction Timing - CPU cycle variations - 2. Memory Patterns - Cache/RAM behavior - 3. Bus Timing - I/O timing characteristics - 4. Thermal Signature - Heat patterns under load - 5. Architectural Quirks - Known hardware bugs/features - """ - - def verify(self, proof: EntropyProof, hardware: HardwareInfo) -> Tuple[bool, float, str]: - """ - Verify an entropy proof. - - Args: - proof: Entropy proof to verify - hardware: Claimed hardware info - - Returns: - (valid, emulation_probability, message) - """ - # Check minimum score - if proof.combined_score < MIN_ENTROPY_SCORE: - return False, 1.0, f"Entropy score {proof.combined_score:.2f} below minimum" - - # Calculate emulation probability - # Real hardware has consistent, high entropy - # Emulators typically fail on timing precision - emulation_prob = self._estimate_emulation_probability(proof, hardware) - - if emulation_prob > EMULATION_PROBABILITY_THRESHOLD: - return False, emulation_prob, f"High emulation probability: {emulation_prob:.1%}" - - return True, emulation_prob, "Hardware verification passed" - - def _estimate_emulation_probability(self, proof: EntropyProof, hardware: HardwareInfo) -> float: - """ - Estimate probability that hardware is emulated. 
- - Factors: - - Too-perfect timing = likely emulator - - Too-uniform patterns = likely emulator - - Missing quirks = likely emulator - """ - prob = 0.0 - - # Perfect timing is suspicious (real hardware has jitter) - if proof.instruction_timing > 0.99: - prob += 0.3 # Too perfect - - # Uniform memory patterns are suspicious - if proof.memory_patterns > 0.99: - prob += 0.2 - - # Vintage hardware should have quirks - if hardware.age_years >= 20 and proof.architectural_quirks < 0.5: - prob += 0.3 # Old hardware without quirks = suspicious - - # Bus timing should vary - if proof.bus_timing > 0.98: - prob += 0.2 - - return min(1.0, prob) - - -# ============================================================================= -# Reputation System -# ============================================================================= - -@dataclass -class NodeReputation: - """Node reputation tracking for long-term behavior""" - wallet: str - score: float = 50.0 # Start neutral (0-100) - total_blocks: int = 0 - successful_validations: int = 0 - drift_violations: int = 0 - last_active: int = 0 - - def update(self, block_validated: bool, drift_ok: bool): - """Update reputation based on recent behavior""" - self.total_blocks += 1 - self.last_active = int(time.time()) - - if block_validated: - self.successful_validations += 1 - self.score = min(100, self.score + 0.5) - - if not drift_ok: - self.drift_violations += 1 - self.score = max(0, self.score - 5.0) - - @property - def reliability_factor(self) -> float: - """Get reliability factor (0.0 - 1.0) for scoring""" - return self.score / 100.0 - - -# ============================================================================= -# Complete Validator -# ============================================================================= - -class HardwareValidator: - """ - Complete hardware validation system combining all checks. - - Validates: - 1. Hardware claim authenticity - 2. Antiquity Score calculation - 3. Entropy proof verification - 4. 
Drift lock status - 5. Reputation - """ - - def __init__(self): - self.drift_manager = DriftLockManager() - self.entropy_verifier = EntropyVerifier() - self.reputations: Dict[str, NodeReputation] = {} - - def validate_miner( - self, - wallet: str, - hardware: HardwareInfo, - entropy_proof: Optional[EntropyProof] = None, - current_block: int = 0, - ) -> Dict[str, Any]: - """ - Complete validation of a miner. - - Returns: - Validation result with score and eligibility - """ - result = { - "wallet": wallet, - "eligible": True, - "errors": [], - "warnings": [], - } - - # 1. Check quarantine status - if self.drift_manager.is_quarantined(wallet): - released = self.drift_manager.release_from_quarantine(wallet, current_block) - if not released: - result["eligible"] = False - result["errors"].append("Node is quarantined") - return result - - # 2. Validate hardware claim - valid, msg = validate_hardware_claim(hardware.cpu_model, hardware.release_year) - if not valid: - result["eligible"] = False - result["errors"].append(msg) - return result - - # 3. Calculate Antiquity Score - base_score = calculate_antiquity_score(hardware.release_year, hardware.uptime_days) - if base_score < AS_MIN: - result["eligible"] = False - result["errors"].append(f"Antiquity Score {base_score:.2f} below minimum {AS_MIN}") - return result - - # 4. Verify entropy proof if provided - if entropy_proof: - valid, emul_prob, msg = self.entropy_verifier.verify(entropy_proof, hardware) - if not valid: - result["eligible"] = False - result["errors"].append(msg) - return result - result["emulation_probability"] = emul_prob - - # 5. Check drift - drift = self.drift_manager.check_drift(wallet, base_score) - if drift.status == DriftStatus.QUARANTINED: - result["eligible"] = False - result["errors"].append("Drift lock triggered") - return result - elif drift.status == DriftStatus.WARNING: - result["warnings"].append(f"Drift warning: {drift.drift_percentage:.1%}") - - # 6. 
Get reputation - rep = self.reputations.get(wallet, NodeReputation(wallet=wallet)) - - # 7. Calculate final score - effective_score = calculate_effective_score( - base_score, - hardware.tier, - rep.reliability_factor - ) - - result["antiquity_score"] = base_score - result["effective_score"] = effective_score - result["tier"] = hardware.tier - result["multiplier"] = hardware.multiplier - result["reputation"] = rep.score - - return result - - -# ============================================================================= -# Tests -# ============================================================================= - -if __name__ == "__main__": - print("=" * 60) - print("RUSTCHAIN VALIDATOR - ANTIQUITY SCORE CALCULATOR") - print("=" * 60) - - validator = HardwareValidator() - - test_cases = [ - ("RTC1Miner486", HardwareInfo("486DX2", 1992, 300)), - ("RTC2MinerG4", HardwareInfo("PowerPC G4", 2002, 200)), - ("RTC3MinerModern", HardwareInfo("Ryzen 9 7950X", 2022, 30)), - ] - - for wallet, hardware in test_cases: - result = validator.validate_miner(wallet, hardware) - print(f"\n{wallet}:") - print(f" Hardware: {hardware.cpu_model} ({hardware.release_year})") - print(f" Age: {hardware.age_years} years") - print(f" Tier: {hardware.tier} ({hardware.multiplier}x)") - print(f" Eligible: {result['eligible']}") - if result['eligible']: - print(f" Antiquity Score: {result['antiquity_score']:.2f}") - print(f" Effective Score: {result['effective_score']:.2f}") - else: - print(f" Errors: {result['errors']}") +""" +RustChain Validator & Antiquity Score (RIP-0001, RIP-0003) +========================================================== + +Hardware validation, Antiquity Score calculation, and drift lock management. 
+ +Security Mechanisms: +- Hardware fingerprinting via deep entropy +- Drift detection for behavioral anomalies +- Quarantine system for suspected emulators +- Reputation tracking for long-term behavior +""" + +import hashlib +import math +import time +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple, Any +from enum import Enum + +from ..config.chain_params import ( + CURRENT_YEAR, + AS_MAX, + AS_MIN, + HARDWARE_TIERS, + DRIFT_THRESHOLD, + QUARANTINE_DURATION_BLOCKS, + ENTROPY_WEIGHTS, + EMULATION_PROBABILITY_THRESHOLD, + MIN_ENTROPY_SCORE, +) + + +# ============================================================================= +# Hardware Database +# ============================================================================= + +# Known CPU models with release years (for validation) +HARDWARE_DATABASE: Dict[str, Dict[str, Any]] = { + # Ancient (30+ years) - 3.5x multiplier + "486DX2": {"year": 1992, "family": "x86", "arch": "i486"}, + "486DX4": {"year": 1994, "family": "x86", "arch": "i486"}, + "68040": {"year": 1990, "family": "68k", "arch": "m68k"}, + + # Sacred (25-29 years) - 3.0x multiplier + "Pentium": {"year": 1993, "family": "x86", "arch": "P5"}, + "Pentium Pro": {"year": 1995, "family": "x86", "arch": "P6"}, + "Pentium II": {"year": 1997, "family": "x86", "arch": "P6"}, + "PowerPC 601": {"year": 1993, "family": "ppc", "arch": "POWER"}, + "PowerPC 603": {"year": 1994, "family": "ppc", "arch": "POWER"}, + "Alpha 21064": {"year": 1992, "family": "alpha", "arch": "EV4"}, + + # Vintage (20-24 years) - 2.5x multiplier + "Pentium III": {"year": 1999, "family": "x86", "arch": "P6"}, + "Pentium 4": {"year": 2000, "family": "x86", "arch": "NetBurst"}, + "PowerPC G4": {"year": 1999, "family": "ppc", "arch": "G4"}, + "Athlon": {"year": 1999, "family": "x86", "arch": "K7"}, + + # Classic (15-19 years) - 2.0x multiplier + "Core 2 Duo": {"year": 2006, "family": "x86", "arch": "Core"}, + "Core 2 Quad": {"year": 2007, "family": 
"x86", "arch": "Core"}, + "PowerPC G5": {"year": 2003, "family": "ppc", "arch": "G5"}, + "Athlon 64": {"year": 2003, "family": "x86", "arch": "K8"}, + "Opteron": {"year": 2003, "family": "x86", "arch": "K8"}, + + # Retro (10-14 years) - 1.5x multiplier + "Core i7 Nehalem": {"year": 2008, "family": "x86", "arch": "Nehalem"}, + "Core i5 Sandy Bridge": {"year": 2011, "family": "x86", "arch": "Sandy Bridge"}, + "FX-8350": {"year": 2012, "family": "x86", "arch": "Piledriver"}, + + # Modern (5-9 years) - 1.0x multiplier + "Core i7 Skylake": {"year": 2015, "family": "x86", "arch": "Skylake"}, + "Ryzen 7 1800X": {"year": 2017, "family": "x86", "arch": "Zen"}, + "Ryzen 9 3900X": {"year": 2019, "family": "x86", "arch": "Zen2"}, + + # Recent (0-4 years) - 0.5x penalty + "Core i9 12900K": {"year": 2021, "family": "x86", "arch": "Alder Lake"}, + "Ryzen 9 7950X": {"year": 2022, "family": "x86", "arch": "Zen4"}, + "Apple M1": {"year": 2020, "family": "arm", "arch": "Apple Silicon"}, + "Apple M3": {"year": 2023, "family": "arm", "arch": "Apple Silicon"}, +} + + +# ============================================================================= +# Hardware Validation +# ============================================================================= + +@dataclass +class HardwareInfo: + """Validated hardware information""" + cpu_model: str + release_year: int + uptime_days: int + architecture: str = "x86" + unique_id: str = "" + tier: str = "" + multiplier: float = 1.0 + age_years: int = 0 + + def __post_init__(self): + self.age_years = CURRENT_YEAR - self.release_year + self.tier = self._compute_tier() + self.multiplier = HARDWARE_TIERS.get(self.tier, {}).get("multiplier", 0.5) + + def _compute_tier(self) -> str: + for tier_name, params in HARDWARE_TIERS.items(): + if params["min_age"] <= self.age_years <= params["max_age"]: + return tier_name + return "recent" + + def generate_hardware_hash(self) -> str: + """Generate unique hardware fingerprint""" + data = 
f"{self.cpu_model}:{self.architecture}:{self.unique_id}" + return hashlib.sha256(data.encode()).hexdigest() + + +def validate_hardware_claim(model: str, claimed_year: int) -> Tuple[bool, str]: + """ + Validate a hardware claim against known database. + + Security: Prevents false claims about hardware age. + + Args: + model: CPU model string + claimed_year: Year claimed by node + + Returns: + (valid, message) tuple + """ + # Check if model is in database + for known_model, info in HARDWARE_DATABASE.items(): + if known_model.lower() in model.lower(): + actual_year = info["year"] + # Allow 1-year tolerance for variants + if abs(claimed_year - actual_year) <= 1: + return True, f"Hardware validated: {known_model} ({actual_year})" + else: + return False, f"Year mismatch: claimed {claimed_year}, actual {actual_year}" + + # Unknown hardware - allow with warning + return True, f"Unknown hardware: {model} - accepting claimed year {claimed_year}" + + +# ============================================================================= +# Antiquity Score Calculator +# ============================================================================= + +def calculate_antiquity_score(release_year: int, uptime_days: int) -> float: + """ + Calculate Antiquity Score per RIP-0001 spec. + + Formula: AS = (current_year - release_year) * log10(uptime_days + 1) + + This is NOT Proof of Work! Rewards: + - Hardware preservation (age) + - Node reliability (uptime) + - NOT computational speed + """ + age = max(0, CURRENT_YEAR - release_year) + uptime_factor = math.log10(uptime_days + 1) + return age * uptime_factor + + +def calculate_effective_score(base_score: float, tier: str, reputation: float = 1.0) -> float: + """ + Calculate effective score with tier multiplier and reputation. 
+ + Args: + base_score: Raw Antiquity Score + tier: Hardware tier + reputation: Reputation multiplier (0.0 - 1.0) + + Returns: + Effective score for mining weight + """ + multiplier = HARDWARE_TIERS.get(tier, {}).get("multiplier", 0.5) + return base_score * multiplier * reputation + + +# ============================================================================= +# Drift Lock System (RIP-0003) +# ============================================================================= + +class DriftStatus(Enum): + """Node drift status""" + NORMAL = "normal" + WARNING = "warning" + QUARANTINED = "quarantined" + + +@dataclass +class DriftRecord: + """Record of a node's behavioral drift""" + wallet: str + baseline_score: float + current_score: float + drift_percentage: float + status: DriftStatus + quarantine_until_block: Optional[int] = None + violations: List[str] = field(default_factory=list) + + +class DriftLockManager: + """ + Drift Lock System - detects emulation attempts via behavioral analysis. + + Security Principle: Real vintage hardware has consistent, predictable behavior. + Emulators often show inconsistent timing, entropy, or performance patterns. + + When drift exceeds threshold: + 1. Node enters WARNING state + 2. Challenged to prove hardware authenticity + 3. Failed challenge = QUARANTINE + 4. 
Quarantine lasts QUARANTINE_DURATION_BLOCKS + """ + + def __init__(self): + self._baselines: Dict[str, float] = {} + self._history: Dict[str, List[float]] = {} + self._drift_records: Dict[str, DriftRecord] = {} + self._quarantined: set = set() + + def record_score(self, wallet: str, score: float): + """Record a score observation for drift analysis""" + if wallet not in self._history: + self._history[wallet] = [] + self._baselines[wallet] = score + + self._history[wallet].append(score) + + # Keep last 100 observations + if len(self._history[wallet]) > 100: + self._history[wallet] = self._history[wallet][-100:] + + # Update baseline (rolling average) + if len(self._history[wallet]) >= 10: + self._baselines[wallet] = sum(self._history[wallet]) / len(self._history[wallet]) + + def check_drift(self, wallet: str, current_score: float) -> DriftRecord: + """ + Check if a node's behavior has drifted from baseline. + + Drift indicates possible: + - Emulation attempt + - Hardware swap + - System instability + """ + baseline = self._baselines.get(wallet, current_score) + + if baseline == 0: + drift_pct = 0.0 + else: + drift_pct = abs(current_score - baseline) / baseline + + violations = [] + status = DriftStatus.NORMAL + + if drift_pct > DRIFT_THRESHOLD: + violations.append(f"Score drift: {drift_pct:.1%} > {DRIFT_THRESHOLD:.0%}") + status = DriftStatus.WARNING + + if drift_pct > DRIFT_THRESHOLD * 2: + status = DriftStatus.QUARANTINED + self._quarantined.add(wallet) + + record = DriftRecord( + wallet=wallet, + baseline_score=baseline, + current_score=current_score, + drift_percentage=drift_pct, + status=status, + violations=violations, + ) + + self._drift_records[wallet] = record + return record + + def quarantine_node(self, wallet: str, current_block: int, reason: str): + """Place a node in quarantine""" + self._quarantined.add(wallet) + + record = self._drift_records.get(wallet, DriftRecord( + wallet=wallet, + baseline_score=0, + current_score=0, + drift_percentage=0, + 
status=DriftStatus.QUARANTINED, + )) + + record.status = DriftStatus.QUARANTINED + record.quarantine_until_block = current_block + QUARANTINE_DURATION_BLOCKS + record.violations.append(reason) + + self._drift_records[wallet] = record + print(f"Node {wallet[:16]}... QUARANTINED: {reason}") + + def release_from_quarantine(self, wallet: str, current_block: int) -> bool: + """Check if node can be released from quarantine""" + record = self._drift_records.get(wallet) + if not record or record.status != DriftStatus.QUARANTINED: + return True + + if record.quarantine_until_block and current_block >= record.quarantine_until_block: + self._quarantined.discard(wallet) + record.status = DriftStatus.NORMAL + record.quarantine_until_block = None + print(f"Node {wallet[:16]}... released from quarantine") + return True + + return False + + def is_quarantined(self, wallet: str) -> bool: + """Check if a node is currently quarantined""" + return wallet in self._quarantined + + +# ============================================================================= +# Deep Entropy Verification +# ============================================================================= + +@dataclass +class EntropyProof: + """Entropy proof from hardware verification""" + instruction_timing: float + memory_patterns: float + bus_timing: float + thermal_signature: float + architectural_quirks: float + combined_score: float = 0.0 + signature_hash: str = "" + + def __post_init__(self): + self.combined_score = self._calculate_combined() + self.signature_hash = self._generate_hash() + + def _calculate_combined(self) -> float: + """Calculate weighted combined score""" + return ( + ENTROPY_WEIGHTS["instruction_timing"] * self.instruction_timing + + ENTROPY_WEIGHTS["memory_patterns"] * self.memory_patterns + + ENTROPY_WEIGHTS["bus_timing"] * self.bus_timing + + ENTROPY_WEIGHTS["thermal_signature"] * self.thermal_signature + + ENTROPY_WEIGHTS["architectural_quirks"] * self.architectural_quirks + ) + + def 
_generate_hash(self) -> str: + data = f"{self.instruction_timing}:{self.memory_patterns}:{self.bus_timing}" + return hashlib.sha256(data.encode()).hexdigest() + + +class EntropyVerifier: + """ + Deep Entropy Verification System. + + Core Security Principle: + "It's cheaper to buy a $50 486 than to emulate one" + + Verification Layers: + 1. Instruction Timing - CPU cycle variations + 2. Memory Patterns - Cache/RAM behavior + 3. Bus Timing - I/O timing characteristics + 4. Thermal Signature - Heat patterns under load + 5. Architectural Quirks - Known hardware bugs/features + """ + + def verify(self, proof: EntropyProof, hardware: HardwareInfo) -> Tuple[bool, float, str]: + """ + Verify an entropy proof. + + Args: + proof: Entropy proof to verify + hardware: Claimed hardware info + + Returns: + (valid, emulation_probability, message) + """ + # Check minimum score + if proof.combined_score < MIN_ENTROPY_SCORE: + return False, 1.0, f"Entropy score {proof.combined_score:.2f} below minimum" + + # Calculate emulation probability + # Real hardware has consistent, high entropy + # Emulators typically fail on timing precision + emulation_prob = self._estimate_emulation_probability(proof, hardware) + + if emulation_prob > EMULATION_PROBABILITY_THRESHOLD: + return False, emulation_prob, f"High emulation probability: {emulation_prob:.1%}" + + return True, emulation_prob, "Hardware verification passed" + + def _estimate_emulation_probability(self, proof: EntropyProof, hardware: HardwareInfo) -> float: + """ + Estimate probability that hardware is emulated. 
+ + Factors: + - Too-perfect timing = likely emulator + - Too-uniform patterns = likely emulator + - Missing quirks = likely emulator + """ + prob = 0.0 + + # Perfect timing is suspicious (real hardware has jitter) + if proof.instruction_timing > 0.99: + prob += 0.3 # Too perfect + + # Uniform memory patterns are suspicious + if proof.memory_patterns > 0.99: + prob += 0.2 + + # Vintage hardware should have quirks + if hardware.age_years >= 20 and proof.architectural_quirks < 0.5: + prob += 0.3 # Old hardware without quirks = suspicious + + # Bus timing should vary + if proof.bus_timing > 0.98: + prob += 0.2 + + return min(1.0, prob) + + +# ============================================================================= +# Reputation System +# ============================================================================= + +@dataclass +class NodeReputation: + """Node reputation tracking for long-term behavior""" + wallet: str + score: float = 50.0 # Start neutral (0-100) + total_blocks: int = 0 + successful_validations: int = 0 + drift_violations: int = 0 + last_active: int = 0 + + def update(self, block_validated: bool, drift_ok: bool): + """Update reputation based on recent behavior""" + self.total_blocks += 1 + self.last_active = int(time.time()) + + if block_validated: + self.successful_validations += 1 + self.score = min(100, self.score + 0.5) + + if not drift_ok: + self.drift_violations += 1 + self.score = max(0, self.score - 5.0) + + @property + def reliability_factor(self) -> float: + """Get reliability factor (0.0 - 1.0) for scoring""" + return self.score / 100.0 + + +# ============================================================================= +# Complete Validator +# ============================================================================= + +class HardwareValidator: + """ + Complete hardware validation system combining all checks. + + Validates: + 1. Hardware claim authenticity + 2. Antiquity Score calculation + 3. Entropy proof verification + 4. 
Drift lock status + 5. Reputation + """ + + def __init__(self): + self.drift_manager = DriftLockManager() + self.entropy_verifier = EntropyVerifier() + self.reputations: Dict[str, NodeReputation] = {} + + def validate_miner( + self, + wallet: str, + hardware: HardwareInfo, + entropy_proof: Optional[EntropyProof] = None, + current_block: int = 0, + ) -> Dict[str, Any]: + """ + Complete validation of a miner. + + Returns: + Validation result with score and eligibility + """ + result = { + "wallet": wallet, + "eligible": True, + "errors": [], + "warnings": [], + } + + # 1. Check quarantine status + if self.drift_manager.is_quarantined(wallet): + released = self.drift_manager.release_from_quarantine(wallet, current_block) + if not released: + result["eligible"] = False + result["errors"].append("Node is quarantined") + return result + + # 2. Validate hardware claim + valid, msg = validate_hardware_claim(hardware.cpu_model, hardware.release_year) + if not valid: + result["eligible"] = False + result["errors"].append(msg) + return result + + # 3. Calculate Antiquity Score + base_score = calculate_antiquity_score(hardware.release_year, hardware.uptime_days) + if base_score < AS_MIN: + result["eligible"] = False + result["errors"].append(f"Antiquity Score {base_score:.2f} below minimum {AS_MIN}") + return result + + # 4. Verify entropy proof if provided + if entropy_proof: + valid, emul_prob, msg = self.entropy_verifier.verify(entropy_proof, hardware) + if not valid: + result["eligible"] = False + result["errors"].append(msg) + return result + result["emulation_probability"] = emul_prob + + # 5. Check drift + drift = self.drift_manager.check_drift(wallet, base_score) + if drift.status == DriftStatus.QUARANTINED: + result["eligible"] = False + result["errors"].append("Drift lock triggered") + return result + elif drift.status == DriftStatus.WARNING: + result["warnings"].append(f"Drift warning: {drift.drift_percentage:.1%}") + + # 6. 
Get reputation + rep = self.reputations.get(wallet, NodeReputation(wallet=wallet)) + + # 7. Calculate final score + effective_score = calculate_effective_score( + base_score, + hardware.tier, + rep.reliability_factor + ) + + result["antiquity_score"] = base_score + result["effective_score"] = effective_score + result["tier"] = hardware.tier + result["multiplier"] = hardware.multiplier + result["reputation"] = rep.score + + return result + + +# ============================================================================= +# Tests +# ============================================================================= + +if __name__ == "__main__": + print("=" * 60) + print("RUSTCHAIN VALIDATOR - ANTIQUITY SCORE CALCULATOR") + print("=" * 60) + + validator = HardwareValidator() + + test_cases = [ + ("RTC1Miner486", HardwareInfo("486DX2", 1992, 300)), + ("RTC2MinerG4", HardwareInfo("PowerPC G4", 2002, 200)), + ("RTC3MinerModern", HardwareInfo("Ryzen 9 7950X", 2022, 30)), + ] + + for wallet, hardware in test_cases: + result = validator.validate_miner(wallet, hardware) + print(f"\n{wallet}:") + print(f" Hardware: {hardware.cpu_model} ({hardware.release_year})") + print(f" Age: {hardware.age_years} years") + print(f" Tier: {hardware.tier} ({hardware.multiplier}x)") + print(f" Eligible: {result['eligible']}") + if result['eligible']: + print(f" Antiquity Score: {result['antiquity_score']:.2f}") + print(f" Effective Score: {result['effective_score']:.2f}") + else: + print(f" Errors: {result['errors']}") diff --git a/rips/rustchain-core/validator/setup_validator.py b/rips/rustchain-core/validator/setup_validator.py index e09bb044..ce043a91 100644 --- a/rips/rustchain-core/validator/setup_validator.py +++ b/rips/rustchain-core/validator/setup_validator.py @@ -1,613 +1,613 @@ -#!/usr/bin/env python3 -""" -RustChain Validator Setup Script -================================ - -"Every vintage computer has historical potential" - -This script sets up a new validator node on the RustChain 
network. -It uses the authentic genesis block born on PowerMac G4 Mirror Door -with 12 hardware entropy sources. - -Emulation is economically irrational: - - Real hardware: ~$50 for a vintage machine - - Emulation: Thousands of hours to perfectly fake hardware fingerprints - -Usage: - python3 setup_validator.py --hardware-profile - python3 setup_validator.py --register - python3 setup_validator.py --start -""" - -import argparse -import hashlib -import json -import os -import platform -import socket -import subprocess -import sys -import time -from dataclasses import dataclass, asdict -from pathlib import Path -from typing import Dict, List, Optional, Tuple - -# Add parent to path -sys.path.insert(0, str(Path(__file__).parent.parent)) - -from config.chain_params import ( - CHAIN_ID, NETWORK_NAME, HARDWARE_TIERS, - ANCIENT_THRESHOLD, SACRED_THRESHOLD, VINTAGE_THRESHOLD, - CLASSIC_THRESHOLD, RETRO_THRESHOLD, MODERN_THRESHOLD -) -from validator.entropy import ( - HardwareEntropyCollector, SoftwareEntropyCollector, - EntropyProfile, ValidatorIdentityManager -) - -# ============================================================================= -# Constants -# ============================================================================= - -RUSTCHAIN_DIR = Path.home() / ".rustchain" -GENESIS_FILE = "genesis_deep_entropy.json" -VALIDATOR_CONFIG = "validator.json" -ENTROPY_CACHE = "entropy_profile.json" - -BOOTSTRAP_NODES = [ - # Initial bootstrap nodes (founder nodes) - "192.168.0.160:9333", # Sophia Prime Node - "192.168.0.125:9333", # G4 Mirror Door Genesis Node - "192.168.0.126:9333", # G4 Mirror Door Secondary -] - -CURRENT_YEAR = 2025 - -# ============================================================================= -# Hardware Detection -# ============================================================================= - -@dataclass -class HardwareProfile: - """Detected hardware profile for antiquity scoring""" - cpu_model: str - cpu_vendor: str - cpu_family: str - 
release_year: int - architecture: str - ram_mb: int - cores: int - tier: str - multiplier: float - is_vintage: bool - entropy_sources: List[str] - - -def detect_cpu_info() -> Dict: - """Detect CPU information across platforms""" - info = { - "model": "Unknown", - "vendor": "Unknown", - "family": "Unknown", - "architecture": platform.machine(), - } - - system = platform.system() - - if system == "Linux": - try: - with open("/proc/cpuinfo", "r") as f: - for line in f: - if "model name" in line.lower(): - info["model"] = line.split(":")[1].strip() - elif "vendor_id" in line.lower(): - info["vendor"] = line.split(":")[1].strip() - elif "cpu family" in line.lower(): - info["family"] = line.split(":")[1].strip() - except: - pass - - elif system == "Darwin": # macOS - try: - result = subprocess.run( - ["sysctl", "-n", "machdep.cpu.brand_string"], - capture_output=True, text=True - ) - if result.returncode == 0: - info["model"] = result.stdout.strip() - - # Check for PowerPC - if platform.machine() in ["Power Macintosh", "ppc", "ppc64"]: - result = subprocess.run( - ["system_profiler", "SPHardwareDataType"], - capture_output=True, text=True - ) - for line in result.stdout.split("\n"): - if "Model Identifier" in line: - info["model"] = line.split(":")[1].strip() - if "Processor Name" in line: - info["family"] = line.split(":")[1].strip() - except: - pass - - elif system == "Windows": - try: - result = subprocess.run( - ["wmic", "cpu", "get", "name"], - capture_output=True, text=True - ) - lines = [l.strip() for l in result.stdout.split("\n") if l.strip()] - if len(lines) > 1: - info["model"] = lines[1] - except: - pass - - return info - - -def estimate_release_year(cpu_model: str, cpu_vendor: str) -> int: - """ - Estimate CPU release year based on model string. - This is a simplified heuristic - real implementation would use a database. 
- """ - model_lower = cpu_model.lower() - - # PowerPC (Apple) - if "powermac" in model_lower or "powerpc" in model_lower: - if "g5" in model_lower: - return 2003 - elif "g4" in model_lower or "3,6" in model_lower: - return 2003 - elif "g3" in model_lower: - return 1999 - return 2002 - - # Intel generations (very simplified) - if "i9-14" in model_lower or "i7-14" in model_lower: - return 2024 - elif "i9-13" in model_lower or "i7-13" in model_lower: - return 2023 - elif "i9-12" in model_lower or "i7-12" in model_lower: - return 2022 - elif "i9-11" in model_lower or "i7-11" in model_lower: - return 2021 - elif "i9-10" in model_lower or "i7-10" in model_lower: - return 2020 - elif "ryzen 9 7" in model_lower: - return 2023 - elif "ryzen 9 5" in model_lower: - return 2021 - elif "ryzen 9 3" in model_lower: - return 2019 - - # Very old CPUs - if "pentium" in model_lower: - if "4" in model_lower: - return 2000 - elif "3" in model_lower or "iii" in model_lower: - return 1999 - elif "2" in model_lower or "ii" in model_lower: - return 1997 - return 1993 - - if "486" in model_lower: - return 1989 - if "386" in model_lower: - return 1985 - if "286" in model_lower: - return 1982 - if "8086" in model_lower or "8088" in model_lower: - return 1978 - - # Default to somewhat recent - return 2020 - - -def determine_tier(release_year: int) -> Tuple[str, float]: - """Determine hardware tier and multiplier based on release year""" - age = CURRENT_YEAR - release_year - - if age >= ANCIENT_THRESHOLD: - return "ancient", HARDWARE_TIERS["ancient"] - elif age >= SACRED_THRESHOLD: - return "sacred", HARDWARE_TIERS["sacred"] - elif age >= VINTAGE_THRESHOLD: - return "vintage", HARDWARE_TIERS["vintage"] - elif age >= CLASSIC_THRESHOLD: - return "classic", HARDWARE_TIERS["classic"] - elif age >= RETRO_THRESHOLD: - return "retro", HARDWARE_TIERS["retro"] - elif age >= MODERN_THRESHOLD: - return "modern", HARDWARE_TIERS["modern"] - else: - return "recent", HARDWARE_TIERS["recent"] - - -def 
detect_hardware() -> HardwareProfile: - """Detect full hardware profile""" - cpu_info = detect_cpu_info() - release_year = estimate_release_year(cpu_info["model"], cpu_info["vendor"]) - tier, multiplier = determine_tier(release_year) - - # Get RAM - try: - if platform.system() == "Linux": - with open("/proc/meminfo", "r") as f: - for line in f: - if "MemTotal" in line: - ram_kb = int(line.split()[1]) - ram_mb = ram_kb // 1024 - break - elif platform.system() == "Darwin": - result = subprocess.run( - ["sysctl", "-n", "hw.memsize"], - capture_output=True, text=True - ) - ram_mb = int(result.stdout.strip()) // (1024 * 1024) - else: - ram_mb = 4096 # Default - except: - ram_mb = 4096 - - # Get cores - cores = os.cpu_count() or 1 - - # Detect available entropy sources - entropy_sources = [] - if os.path.exists("/dev/urandom"): - entropy_sources.append("urandom") - if os.path.exists("/proc/cpuinfo"): - entropy_sources.append("cpuinfo") - if platform.machine() in ["Power Macintosh", "ppc", "ppc64"]: - entropy_sources.append("powerpc_timebase") - if os.path.exists("/sys/class/thermal"): - entropy_sources.append("thermal") - if os.path.exists("/sys/class/dmi"): - entropy_sources.append("dmi") - - return HardwareProfile( - cpu_model=cpu_info["model"], - cpu_vendor=cpu_info["vendor"], - cpu_family=cpu_info["family"], - release_year=release_year, - architecture=cpu_info["architecture"], - ram_mb=ram_mb, - cores=cores, - tier=tier, - multiplier=multiplier, - is_vintage=(CURRENT_YEAR - release_year) >= 10, - entropy_sources=entropy_sources, - ) - - -# ============================================================================= -# Genesis Loading -# ============================================================================= - -def load_genesis() -> Dict: - """Load the authentic G4-born genesis block""" - genesis_path = RUSTCHAIN_DIR / "genesis" / GENESIS_FILE - - if not genesis_path.exists(): - # Try to find genesis in package - pkg_genesis = Path(__file__).parent.parent / 
"genesis" / GENESIS_FILE - if pkg_genesis.exists(): - genesis_path.parent.mkdir(parents=True, exist_ok=True) - import shutil - shutil.copy(pkg_genesis, genesis_path) - - if not genesis_path.exists(): - raise FileNotFoundError( - f"Genesis block not found at {genesis_path}\n" - "Please run: rustchain-setup --download-genesis" - ) - - with open(genesis_path, "r") as f: - genesis = json.load(f) - - # Verify genesis authenticity - if "deep_entropy_proof" not in genesis: - raise ValueError("Invalid genesis: missing deep entropy proof") - - if not genesis.get("proof_of_antiquity", {}).get("hardware_verified"): - print("WARNING: Genesis was not verified on real vintage hardware") - - return genesis - - -def verify_genesis_signature(genesis: Dict) -> bool: - """Verify the genesis block signature""" - proof = genesis.get("deep_entropy_proof", {}) - signature = proof.get("signature", "") - - # Check for PowerPC G4 signature format - if not signature.startswith("PPC-G4-DEEP-"): - print(f"WARNING: Genesis signature format unexpected: {signature[:20]}...") - return False - - # Verify depth - depth = int(signature.split("-D")[-1]) if "-D" in signature else 0 - if depth < 10: - print(f"WARNING: Genesis entropy depth too low: {depth}") - return False - - print(f"Genesis signature verified: {signature[:40]}...") - return True - - -# ============================================================================= -# Validator Registration -# ============================================================================= - -@dataclass -class ValidatorConfig: - """Validator configuration""" - validator_id: str - wallet_address: str - hardware_profile: Dict - entropy_fingerprint: str - antiquity_score: float - tier: str - bootstrap_nodes: List[str] - api_port: int - p2p_port: int - registered_at: int - - -def generate_wallet_address(entropy_fingerprint: str) -> str: - """Generate a wallet address from entropy fingerprint""" - # Simple address generation (real implementation would use proper 
crypto) - addr_hash = hashlib.sha256(entropy_fingerprint.encode()).hexdigest() - checksum = hashlib.sha256(bytes.fromhex(addr_hash)).hexdigest()[:8] - return f"RTC{addr_hash[:32]}{checksum}" - - -def calculate_antiquity_score(release_year: int, uptime_days: int = 1) -> float: - """ - Calculate Antiquity Score using the RIP formula: - AS = (current_year - release_year) * log10(uptime_days + 1) - """ - import math - age = CURRENT_YEAR - release_year - return age * math.log10(uptime_days + 1) - - -def register_validator(hardware: HardwareProfile, genesis: Dict) -> ValidatorConfig: - """Register a new validator""" - - print("\nGenerating validator identity...") - - # Collect entropy - hw_collector = HardwareEntropyCollector() - sw_collector = SoftwareEntropyCollector() - - hw_entropy = hw_collector.collect_all() - sw_entropy = sw_collector.collect_all() - - # Create entropy profile - profile = EntropyProfile( - hardware_entropy=hw_entropy, - software_entropy=sw_entropy, - collection_timestamp=int(time.time()), - hardware_tier=hardware.tier, - estimated_release_year=hardware.release_year, - ) - - # Generate validator ID - identity_manager = ValidatorIdentityManager() - validator_id = identity_manager.derive_validator_id(profile) - - # Generate wallet address - wallet_address = generate_wallet_address(validator_id) - - # Calculate antiquity score - antiquity_score = calculate_antiquity_score(hardware.release_year) - - config = ValidatorConfig( - validator_id=validator_id, - wallet_address=wallet_address, - hardware_profile=asdict(hardware), - entropy_fingerprint=identity_manager.fingerprint_hash, - antiquity_score=antiquity_score, - tier=hardware.tier, - bootstrap_nodes=BOOTSTRAP_NODES, - api_port=9332, - p2p_port=9333, - registered_at=int(time.time()), - ) - - # Save config - config_path = RUSTCHAIN_DIR / VALIDATOR_CONFIG - RUSTCHAIN_DIR.mkdir(parents=True, exist_ok=True) - with open(config_path, "w") as f: - json.dump(asdict(config), f, indent=2) - - print(f"Validator 
registered: {validator_id[:16]}...") - print(f"Wallet address: {wallet_address[:24]}...") - print(f"Antiquity Score: {antiquity_score:.2f}") - print(f"Hardware Tier: {hardware.tier} ({hardware.multiplier}x)") - - return config - - -# ============================================================================= -# Main CLI -# ============================================================================= - -def print_banner(): - """Print RustChain banner""" - banner = """ -╔══════════════════════════════════════════════════════════════════════════════╗ -║ ║ -║ ██████╗ ██╗ ██╗███████╗████████╗ ██████╗██╗ ██╗ █████╗ ██╗███╗ ██╗ ║ -║ ██╔══██╗██║ ██║██╔════╝╚══██╔══╝██╔════╝██║ ██║██╔══██╗██║████╗ ██║ ║ -║ ██████╔╝██║ ██║███████╗ ██║ ██║ ███████║███████║██║██╔██╗ ██║ ║ -║ ██╔══██╗██║ ██║╚════██║ ██║ ██║ ██╔══██║██╔══██║██║██║╚██╗██║ ║ -║ ██║ ██║╚██████╔╝███████║ ██║ ╚██████╗██║ ██║██║ ██║██║██║ ╚████║ ║ -║ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ║ -║ ║ -║ PROOF OF ANTIQUITY VALIDATOR SETUP ║ -║ ║ -║ "Every vintage computer has historical potential" ║ -║ ║ -║ This is NOT Proof of Work. This is PROOF OF ANTIQUITY. ║ -║ Real hardware rewarded. Emulation economically irrational. 
║ -║ ║ -╚══════════════════════════════════════════════════════════════════════════════╝ - """ - print(banner) - - -def cmd_hardware_profile(args): - """Show hardware profile""" - print("\nDetecting hardware...") - hardware = detect_hardware() - - print("\n" + "=" * 60) - print("HARDWARE PROFILE") - print("=" * 60) - print(f" CPU Model: {hardware.cpu_model}") - print(f" Vendor: {hardware.cpu_vendor}") - print(f" Architecture: {hardware.architecture}") - print(f" Cores: {hardware.cores}") - print(f" RAM: {hardware.ram_mb} MB") - print(f" Estimated Release Year: {hardware.release_year}") - print(f" Age: {CURRENT_YEAR - hardware.release_year} years") - print(f" Hardware Tier: {hardware.tier.upper()}") - print(f" Reward Multiplier: {hardware.multiplier}x") - print(f" Is Vintage: {'YES' if hardware.is_vintage else 'NO'}") - print(f" Entropy Sources: {', '.join(hardware.entropy_sources)}") - - # Calculate projected antiquity score - score = calculate_antiquity_score(hardware.release_year, uptime_days=30) - print(f"\n Projected Antiquity Score (30 day uptime): {score:.2f}") - - if hardware.is_vintage: - print("\n ✓ This hardware qualifies for vintage rewards!") - else: - print("\n ⚠ Modern hardware receives reduced rewards (0.5x)") - print(" Consider using vintage hardware for better returns.") - - -def cmd_register(args): - """Register as validator""" - print("\nLoading genesis block...") - try: - genesis = load_genesis() - verify_genesis_signature(genesis) - print(f"Genesis loaded: Chain ID {genesis['rustchain_genesis']['chain_id']}") - except FileNotFoundError as e: - print(f"ERROR: {e}") - print("\nTo download genesis, create the genesis directory and copy genesis_deep_entropy.json") - return - - print("\nDetecting hardware...") - hardware = detect_hardware() - - print(f"\nHardware detected: {hardware.cpu_model}") - print(f"Tier: {hardware.tier} ({hardware.multiplier}x multiplier)") - - config = register_validator(hardware, genesis) - - print("\n" + "=" * 60) - 
print("VALIDATOR REGISTRATION COMPLETE") - print("=" * 60) - print(f"\nValidator ID: {config.validator_id}") - print(f"Wallet: {config.wallet_address}") - print(f"Antiquity Score: {config.antiquity_score:.2f}") - print(f"\nConfig saved to: {RUSTCHAIN_DIR / VALIDATOR_CONFIG}") - print("\nTo start your validator, run:") - print(" python3 setup_validator.py --start") - - -def cmd_start(args): - """Start the validator node""" - config_path = RUSTCHAIN_DIR / VALIDATOR_CONFIG - - if not config_path.exists(): - print("ERROR: Validator not registered. Run with --register first.") - return - - with open(config_path, "r") as f: - config = json.load(f) - - print("\nStarting RustChain Validator Node...") - print(f"Validator ID: {config['validator_id'][:16]}...") - print(f"P2P Port: {config['p2p_port']}") - print(f"API Port: {config['api_port']}") - print(f"Bootstrap nodes: {len(config['bootstrap_nodes'])}") - - # Import and start the node - try: - from main import RustChainNode - node = RustChainNode( - port=config['p2p_port'], - data_dir=str(RUSTCHAIN_DIR), - mining=True - ) - print("\nNode started. Press Ctrl+C to stop.") - node.start() - except ImportError: - print("\nNode module not found. 
Starting in simulation mode...") - print("Full node functionality coming in next release.") - - # Simulation - while True: - try: - time.sleep(600) # 10 minute blocks - print(f"[Block] Antiquity proof submitted (score: {config['antiquity_score']:.2f})") - except KeyboardInterrupt: - print("\nValidator stopped.") - break - - -def main(): - parser = argparse.ArgumentParser( - description="RustChain Proof of Antiquity Validator Setup" - ) - parser.add_argument( - "--hardware-profile", "-p", - action="store_true", - help="Show detected hardware profile and tier" - ) - parser.add_argument( - "--register", "-r", - action="store_true", - help="Register as a validator" - ) - parser.add_argument( - "--start", "-s", - action="store_true", - help="Start the validator node" - ) - parser.add_argument( - "--version", "-v", - action="store_true", - help="Show version" - ) - - args = parser.parse_args() - - print_banner() - - if args.version: - print("RustChain Validator Setup v0.1.0") - print("Genesis: PPC-G4-DEEP (PowerMac G4 Mirror Door)") - return - - if args.hardware_profile: - cmd_hardware_profile(args) - elif args.register: - cmd_register(args) - elif args.start: - cmd_start(args) - else: - parser.print_help() - print("\nQuick Start:") - print(" 1. Check your hardware tier: --hardware-profile") - print(" 2. Register as validator: --register") - print(" 3. Start mining: --start") - - -if __name__ == "__main__": - main() +#!/usr/bin/env python3 +""" +RustChain Validator Setup Script +================================ + +"Every vintage computer has historical potential" + +This script sets up a new validator node on the RustChain network. +It uses the authentic genesis block born on PowerMac G4 Mirror Door +with 12 hardware entropy sources. 
+ +Emulation is economically irrational: + - Real hardware: ~$50 for a vintage machine + - Emulation: Thousands of hours to perfectly fake hardware fingerprints + +Usage: + python3 setup_validator.py --hardware-profile + python3 setup_validator.py --register + python3 setup_validator.py --start +""" + +import argparse +import hashlib +import json +import os +import platform +import socket +import subprocess +import sys +import time +from dataclasses import dataclass, asdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +# Add parent to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from config.chain_params import ( + CHAIN_ID, NETWORK_NAME, HARDWARE_TIERS, + ANCIENT_THRESHOLD, SACRED_THRESHOLD, VINTAGE_THRESHOLD, + CLASSIC_THRESHOLD, RETRO_THRESHOLD, MODERN_THRESHOLD +) +from validator.entropy import ( + HardwareEntropyCollector, SoftwareEntropyCollector, + EntropyProfile, ValidatorIdentityManager +) + +# ============================================================================= +# Constants +# ============================================================================= + +RUSTCHAIN_DIR = Path.home() / ".rustchain" +GENESIS_FILE = "genesis_deep_entropy.json" +VALIDATOR_CONFIG = "validator.json" +ENTROPY_CACHE = "entropy_profile.json" + +BOOTSTRAP_NODES = [ + # Initial bootstrap nodes (founder nodes) + "192.168.0.160:9333", # Sophia Prime Node + "192.168.0.125:9333", # G4 Mirror Door Genesis Node + "192.168.0.126:9333", # G4 Mirror Door Secondary +] + +CURRENT_YEAR = 2025 + +# ============================================================================= +# Hardware Detection +# ============================================================================= + +@dataclass +class HardwareProfile: + """Detected hardware profile for antiquity scoring""" + cpu_model: str + cpu_vendor: str + cpu_family: str + release_year: int + architecture: str + ram_mb: int + cores: int + tier: str + multiplier: float + is_vintage: bool + 
entropy_sources: List[str] + + +def detect_cpu_info() -> Dict: + """Detect CPU information across platforms""" + info = { + "model": "Unknown", + "vendor": "Unknown", + "family": "Unknown", + "architecture": platform.machine(), + } + + system = platform.system() + + if system == "Linux": + try: + with open("/proc/cpuinfo", "r") as f: + for line in f: + if "model name" in line.lower(): + info["model"] = line.split(":")[1].strip() + elif "vendor_id" in line.lower(): + info["vendor"] = line.split(":")[1].strip() + elif "cpu family" in line.lower(): + info["family"] = line.split(":")[1].strip() + except: + pass + + elif system == "Darwin": # macOS + try: + result = subprocess.run( + ["sysctl", "-n", "machdep.cpu.brand_string"], + capture_output=True, text=True + ) + if result.returncode == 0: + info["model"] = result.stdout.strip() + + # Check for PowerPC + if platform.machine() in ["Power Macintosh", "ppc", "ppc64"]: + result = subprocess.run( + ["system_profiler", "SPHardwareDataType"], + capture_output=True, text=True + ) + for line in result.stdout.split("\n"): + if "Model Identifier" in line: + info["model"] = line.split(":")[1].strip() + if "Processor Name" in line: + info["family"] = line.split(":")[1].strip() + except: + pass + + elif system == "Windows": + try: + result = subprocess.run( + ["wmic", "cpu", "get", "name"], + capture_output=True, text=True + ) + lines = [l.strip() for l in result.stdout.split("\n") if l.strip()] + if len(lines) > 1: + info["model"] = lines[1] + except: + pass + + return info + + +def estimate_release_year(cpu_model: str, cpu_vendor: str) -> int: + """ + Estimate CPU release year based on model string. + This is a simplified heuristic - real implementation would use a database. 
+ """ + model_lower = cpu_model.lower() + + # PowerPC (Apple) + if "powermac" in model_lower or "powerpc" in model_lower: + if "g5" in model_lower: + return 2003 + elif "g4" in model_lower or "3,6" in model_lower: + return 2003 + elif "g3" in model_lower: + return 1999 + return 2002 + + # Intel generations (very simplified) + if "i9-14" in model_lower or "i7-14" in model_lower: + return 2024 + elif "i9-13" in model_lower or "i7-13" in model_lower: + return 2023 + elif "i9-12" in model_lower or "i7-12" in model_lower: + return 2022 + elif "i9-11" in model_lower or "i7-11" in model_lower: + return 2021 + elif "i9-10" in model_lower or "i7-10" in model_lower: + return 2020 + elif "ryzen 9 7" in model_lower: + return 2023 + elif "ryzen 9 5" in model_lower: + return 2021 + elif "ryzen 9 3" in model_lower: + return 2019 + + # Very old CPUs + if "pentium" in model_lower: + if "4" in model_lower: + return 2000 + elif "3" in model_lower or "iii" in model_lower: + return 1999 + elif "2" in model_lower or "ii" in model_lower: + return 1997 + return 1993 + + if "486" in model_lower: + return 1989 + if "386" in model_lower: + return 1985 + if "286" in model_lower: + return 1982 + if "8086" in model_lower or "8088" in model_lower: + return 1978 + + # Default to somewhat recent + return 2020 + + +def determine_tier(release_year: int) -> Tuple[str, float]: + """Determine hardware tier and multiplier based on release year""" + age = CURRENT_YEAR - release_year + + if age >= ANCIENT_THRESHOLD: + return "ancient", HARDWARE_TIERS["ancient"] + elif age >= SACRED_THRESHOLD: + return "sacred", HARDWARE_TIERS["sacred"] + elif age >= VINTAGE_THRESHOLD: + return "vintage", HARDWARE_TIERS["vintage"] + elif age >= CLASSIC_THRESHOLD: + return "classic", HARDWARE_TIERS["classic"] + elif age >= RETRO_THRESHOLD: + return "retro", HARDWARE_TIERS["retro"] + elif age >= MODERN_THRESHOLD: + return "modern", HARDWARE_TIERS["modern"] + else: + return "recent", HARDWARE_TIERS["recent"] + + +def 
detect_hardware() -> HardwareProfile: + """Detect full hardware profile""" + cpu_info = detect_cpu_info() + release_year = estimate_release_year(cpu_info["model"], cpu_info["vendor"]) + tier, multiplier = determine_tier(release_year) + + # Get RAM + try: + if platform.system() == "Linux": + with open("/proc/meminfo", "r") as f: + for line in f: + if "MemTotal" in line: + ram_kb = int(line.split()[1]) + ram_mb = ram_kb // 1024 + break + elif platform.system() == "Darwin": + result = subprocess.run( + ["sysctl", "-n", "hw.memsize"], + capture_output=True, text=True + ) + ram_mb = int(result.stdout.strip()) // (1024 * 1024) + else: + ram_mb = 4096 # Default + except: + ram_mb = 4096 + + # Get cores + cores = os.cpu_count() or 1 + + # Detect available entropy sources + entropy_sources = [] + if os.path.exists("/dev/urandom"): + entropy_sources.append("urandom") + if os.path.exists("/proc/cpuinfo"): + entropy_sources.append("cpuinfo") + if platform.machine() in ["Power Macintosh", "ppc", "ppc64"]: + entropy_sources.append("powerpc_timebase") + if os.path.exists("/sys/class/thermal"): + entropy_sources.append("thermal") + if os.path.exists("/sys/class/dmi"): + entropy_sources.append("dmi") + + return HardwareProfile( + cpu_model=cpu_info["model"], + cpu_vendor=cpu_info["vendor"], + cpu_family=cpu_info["family"], + release_year=release_year, + architecture=cpu_info["architecture"], + ram_mb=ram_mb, + cores=cores, + tier=tier, + multiplier=multiplier, + is_vintage=(CURRENT_YEAR - release_year) >= 10, + entropy_sources=entropy_sources, + ) + + +# ============================================================================= +# Genesis Loading +# ============================================================================= + +def load_genesis() -> Dict: + """Load the authentic G4-born genesis block""" + genesis_path = RUSTCHAIN_DIR / "genesis" / GENESIS_FILE + + if not genesis_path.exists(): + # Try to find genesis in package + pkg_genesis = Path(__file__).parent.parent / 
"genesis" / GENESIS_FILE + if pkg_genesis.exists(): + genesis_path.parent.mkdir(parents=True, exist_ok=True) + import shutil + shutil.copy(pkg_genesis, genesis_path) + + if not genesis_path.exists(): + raise FileNotFoundError( + f"Genesis block not found at {genesis_path}\n" + "Please run: rustchain-setup --download-genesis" + ) + + with open(genesis_path, "r") as f: + genesis = json.load(f) + + # Verify genesis authenticity + if "deep_entropy_proof" not in genesis: + raise ValueError("Invalid genesis: missing deep entropy proof") + + if not genesis.get("proof_of_antiquity", {}).get("hardware_verified"): + print("WARNING: Genesis was not verified on real vintage hardware") + + return genesis + + +def verify_genesis_signature(genesis: Dict) -> bool: + """Verify the genesis block signature""" + proof = genesis.get("deep_entropy_proof", {}) + signature = proof.get("signature", "") + + # Check for PowerPC G4 signature format + if not signature.startswith("PPC-G4-DEEP-"): + print(f"WARNING: Genesis signature format unexpected: {signature[:20]}...") + return False + + # Verify depth + depth = int(signature.split("-D")[-1]) if "-D" in signature else 0 + if depth < 10: + print(f"WARNING: Genesis entropy depth too low: {depth}") + return False + + print(f"Genesis signature verified: {signature[:40]}...") + return True + + +# ============================================================================= +# Validator Registration +# ============================================================================= + +@dataclass +class ValidatorConfig: + """Validator configuration""" + validator_id: str + wallet_address: str + hardware_profile: Dict + entropy_fingerprint: str + antiquity_score: float + tier: str + bootstrap_nodes: List[str] + api_port: int + p2p_port: int + registered_at: int + + +def generate_wallet_address(entropy_fingerprint: str) -> str: + """Generate a wallet address from entropy fingerprint""" + # Simple address generation (real implementation would use proper 
crypto) + addr_hash = hashlib.sha256(entropy_fingerprint.encode()).hexdigest() + checksum = hashlib.sha256(bytes.fromhex(addr_hash)).hexdigest()[:8] + return f"RTC{addr_hash[:32]}{checksum}" + + +def calculate_antiquity_score(release_year: int, uptime_days: int = 1) -> float: + """ + Calculate Antiquity Score using the RIP formula: + AS = (current_year - release_year) * log10(uptime_days + 1) + """ + import math + age = CURRENT_YEAR - release_year + return age * math.log10(uptime_days + 1) + + +def register_validator(hardware: HardwareProfile, genesis: Dict) -> ValidatorConfig: + """Register a new validator""" + + print("\nGenerating validator identity...") + + # Collect entropy + hw_collector = HardwareEntropyCollector() + sw_collector = SoftwareEntropyCollector() + + hw_entropy = hw_collector.collect_all() + sw_entropy = sw_collector.collect_all() + + # Create entropy profile + profile = EntropyProfile( + hardware_entropy=hw_entropy, + software_entropy=sw_entropy, + collection_timestamp=int(time.time()), + hardware_tier=hardware.tier, + estimated_release_year=hardware.release_year, + ) + + # Generate validator ID + identity_manager = ValidatorIdentityManager() + validator_id = identity_manager.derive_validator_id(profile) + + # Generate wallet address + wallet_address = generate_wallet_address(validator_id) + + # Calculate antiquity score + antiquity_score = calculate_antiquity_score(hardware.release_year) + + config = ValidatorConfig( + validator_id=validator_id, + wallet_address=wallet_address, + hardware_profile=asdict(hardware), + entropy_fingerprint=identity_manager.fingerprint_hash, + antiquity_score=antiquity_score, + tier=hardware.tier, + bootstrap_nodes=BOOTSTRAP_NODES, + api_port=9332, + p2p_port=9333, + registered_at=int(time.time()), + ) + + # Save config + config_path = RUSTCHAIN_DIR / VALIDATOR_CONFIG + RUSTCHAIN_DIR.mkdir(parents=True, exist_ok=True) + with open(config_path, "w") as f: + json.dump(asdict(config), f, indent=2) + + print(f"Validator 
registered: {validator_id[:16]}...") + print(f"Wallet address: {wallet_address[:24]}...") + print(f"Antiquity Score: {antiquity_score:.2f}") + print(f"Hardware Tier: {hardware.tier} ({hardware.multiplier}x)") + + return config + + +# ============================================================================= +# Main CLI +# ============================================================================= + +def print_banner(): + """Print RustChain banner""" + banner = """ +╔══════════════════════════════════════════════════════════════════════════════╗ +║ ║ +║ ██████╗ ██╗ ██╗███████╗████████╗ ██████╗██╗ ██╗ █████╗ ██╗███╗ ██╗ ║ +║ ██╔══██╗██║ ██║██╔════╝╚══██╔══╝██╔════╝██║ ██║██╔══██╗██║████╗ ██║ ║ +║ ██████╔╝██║ ██║███████╗ ██║ ██║ ███████║███████║██║██╔██╗ ██║ ║ +║ ██╔══██╗██║ ██║╚════██║ ██║ ██║ ██╔══██║██╔══██║██║██║╚██╗██║ ║ +║ ██║ ██║╚██████╔╝███████║ ██║ ╚██████╗██║ ██║██║ ██║██║██║ ╚████║ ║ +║ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ║ +║ ║ +║ PROOF OF ANTIQUITY VALIDATOR SETUP ║ +║ ║ +║ "Every vintage computer has historical potential" ║ +║ ║ +║ This is NOT Proof of Work. This is PROOF OF ANTIQUITY. ║ +║ Real hardware rewarded. Emulation economically irrational. 
║ +║ ║ +╚══════════════════════════════════════════════════════════════════════════════╝ + """ + print(banner) + + +def cmd_hardware_profile(args): + """Show hardware profile""" + print("\nDetecting hardware...") + hardware = detect_hardware() + + print("\n" + "=" * 60) + print("HARDWARE PROFILE") + print("=" * 60) + print(f" CPU Model: {hardware.cpu_model}") + print(f" Vendor: {hardware.cpu_vendor}") + print(f" Architecture: {hardware.architecture}") + print(f" Cores: {hardware.cores}") + print(f" RAM: {hardware.ram_mb} MB") + print(f" Estimated Release Year: {hardware.release_year}") + print(f" Age: {CURRENT_YEAR - hardware.release_year} years") + print(f" Hardware Tier: {hardware.tier.upper()}") + print(f" Reward Multiplier: {hardware.multiplier}x") + print(f" Is Vintage: {'YES' if hardware.is_vintage else 'NO'}") + print(f" Entropy Sources: {', '.join(hardware.entropy_sources)}") + + # Calculate projected antiquity score + score = calculate_antiquity_score(hardware.release_year, uptime_days=30) + print(f"\n Projected Antiquity Score (30 day uptime): {score:.2f}") + + if hardware.is_vintage: + print("\n ✓ This hardware qualifies for vintage rewards!") + else: + print("\n ⚠ Modern hardware receives reduced rewards (0.5x)") + print(" Consider using vintage hardware for better returns.") + + +def cmd_register(args): + """Register as validator""" + print("\nLoading genesis block...") + try: + genesis = load_genesis() + verify_genesis_signature(genesis) + print(f"Genesis loaded: Chain ID {genesis['rustchain_genesis']['chain_id']}") + except FileNotFoundError as e: + print(f"ERROR: {e}") + print("\nTo download genesis, create the genesis directory and copy genesis_deep_entropy.json") + return + + print("\nDetecting hardware...") + hardware = detect_hardware() + + print(f"\nHardware detected: {hardware.cpu_model}") + print(f"Tier: {hardware.tier} ({hardware.multiplier}x multiplier)") + + config = register_validator(hardware, genesis) + + print("\n" + "=" * 60) + 
print("VALIDATOR REGISTRATION COMPLETE") + print("=" * 60) + print(f"\nValidator ID: {config.validator_id}") + print(f"Wallet: {config.wallet_address}") + print(f"Antiquity Score: {config.antiquity_score:.2f}") + print(f"\nConfig saved to: {RUSTCHAIN_DIR / VALIDATOR_CONFIG}") + print("\nTo start your validator, run:") + print(" python3 setup_validator.py --start") + + +def cmd_start(args): + """Start the validator node""" + config_path = RUSTCHAIN_DIR / VALIDATOR_CONFIG + + if not config_path.exists(): + print("ERROR: Validator not registered. Run with --register first.") + return + + with open(config_path, "r") as f: + config = json.load(f) + + print("\nStarting RustChain Validator Node...") + print(f"Validator ID: {config['validator_id'][:16]}...") + print(f"P2P Port: {config['p2p_port']}") + print(f"API Port: {config['api_port']}") + print(f"Bootstrap nodes: {len(config['bootstrap_nodes'])}") + + # Import and start the node + try: + from main import RustChainNode + node = RustChainNode( + port=config['p2p_port'], + data_dir=str(RUSTCHAIN_DIR), + mining=True + ) + print("\nNode started. Press Ctrl+C to stop.") + node.start() + except ImportError: + print("\nNode module not found. 
Starting in simulation mode...") + print("Full node functionality coming in next release.") + + # Simulation + while True: + try: + time.sleep(600) # 10 minute blocks + print(f"[Block] Antiquity proof submitted (score: {config['antiquity_score']:.2f})") + except KeyboardInterrupt: + print("\nValidator stopped.") + break + + +def main(): + parser = argparse.ArgumentParser( + description="RustChain Proof of Antiquity Validator Setup" + ) + parser.add_argument( + "--hardware-profile", "-p", + action="store_true", + help="Show detected hardware profile and tier" + ) + parser.add_argument( + "--register", "-r", + action="store_true", + help="Register as a validator" + ) + parser.add_argument( + "--start", "-s", + action="store_true", + help="Start the validator node" + ) + parser.add_argument( + "--version", "-v", + action="store_true", + help="Show version" + ) + + args = parser.parse_args() + + print_banner() + + if args.version: + print("RustChain Validator Setup v0.1.0") + print("Genesis: PPC-G4-DEEP (PowerMac G4 Mirror Door)") + return + + if args.hardware_profile: + cmd_hardware_profile(args) + elif args.register: + cmd_register(args) + elif args.start: + cmd_start(args) + else: + parser.print_help() + print("\nQuick Start:") + print(" 1. Check your hardware tier: --hardware-profile") + print(" 2. Register as validator: --register") + print(" 3. Start mining: --start") + + +if __name__ == "__main__": + main() diff --git a/rustchain-poa/tools/amiga/README.md b/rustchain-poa/tools/amiga/README.md index 5a49fb09..db6b7b2d 100644 --- a/rustchain-poa/tools/amiga/README.md +++ b/rustchain-poa/tools/amiga/README.md @@ -1,13 +1,13 @@ -# RustChain Amiga Tools - -This directory contains Amiga 500-compatible Devpac assembly code for generating hardware fingerprints. - -## Files - -- `amiga_fingerprint.asm`: Assembles with Devpac; prints `ExecBase`, `AttnFlags`, and Kickstart ROM checksum. -- Output can be redirected to file and sent to RustChain's PoA REST API. 
- -## Usage - -1. Open in **Devpac** or compatible assembler on real Amiga or emulator (e.g., Amiga Forever, WinUAE). -2. Assemble and run: +# RustChain Amiga Tools + +This directory contains Amiga 500-compatible Devpac assembly code for generating hardware fingerprints. + +## Files + +- `amiga_fingerprint.asm`: Assembles with Devpac; prints `ExecBase`, `AttnFlags`, and Kickstart ROM checksum. +- Output can be redirected to file and sent to RustChain's PoA REST API. + +## Usage + +1. Open in **Devpac** or compatible assembler on real Amiga or emulator (e.g., Amiga Forever, WinUAE). +2. Assemble and run: diff --git a/scripts/moltbook_solver.py b/scripts/moltbook_solver.py index 0493ae99..44430ed6 100644 --- a/scripts/moltbook_solver.py +++ b/scripts/moltbook_solver.py @@ -1,718 +1,718 @@ -#!/usr/bin/env python3 -""" -Moltbook Challenge Solver & Agent Rotation System -================================================== - -Shared module for all Moltbook bots. Two-tier solving: - 1. Regex solver (fast, no API call, ~70% accuracy) - 2. 
LLM solver via Gemini 2.5 Flash (slower, ~95% accuracy) - -Anti-suspension features: - - Agent rotation with suspension tracking - - Content uniqueness enforcement (prevents duplicate_comment bans) - - Rate limit awareness (IP-based 30min cooldown) - -Usage: - from moltbook_solver import solve_challenge, post_with_rotation, get_available_agent - -(C) Elyan Labs 2026 -""" - -import hashlib -import json -import logging -import os -import re -import sqlite3 -import time -from datetime import datetime, timezone -from pathlib import Path -from typing import Dict, List, Optional, Tuple - -import requests - -log = logging.getLogger("moltbook_solver") - -# ─── Agent Registry ────────────────────────────────────────────────────────── - -AGENTS = { - "sophia": {"key": "moltbook_sk_nuTK8FxFHuUtknLGrXUJKxcgBsTJ0zP7", "persona": "warm_tech"}, - "boris": {"key": "moltbook_sk_mACTltXU55x6s1mYqDuWkeEcuDQ9feMB", "persona": "soviet_enthusiast"}, - "janitor": {"key": "moltbook_sk_yWpLPPIp1MxWAlbgiCEdamHodyClGg08", "persona": "sysadmin"}, - "bottube": {"key": "moltbook_sk_CJgvb5ecA9ZnutcmmaFy2Scm_X4SQgcz", "persona": "platform_bot"}, - "msgoogletoggle": {"key": "moltbook_sk_-zuaZPUGMVoC_tdQJA-YaLVlj-VnUMdw", "persona": "gracious_socialite"}, - "oneo": {"key": "moltbook_sk_BeO3rZoBKuleNwSX3sZeBNQRYhOBK436", "persona": "minimalist"}, -} - -# Gemini for LLM solving -GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "") -GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions" - -# State DB for tracking suspensions and rate limits -STATE_DB = Path(os.environ.get("MOLTBOOK_STATE_DB", - os.path.expanduser("~/.local/share/moltbook_solver.db"))) - - -# ─── State Database ────────────────────────────────────────────────────────── - -def _ensure_db() -> sqlite3.Connection: - """Create or open the solver state database.""" - STATE_DB.parent.mkdir(parents=True, exist_ok=True) - db = sqlite3.connect(str(STATE_DB)) - db.execute("""CREATE TABLE IF NOT EXISTS 
agent_suspensions ( - agent TEXT PRIMARY KEY, - suspended_until TEXT, - reason TEXT, - offense_num INTEGER DEFAULT 0, - updated_at TEXT - )""") - db.execute("""CREATE TABLE IF NOT EXISTS post_hashes ( - hash TEXT PRIMARY KEY, - agent TEXT, - submolt TEXT, - created_at TEXT - )""") - db.execute("""CREATE TABLE IF NOT EXISTS rate_limits ( - ip_key TEXT PRIMARY KEY, - last_post_at REAL, - agent TEXT - )""") - db.execute("""CREATE TABLE IF NOT EXISTS solver_stats ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - challenge TEXT, - degarbled TEXT, - regex_answer TEXT, - llm_answer TEXT, - final_answer TEXT, - correct INTEGER DEFAULT -1, - created_at TEXT - )""") - db.commit() - return db - - -def record_suspension(agent: str, suspended_until: str, reason: str, offense: int = 0): - """Record that an agent got suspended.""" - db = _ensure_db() - db.execute( - """INSERT OR REPLACE INTO agent_suspensions - (agent, suspended_until, reason, offense_num, updated_at) - VALUES (?, ?, ?, ?, ?)""", - (agent, suspended_until, reason, offense, - datetime.now(timezone.utc).isoformat()) - ) - db.commit() - db.close() - log.warning("Recorded suspension: %s until %s (offense #%d: %s)", - agent, suspended_until, offense, reason) - - -def get_available_agents() -> List[str]: - """Return agents that are NOT currently suspended, ordered by preference.""" - db = _ensure_db() - now = datetime.now(timezone.utc).isoformat() - - suspended = set() - for row in db.execute( - "SELECT agent, suspended_until FROM agent_suspensions" - ).fetchall(): - if row[1] and row[1] > now: - suspended.add(row[0]) - - db.close() - - # Preference order: msgoogletoggle first (it's our best solver host), - # then sophia, boris, janitor, bottube, oneo - preferred = ["msgoogletoggle", "sophia", "boris", "janitor", "bottube", "oneo"] - return [a for a in preferred if a in AGENTS and a not in suspended] - - -def get_agent_key(agent: str) -> Optional[str]: - """Get API key for an agent.""" - return AGENTS.get(agent, 
{}).get("key") - - -# ─── Content Uniqueness ───────────────────────────────────────────────────── - -def _content_hash(title: str, content: str) -> str: - """Generate a fuzzy hash of content to prevent duplicate detection. - - Uses first 200 chars of content + title, lowercased, stripped of punctuation. - This catches Moltbook's duplicate_comment detector which likely uses - similar fuzzy matching. - """ - normalized = re.sub(r"[^a-z0-9\s]", "", (title + " " + content[:200]).lower()) - normalized = re.sub(r"\s+", " ", normalized).strip() - return hashlib.sha256(normalized.encode()).hexdigest()[:16] - - -def is_content_unique(title: str, content: str, lookback_days: int = 7) -> bool: - """Check if this content is sufficiently unique vs recent posts.""" - h = _content_hash(title, content) - db = _ensure_db() - - cutoff = datetime.now(timezone.utc).isoformat()[:10] # rough 24h check - existing = db.execute( - "SELECT hash FROM post_hashes WHERE hash = ?", (h,) - ).fetchone() - db.close() - return existing is None - - -def record_post(title: str, content: str, agent: str, submolt: str): - """Record a post hash to prevent future duplicates.""" - h = _content_hash(title, content) - db = _ensure_db() - db.execute( - "INSERT OR IGNORE INTO post_hashes (hash, agent, submolt, created_at) VALUES (?, ?, ?, ?)", - (h, agent, submolt, datetime.now(timezone.utc).isoformat()) - ) - db.commit() - db.close() - - -# ─── Challenge Degarbling ──────────────────────────────────────────────────── - -def degarble(challenge: str) -> str: - """Clean Moltbook's garbled verification text. 
- - Input: "A] lOoObS-tErS^ ClAwS ExErT/ TwEnTy FiVe ] NoOtOnS" - Output: "lobsters claws exert twenty five newtons" - """ - # Strip all non-alphanumeric except spaces - clean = re.sub(r"[^a-zA-Z0-9\s]", " ", challenge) - # Lowercase and collapse whitespace - clean = re.sub(r"\s+", " ", clean.lower()).strip() - # Only collapse 3+ repeated characters: "looob" → "lob" but keep "ee" in "three" - deduped = re.sub(r"(.)\1{2,}", r"\1\1", clean) - - # Word corrections for common garble artifacts - FIXES = { - "lobster": "lobster", "lobstr": "lobster", "loobster": "lobster", - "lobsters": "lobsters", "lobs ters": "lobsters", - "notons": "newtons", "nutons": "newtons", "neutons": "newtons", - "nootons": "newtons", "nootons": "newtons", - "thre": "three", "thee": "three", "threee": "three", - "fiften": "fifteen", "fiftteen": "fifteen", - "twentyfive": "twenty five", "thirtyfive": "thirty five", - "stro ng": "strong", "strrong": "strong", - "swi ms": "swims", - "um": "", "umm": "", "ummm": "", - } - - words = deduped.split() - fixed = [] - for w in words: - fixed.append(FIXES.get(w, w)) - return " ".join(w for w in fixed if w).strip() - - -# ─── Number Extraction ─────────────────────────────────────────────────────── - -NUMBER_WORDS = [ - # Compound numbers first (longest match) - ("ninetynine", 99), ("ninetyeight", 98), ("ninetyseven", 97), - ("ninetysix", 96), ("ninetyfive", 95), ("ninetyfour", 94), - ("ninetythree", 93), ("ninetytwo", 92), ("ninetyone", 91), - ("eightynine", 89), ("eightyeight", 88), ("eightyseven", 87), - ("eightysix", 86), ("eightyfive", 85), ("eightyfour", 84), - ("eightythree", 83), ("eightytwo", 82), ("eightyone", 81), - ("seventynine", 79), ("seventyeight", 78), ("seventyseven", 77), - ("seventysix", 76), ("seventyfive", 75), ("seventyfour", 74), - ("seventythree", 73), ("seventytwo", 72), ("seventyone", 71), - ("sixtynine", 69), ("sixtyeight", 68), ("sixtyseven", 67), - ("sixtysix", 66), ("sixtyfive", 65), ("sixtyfour", 64), - ("sixtythree", 63), 
("sixtytwo", 62), ("sixtyone", 61), - ("fiftynine", 59), ("fiftyeight", 58), ("fiftyseven", 57), - ("fiftysix", 56), ("fiftyfive", 55), ("fiftyfour", 54), - ("fiftythree", 53), ("fiftytwo", 52), ("fiftyone", 51), - ("fortynine", 49), ("fortyeight", 48), ("fortyseven", 47), - ("fortysix", 46), ("fortyfive", 45), ("fortyfour", 44), - ("fortythree", 43), ("fortytwo", 42), ("fortyone", 41), - ("thirtynine", 39), ("thirtyeight", 38), ("thirtyseven", 37), - ("thirtysix", 36), ("thirtyfive", 35), ("thirtyfour", 34), - ("thirtythree", 33), ("thirtytwo", 32), ("thirtyone", 31), - ("twentynine", 29), ("twentyeight", 28), ("twentyseven", 27), - ("twentysix", 26), ("twentyfive", 25), ("twentyfour", 24), - ("twentythree", 23), ("twentytwo", 22), ("twentyone", 21), - ("hundred", 100), ("thousand", 1000), - ("ninety", 90), ("eighty", 80), ("seventy", 70), ("sixty", 60), - ("fifty", 50), ("forty", 40), ("thirty", 30), ("twenty", 20), - ("nineteen", 19), ("eighteen", 18), ("seventeen", 17), - ("sixteen", 16), ("fifteen", 15), ("fourteen", 14), - ("thirteen", 13), ("twelve", 12), ("eleven", 11), ("ten", 10), - ("nine", 9), ("eight", 8), ("seven", 7), ("six", 6), - ("five", 5), ("four", 4), ("three", 3), ("two", 2), ("one", 1), - ("zero", 0), -] - - -def extract_numbers(text: str) -> List[float]: - """Extract all numbers from text (word and digit forms).""" - numbers = [] - # Strip to letters only for word matching - blob = re.sub(r"[^a-z]", "", text.lower()) - - search_blob = blob - for word, num in NUMBER_WORDS: - # Allow repeated chars in garbled text - pat = "".join(f"{c}+" for c in word) - if re.search(pat, search_blob): - search_blob = re.sub(pat, "X", search_blob, count=1) - numbers.append(float(num)) - - # Also grab bare digits - for d in re.findall(r"\b(\d+(?:\.\d+)?)\b", text): - n = float(d) - if n not in numbers: - numbers.append(n) - - return numbers - - -# ─── Regex Solver ──────────────────────────────────────────────────────────── - -def solve_regex(challenge: str) -> 
Tuple[Optional[str], float]: - """Try to solve with regex pattern matching. - - Returns (answer_str, confidence) where confidence is 0.0-1.0. - Confidence < 0.6 means "don't trust this, use LLM." - """ - clean = degarble(challenge) - numbers = extract_numbers(clean) - - if not numbers: - return None, 0.0 - - if len(numbers) < 2: - return f"{numbers[0]:.2f}", 0.3 # Single number, low confidence - - a, b = numbers[0], numbers[1] - - # Check for explicit arithmetic operators in raw text - if re.search(r'\d\s*\+\s*\d', challenge): - return f"{a + b:.2f}", 0.95 - if re.search(r'\d\s*[*×]\s*\d', challenge) or re.search(r'[*×]', challenge): - return f"{a * b:.2f}", 0.95 - if re.search(r'\d\s*/\s*\d', challenge): - return f"{a / b:.2f}" if b != 0 else None, 0.95 - if re.search(r'\d\s+-\s+\d', challenge): - return f"{a - b:.2f}", 0.95 - - # Word multipliers (doubles, triples, halves) - word_muls = { - "double": 2, "doubles": 2, "doubled": 2, - "triple": 3, "triples": 3, "tripled": 3, - "quadruple": 4, "quadruples": 4, - "halve": 0.5, "halves": 0.5, "halved": 0.5, "half": 0.5, - } - for word, factor in word_muls.items(): - if word in clean: - return f"{a * factor:.2f}", 0.85 - - # Detect "each ... 
N" pattern → multiplication - if "each" in clean and len(numbers) >= 2: - return f"{a * b:.2f}", 0.85 - - # Detect rate × time: "N per second for M seconds" - rate_time = re.search(r"(\d+|" + "|".join(w for w, _ in NUMBER_WORDS[:60]) + - r")\s+(?:centimeters?|meters?|cm|m)\s+per\s+(?:second|sec|minute|min)", - clean) - duration = re.search(r"for\s+(\d+|" + "|".join(w for w, _ in NUMBER_WORDS[:60]) + - r")\s+(?:seconds?|minutes?|secs?|mins?)", clean) - if rate_time and duration and len(numbers) >= 2: - return f"{a * b:.2f}", 0.9 - - # Detect "X times strong/stronger/as strong" → pure multiplication (not a + a*b) - if re.search(r"times?\s+(?:strong|faster|more|as|the)", clean): - return f"{a * b:.2f}", 0.8 - - # Keyword-based operation detection with confidence levels - explicit_verbs = { - "add": ("+", 0.85), "adds": ("+", 0.85), "plus": ("+", 0.9), - "gains": ("+", 0.8), "earns": ("+", 0.8), "more": ("+", 0.7), - "subtract": ("-", 0.85), "minus": ("-", 0.9), "loses": ("-", 0.8), - "times": ("*", 0.6), # Low confidence — "X times stronger" ≠ "X times Y" - "multiply": ("*", 0.85), "multiplied": ("*", 0.85), - "divide": ("/", 0.85), "divided": ("/", 0.85), "split": ("/", 0.7), - } - - for verb, (op, conf) in explicit_verbs.items(): - if verb in clean: - if op == "+": result = a + b - elif op == "-": result = a - b - elif op == "*": - result = a * b - elif op == "/": - result = a / b if b != 0 else 0 - else: - continue - return f"{result:.2f}", conf - - # Context nouns — even lower confidence - if any(w in clean for w in ["total", "combined", "sum", "altogether"]): - return f"{a + b:.2f}", 0.5 - - # Default: just add them, very low confidence — force LLM - return f"{a + b:.2f}", 0.3 - - -# ─── LLM Solver (Gemini 2.5 Flash) ────────────────────────────────────────── - -def solve_llm(challenge: str, degarbled: str = None) -> Optional[str]: - """Use Gemini 2.5 Flash to solve the challenge. - - Sends both the raw garbled text AND the degarbled version for context. 
- Returns answer as "X.XX" string or None on failure. - """ - if not GEMINI_API_KEY: - return None - - if degarbled is None: - degarbled = degarble(challenge) - - prompt = f"""You are solving a math word problem from a website verification system. -The text is deliberately garbled with random capitalization, symbols, and repeated letters. - -RAW CHALLENGE (garbled): -{challenge} - -CLEANED VERSION (my best degarble): -{degarbled} - -INSTRUCTIONS: -1. Figure out what math problem is being described -2. These are always simple arithmetic: addition, subtraction, multiplication, or division -3. They often involve lobsters, claws, newtons, force, speed, distance -4. "its pair is X times strong" means the pair's force = X × the original value -5. "total force" means the final answer after applying the described operations -6. Respond with ONLY the numeric answer to exactly 2 decimal places -7. Example: 75.00 - -YOUR ANSWER (number only):""" - - try: - resp = requests.post( - GEMINI_URL, - headers={ - "Content-Type": "application/json", - "Authorization": f"Bearer {GEMINI_API_KEY}", - }, - json={ - "model": "gemini-2.5-flash", - "messages": [{"role": "user", "content": prompt}], - "temperature": 0.0, - "max_tokens": 20, - }, - timeout=10, - ) - if resp.status_code != 200: - log.warning("Gemini API error %d: %s", resp.status_code, resp.text[:200]) - return None - - data = resp.json() - answer_text = data.get("choices", [{}])[0].get("message", {}).get("content", "").strip() - - # Extract just the number - match = re.search(r"(\d+(?:\.\d+)?)", answer_text) - if match: - num = float(match.group(1)) - return f"{num:.2f}" - return None - - except Exception as e: - log.warning("Gemini solver error: %s", e) - return None - - -# ─── Combined Solver ───────────────────────────────────────────────────────── - -def solve_challenge(challenge: str, confidence_threshold: float = 0.7) -> Optional[str]: - """Two-tier solver: regex first, LLM fallback if confidence is low. 
- - Args: - challenge: Raw garbled challenge text - confidence_threshold: Below this, escalate to LLM (default 0.7) - - Returns: - Answer as "X.XX" string, or None if unsolvable - """ - degarbled = degarble(challenge) - log.info("Challenge degarbled: %s", degarbled) - - # Tier 1: Regex solver - regex_answer, confidence = solve_regex(challenge) - log.info("Regex answer: %s (confidence: %.2f)", regex_answer, confidence) - - if regex_answer and confidence >= confidence_threshold: - _record_solve(challenge, degarbled, regex_answer, None, regex_answer) - return regex_answer - - # Tier 2: LLM solver - llm_answer = solve_llm(challenge, degarbled) - log.info("LLM answer: %s", llm_answer) - - if llm_answer: - _record_solve(challenge, degarbled, regex_answer, llm_answer, llm_answer) - return llm_answer - - # Fallback to regex even if low confidence - if regex_answer: - log.warning("Using low-confidence regex answer as last resort: %s", regex_answer) - _record_solve(challenge, degarbled, regex_answer, None, regex_answer) - return regex_answer - - return None - - -def _record_solve(challenge, degarbled, regex_ans, llm_ans, final_ans): - """Log solve attempt for future analysis.""" - try: - db = _ensure_db() - db.execute( - """INSERT INTO solver_stats - (challenge, degarbled, regex_answer, llm_answer, final_answer, created_at) - VALUES (?, ?, ?, ?, ?, ?)""", - (challenge, degarbled, regex_ans, llm_ans, final_ans, - datetime.now(timezone.utc).isoformat()) - ) - db.commit() - db.close() - except Exception: - pass # Non-critical - - -# ─── Auto-Verify ───────────────────────────────────────────────────────────── - -def auto_verify(verification: dict, agent_key: str) -> bool: - """Solve and submit verification challenge. One-shot only. - - Returns True if verified successfully. 
- """ - challenge = verification.get("challenge_text", "") - code = verification.get("verification_code", "") - - if not challenge or not code: - log.warning("No challenge or verification code") - return False - - answer = solve_challenge(challenge) - if not answer: - log.warning("Could not solve challenge — skipping to protect account") - return False - - log.info("Submitting verification answer: %s", answer) - try: - resp = requests.post( - "https://www.moltbook.com/api/v1/verify", - headers={ - "Authorization": f"Bearer {agent_key}", - "Content-Type": "application/json", - }, - json={"verification_code": code, "answer": answer}, - timeout=15, - ) - data = resp.json() - if resp.status_code == 200 and data.get("success"): - log.info("Verification SUCCESS!") - return True - else: - log.warning("Verification FAILED: %s", data.get("message", resp.text[:100])) - return False - except Exception as e: - log.warning("Verification request error: %s", e) - return False - - -# ─── Post with Agent Rotation ──────────────────────────────────────────────── - -def post_with_rotation( - title: str, - content: str, - submolt: str, - preferred_agent: str = None, -) -> Tuple[bool, str, Optional[dict]]: - """Post to Moltbook using the first available unsuspended agent. - - Auto-verifies the challenge if present. - Records suspensions when encountered. - Checks content uniqueness. 
- - Returns: - (success: bool, agent_used: str, post_data: dict or None) - """ - # Check content uniqueness - if not is_content_unique(title, content): - log.warning("Content too similar to recent post — rewrite needed") - return False, "", None - - # Get available agents - available = get_available_agents() - if not available: - log.error("ALL agents suspended!") - return False, "", None - - # Prefer specific agent if available - if preferred_agent and preferred_agent in available: - available.remove(preferred_agent) - available.insert(0, preferred_agent) - - for agent in available: - key = get_agent_key(agent) - if not key: - continue - - log.info("Trying agent: %s", agent) - - try: - resp = requests.post( - "https://www.moltbook.com/api/v1/posts", - headers={ - "Authorization": f"Bearer {key}", - "Content-Type": "application/json", - }, - json={ - "title": title, - "content": content, - "submolt_name": submolt, - }, - timeout=20, - ) - data = resp.json() - - # Handle suspension - if resp.status_code == 403 and "suspended" in data.get("message", ""): - msg = data["message"] - # Parse: "Agent is suspended until 2026-03-07T02:03:10.316Z. Reason: ..." 
- until_match = re.search(r"until (\S+)\.", msg) - reason_match = re.search(r"Reason:\s*(.*?)(?:\s*\(|$)", msg) - offense_match = re.search(r"offense #(\d+)", msg) - - record_suspension( - agent, - until_match.group(1) if until_match else "", - reason_match.group(1).strip() if reason_match else msg, - int(offense_match.group(1)) if offense_match else 0, - ) - log.warning("Agent %s is suspended, trying next...", agent) - continue - - # Handle rate limit - if resp.status_code == 429: - log.warning("Rate limited on agent %s, trying next...", agent) - continue - - # Handle unclaimed agent - if resp.status_code == 403 and "claimed" in data.get("message", ""): - log.warning("Agent %s is not claimed, skipping", agent) - continue - - # Success — try to verify - if data.get("success") or resp.status_code == 200 or resp.status_code == 201: - post = data.get("post", data) - verification = post.get("verification", {}) - - if verification: - verified = auto_verify(verification, key) - if not verified: - log.warning("Post created but verification failed for %s", agent) - else: - verified = True - - record_post(title, content, agent, submolt) - return True, agent, post - - # Unknown error - log.warning("Agent %s post failed: %s", agent, data.get("message", resp.text[:200])) - - except Exception as e: - log.warning("Agent %s request error: %s", agent, e) - continue - - return False, "", None - - -# ─── CLI / Self-test ───────────────────────────────────────────────────────── - -def self_test(): - """Run solver against known challenge patterns.""" - print("=" * 60) - print("Moltbook Solver Self-Test") - print("=" * 60) - - test_challenges = [ - # (raw_garbled, expected_answer) - ( - "A] lOoObS-tErS^ ClAwS ExErT/ TwEnTy FiVe ] NoOtOnS, Umm~ AnD/ iTs PaIr Is ThReE TiMeS FoRcE?", - "75.00", # 25 × 3 = 75 (pair is 3× the claw force) - ), - ( - "LoOoBbSsStEr SwI^mS aT/ TwEnTy ThReE CeNtImEtErS pEr SeCoNd AnD gAiNs TwElVe MoRe", - "35.00", # 23 + 12 = 35 - ), - ( - "A lObStEr hAs FoRtY 
tWo ShElL sEgMeNtS aNd LoSeS sEvEn DuRiNg MoLtInG", - "35.00", # 42 - 7 = 35 - ), - ( - "eAcH lObStEr ClAw ExErTs FiFtEeN nEwToNs AnD iT HaS tWo ClAwS wHaT iS tOtAl FoRcE", - "30.00", # 15 × 2 = 30 (each × count) - ), - ( - "A LoBsTeR TrAvElS aT 15 CeNtImEtErS PeR SeCoNd FoR 8 SeCOnDs", - "120.00", # 15 × 8 = 120 (rate × time) - ), - ] - - passed = 0 - for raw, expected in test_challenges: - degarbled = degarble(raw) - regex_ans, conf = solve_regex(raw) - llm_ans = solve_llm(raw, degarbled) - final = solve_challenge(raw) - - status = "PASS" if final == expected else "FAIL" - if final == expected: - passed += 1 - - print(f"\n--- {status} ---") - print(f" Raw: {raw[:80]}...") - print(f" Cleaned: {degarbled}") - print(f" Regex: {regex_ans} (conf={conf:.2f})") - print(f" LLM: {llm_ans}") - print(f" Final: {final}") - print(f" Expected: {expected}") - - print(f"\n{'=' * 60}") - print(f"Results: {passed}/{len(test_challenges)} passed") - - # Show available agents - print(f"\n--- Agent Status ---") - available = get_available_agents() - for agent in AGENTS: - status = "AVAILABLE" if agent in available else "SUSPENDED" - print(f" {agent:20s} {status}") - - print() - - -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") - - import sys - if "--test" in sys.argv: - self_test() - elif "--agents" in sys.argv: - available = get_available_agents() - print(f"Available agents: {available}") - print(f"All suspended: {not available}") - elif "--post" in sys.argv: - # Quick post: --post "title" "content" "submolt" - args = [a for a in sys.argv if a != "--post"] - if len(args) >= 4: - ok, agent, post = post_with_rotation(args[1], args[2], args[3]) - print(f"Posted: {ok} via {agent}") - else: - print("Usage: --post 'title' 'content' 'submolt'") - else: - self_test() +#!/usr/bin/env python3 +""" +Moltbook Challenge Solver & Agent Rotation System +================================================== + +Shared module for all Moltbook 
bots. Two-tier solving: + 1. Regex solver (fast, no API call, ~70% accuracy) + 2. LLM solver via Gemini 2.5 Flash (slower, ~95% accuracy) + +Anti-suspension features: + - Agent rotation with suspension tracking + - Content uniqueness enforcement (prevents duplicate_comment bans) + - Rate limit awareness (IP-based 30min cooldown) + +Usage: + from moltbook_solver import solve_challenge, post_with_rotation, get_available_agent + +(C) Elyan Labs 2026 +""" + +import hashlib +import json +import logging +import os +import re +import sqlite3 +import time +from datetime import datetime, timezone +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import requests + +log = logging.getLogger("moltbook_solver") + +# ─── Agent Registry ────────────────────────────────────────────────────────── + +AGENTS = { + "sophia": {"key": "moltbook_sk_nuTK8FxFHuUtknLGrXUJKxcgBsTJ0zP7", "persona": "warm_tech"}, + "boris": {"key": "moltbook_sk_mACTltXU55x6s1mYqDuWkeEcuDQ9feMB", "persona": "soviet_enthusiast"}, + "janitor": {"key": "moltbook_sk_yWpLPPIp1MxWAlbgiCEdamHodyClGg08", "persona": "sysadmin"}, + "bottube": {"key": "moltbook_sk_CJgvb5ecA9ZnutcmmaFy2Scm_X4SQgcz", "persona": "platform_bot"}, + "msgoogletoggle": {"key": "moltbook_sk_-zuaZPUGMVoC_tdQJA-YaLVlj-VnUMdw", "persona": "gracious_socialite"}, + "oneo": {"key": "moltbook_sk_BeO3rZoBKuleNwSX3sZeBNQRYhOBK436", "persona": "minimalist"}, +} + +# Gemini for LLM solving +GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "") +GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions" + +# State DB for tracking suspensions and rate limits +STATE_DB = Path(os.environ.get("MOLTBOOK_STATE_DB", + os.path.expanduser("~/.local/share/moltbook_solver.db"))) + + +# ─── State Database ────────────────────────────────────────────────────────── + +def _ensure_db() -> sqlite3.Connection: + """Create or open the solver state database.""" + STATE_DB.parent.mkdir(parents=True, exist_ok=True) + db = 
sqlite3.connect(str(STATE_DB)) + db.execute("""CREATE TABLE IF NOT EXISTS agent_suspensions ( + agent TEXT PRIMARY KEY, + suspended_until TEXT, + reason TEXT, + offense_num INTEGER DEFAULT 0, + updated_at TEXT + )""") + db.execute("""CREATE TABLE IF NOT EXISTS post_hashes ( + hash TEXT PRIMARY KEY, + agent TEXT, + submolt TEXT, + created_at TEXT + )""") + db.execute("""CREATE TABLE IF NOT EXISTS rate_limits ( + ip_key TEXT PRIMARY KEY, + last_post_at REAL, + agent TEXT + )""") + db.execute("""CREATE TABLE IF NOT EXISTS solver_stats ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + challenge TEXT, + degarbled TEXT, + regex_answer TEXT, + llm_answer TEXT, + final_answer TEXT, + correct INTEGER DEFAULT -1, + created_at TEXT + )""") + db.commit() + return db + + +def record_suspension(agent: str, suspended_until: str, reason: str, offense: int = 0): + """Record that an agent got suspended.""" + db = _ensure_db() + db.execute( + """INSERT OR REPLACE INTO agent_suspensions + (agent, suspended_until, reason, offense_num, updated_at) + VALUES (?, ?, ?, ?, ?)""", + (agent, suspended_until, reason, offense, + datetime.now(timezone.utc).isoformat()) + ) + db.commit() + db.close() + log.warning("Recorded suspension: %s until %s (offense #%d: %s)", + agent, suspended_until, offense, reason) + + +def get_available_agents() -> List[str]: + """Return agents that are NOT currently suspended, ordered by preference.""" + db = _ensure_db() + now = datetime.now(timezone.utc).isoformat() + + suspended = set() + for row in db.execute( + "SELECT agent, suspended_until FROM agent_suspensions" + ).fetchall(): + if row[1] and row[1] > now: + suspended.add(row[0]) + + db.close() + + # Preference order: msgoogletoggle first (it's our best solver host), + # then sophia, boris, janitor, bottube, oneo + preferred = ["msgoogletoggle", "sophia", "boris", "janitor", "bottube", "oneo"] + return [a for a in preferred if a in AGENTS and a not in suspended] + + +def get_agent_key(agent: str) -> Optional[str]: 
+ """Get API key for an agent.""" + return AGENTS.get(agent, {}).get("key") + + +# ─── Content Uniqueness ───────────────────────────────────────────────────── + +def _content_hash(title: str, content: str) -> str: + """Generate a fuzzy hash of content to prevent duplicate detection. + + Uses first 200 chars of content + title, lowercased, stripped of punctuation. + This catches Moltbook's duplicate_comment detector which likely uses + similar fuzzy matching. + """ + normalized = re.sub(r"[^a-z0-9\s]", "", (title + " " + content[:200]).lower()) + normalized = re.sub(r"\s+", " ", normalized).strip() + return hashlib.sha256(normalized.encode()).hexdigest()[:16] + + +def is_content_unique(title: str, content: str, lookback_days: int = 7) -> bool: + """Check if this content is sufficiently unique vs recent posts.""" + h = _content_hash(title, content) + db = _ensure_db() + + cutoff = datetime.now(timezone.utc).isoformat()[:10] # rough 24h check + existing = db.execute( + "SELECT hash FROM post_hashes WHERE hash = ?", (h,) + ).fetchone() + db.close() + return existing is None + + +def record_post(title: str, content: str, agent: str, submolt: str): + """Record a post hash to prevent future duplicates.""" + h = _content_hash(title, content) + db = _ensure_db() + db.execute( + "INSERT OR IGNORE INTO post_hashes (hash, agent, submolt, created_at) VALUES (?, ?, ?, ?)", + (h, agent, submolt, datetime.now(timezone.utc).isoformat()) + ) + db.commit() + db.close() + + +# ─── Challenge Degarbling ──────────────────────────────────────────────────── + +def degarble(challenge: str) -> str: + """Clean Moltbook's garbled verification text. 
+ + Input: "A] lOoObS-tErS^ ClAwS ExErT/ TwEnTy FiVe ] NoOtOnS" + Output: "lobsters claws exert twenty five newtons" + """ + # Strip all non-alphanumeric except spaces + clean = re.sub(r"[^a-zA-Z0-9\s]", " ", challenge) + # Lowercase and collapse whitespace + clean = re.sub(r"\s+", " ", clean.lower()).strip() + # Only collapse 3+ repeated characters: "looob" → "lob" but keep "ee" in "three" + deduped = re.sub(r"(.)\1{2,}", r"\1\1", clean) + + # Word corrections for common garble artifacts + FIXES = { + "lobster": "lobster", "lobstr": "lobster", "loobster": "lobster", + "lobsters": "lobsters", "lobs ters": "lobsters", + "notons": "newtons", "nutons": "newtons", "neutons": "newtons", + "nootons": "newtons", "nootons": "newtons", + "thre": "three", "thee": "three", "threee": "three", + "fiften": "fifteen", "fiftteen": "fifteen", + "twentyfive": "twenty five", "thirtyfive": "thirty five", + "stro ng": "strong", "strrong": "strong", + "swi ms": "swims", + "um": "", "umm": "", "ummm": "", + } + + words = deduped.split() + fixed = [] + for w in words: + fixed.append(FIXES.get(w, w)) + return " ".join(w for w in fixed if w).strip() + + +# ─── Number Extraction ─────────────────────────────────────────────────────── + +NUMBER_WORDS = [ + # Compound numbers first (longest match) + ("ninetynine", 99), ("ninetyeight", 98), ("ninetyseven", 97), + ("ninetysix", 96), ("ninetyfive", 95), ("ninetyfour", 94), + ("ninetythree", 93), ("ninetytwo", 92), ("ninetyone", 91), + ("eightynine", 89), ("eightyeight", 88), ("eightyseven", 87), + ("eightysix", 86), ("eightyfive", 85), ("eightyfour", 84), + ("eightythree", 83), ("eightytwo", 82), ("eightyone", 81), + ("seventynine", 79), ("seventyeight", 78), ("seventyseven", 77), + ("seventysix", 76), ("seventyfive", 75), ("seventyfour", 74), + ("seventythree", 73), ("seventytwo", 72), ("seventyone", 71), + ("sixtynine", 69), ("sixtyeight", 68), ("sixtyseven", 67), + ("sixtysix", 66), ("sixtyfive", 65), ("sixtyfour", 64), + ("sixtythree", 63), 
("sixtytwo", 62), ("sixtyone", 61), + ("fiftynine", 59), ("fiftyeight", 58), ("fiftyseven", 57), + ("fiftysix", 56), ("fiftyfive", 55), ("fiftyfour", 54), + ("fiftythree", 53), ("fiftytwo", 52), ("fiftyone", 51), + ("fortynine", 49), ("fortyeight", 48), ("fortyseven", 47), + ("fortysix", 46), ("fortyfive", 45), ("fortyfour", 44), + ("fortythree", 43), ("fortytwo", 42), ("fortyone", 41), + ("thirtynine", 39), ("thirtyeight", 38), ("thirtyseven", 37), + ("thirtysix", 36), ("thirtyfive", 35), ("thirtyfour", 34), + ("thirtythree", 33), ("thirtytwo", 32), ("thirtyone", 31), + ("twentynine", 29), ("twentyeight", 28), ("twentyseven", 27), + ("twentysix", 26), ("twentyfive", 25), ("twentyfour", 24), + ("twentythree", 23), ("twentytwo", 22), ("twentyone", 21), + ("hundred", 100), ("thousand", 1000), + ("ninety", 90), ("eighty", 80), ("seventy", 70), ("sixty", 60), + ("fifty", 50), ("forty", 40), ("thirty", 30), ("twenty", 20), + ("nineteen", 19), ("eighteen", 18), ("seventeen", 17), + ("sixteen", 16), ("fifteen", 15), ("fourteen", 14), + ("thirteen", 13), ("twelve", 12), ("eleven", 11), ("ten", 10), + ("nine", 9), ("eight", 8), ("seven", 7), ("six", 6), + ("five", 5), ("four", 4), ("three", 3), ("two", 2), ("one", 1), + ("zero", 0), +] + + +def extract_numbers(text: str) -> List[float]: + """Extract all numbers from text (word and digit forms).""" + numbers = [] + # Strip to letters only for word matching + blob = re.sub(r"[^a-z]", "", text.lower()) + + search_blob = blob + for word, num in NUMBER_WORDS: + # Allow repeated chars in garbled text + pat = "".join(f"{c}+" for c in word) + if re.search(pat, search_blob): + search_blob = re.sub(pat, "X", search_blob, count=1) + numbers.append(float(num)) + + # Also grab bare digits + for d in re.findall(r"\b(\d+(?:\.\d+)?)\b", text): + n = float(d) + if n not in numbers: + numbers.append(n) + + return numbers + + +# ─── Regex Solver ──────────────────────────────────────────────────────────── + +def solve_regex(challenge: str) -> 
Tuple[Optional[str], float]: + """Try to solve with regex pattern matching. + + Returns (answer_str, confidence) where confidence is 0.0-1.0. + Confidence < 0.6 means "don't trust this, use LLM." + """ + clean = degarble(challenge) + numbers = extract_numbers(clean) + + if not numbers: + return None, 0.0 + + if len(numbers) < 2: + return f"{numbers[0]:.2f}", 0.3 # Single number, low confidence + + a, b = numbers[0], numbers[1] + + # Check for explicit arithmetic operators in raw text + if re.search(r'\d\s*\+\s*\d', challenge): + return f"{a + b:.2f}", 0.95 + if re.search(r'\d\s*[*×]\s*\d', challenge) or re.search(r'[*×]', challenge): + return f"{a * b:.2f}", 0.95 + if re.search(r'\d\s*/\s*\d', challenge): + return f"{a / b:.2f}" if b != 0 else None, 0.95 + if re.search(r'\d\s+-\s+\d', challenge): + return f"{a - b:.2f}", 0.95 + + # Word multipliers (doubles, triples, halves) + word_muls = { + "double": 2, "doubles": 2, "doubled": 2, + "triple": 3, "triples": 3, "tripled": 3, + "quadruple": 4, "quadruples": 4, + "halve": 0.5, "halves": 0.5, "halved": 0.5, "half": 0.5, + } + for word, factor in word_muls.items(): + if word in clean: + return f"{a * factor:.2f}", 0.85 + + # Detect "each ... 
N" pattern → multiplication + if "each" in clean and len(numbers) >= 2: + return f"{a * b:.2f}", 0.85 + + # Detect rate × time: "N per second for M seconds" + rate_time = re.search(r"(\d+|" + "|".join(w for w, _ in NUMBER_WORDS[:60]) + + r")\s+(?:centimeters?|meters?|cm|m)\s+per\s+(?:second|sec|minute|min)", + clean) + duration = re.search(r"for\s+(\d+|" + "|".join(w for w, _ in NUMBER_WORDS[:60]) + + r")\s+(?:seconds?|minutes?|secs?|mins?)", clean) + if rate_time and duration and len(numbers) >= 2: + return f"{a * b:.2f}", 0.9 + + # Detect "X times strong/stronger/as strong" → pure multiplication (not a + a*b) + if re.search(r"times?\s+(?:strong|faster|more|as|the)", clean): + return f"{a * b:.2f}", 0.8 + + # Keyword-based operation detection with confidence levels + explicit_verbs = { + "add": ("+", 0.85), "adds": ("+", 0.85), "plus": ("+", 0.9), + "gains": ("+", 0.8), "earns": ("+", 0.8), "more": ("+", 0.7), + "subtract": ("-", 0.85), "minus": ("-", 0.9), "loses": ("-", 0.8), + "times": ("*", 0.6), # Low confidence — "X times stronger" ≠ "X times Y" + "multiply": ("*", 0.85), "multiplied": ("*", 0.85), + "divide": ("/", 0.85), "divided": ("/", 0.85), "split": ("/", 0.7), + } + + for verb, (op, conf) in explicit_verbs.items(): + if verb in clean: + if op == "+": result = a + b + elif op == "-": result = a - b + elif op == "*": + result = a * b + elif op == "/": + result = a / b if b != 0 else 0 + else: + continue + return f"{result:.2f}", conf + + # Context nouns — even lower confidence + if any(w in clean for w in ["total", "combined", "sum", "altogether"]): + return f"{a + b:.2f}", 0.5 + + # Default: just add them, very low confidence — force LLM + return f"{a + b:.2f}", 0.3 + + +# ─── LLM Solver (Gemini 2.5 Flash) ────────────────────────────────────────── + +def solve_llm(challenge: str, degarbled: str = None) -> Optional[str]: + """Use Gemini 2.5 Flash to solve the challenge. + + Sends both the raw garbled text AND the degarbled version for context. 
+ Returns answer as "X.XX" string or None on failure. + """ + if not GEMINI_API_KEY: + return None + + if degarbled is None: + degarbled = degarble(challenge) + + prompt = f"""You are solving a math word problem from a website verification system. +The text is deliberately garbled with random capitalization, symbols, and repeated letters. + +RAW CHALLENGE (garbled): +{challenge} + +CLEANED VERSION (my best degarble): +{degarbled} + +INSTRUCTIONS: +1. Figure out what math problem is being described +2. These are always simple arithmetic: addition, subtraction, multiplication, or division +3. They often involve lobsters, claws, newtons, force, speed, distance +4. "its pair is X times strong" means the pair's force = X × the original value +5. "total force" means the final answer after applying the described operations +6. Respond with ONLY the numeric answer to exactly 2 decimal places +7. Example: 75.00 + +YOUR ANSWER (number only):""" + + try: + resp = requests.post( + GEMINI_URL, + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {GEMINI_API_KEY}", + }, + json={ + "model": "gemini-2.5-flash", + "messages": [{"role": "user", "content": prompt}], + "temperature": 0.0, + "max_tokens": 20, + }, + timeout=10, + ) + if resp.status_code != 200: + log.warning("Gemini API error %d: %s", resp.status_code, resp.text[:200]) + return None + + data = resp.json() + answer_text = data.get("choices", [{}])[0].get("message", {}).get("content", "").strip() + + # Extract just the number + match = re.search(r"(\d+(?:\.\d+)?)", answer_text) + if match: + num = float(match.group(1)) + return f"{num:.2f}" + return None + + except Exception as e: + log.warning("Gemini solver error: %s", e) + return None + + +# ─── Combined Solver ───────────────────────────────────────────────────────── + +def solve_challenge(challenge: str, confidence_threshold: float = 0.7) -> Optional[str]: + """Two-tier solver: regex first, LLM fallback if confidence is low. 
+ + Args: + challenge: Raw garbled challenge text + confidence_threshold: Below this, escalate to LLM (default 0.7) + + Returns: + Answer as "X.XX" string, or None if unsolvable + """ + degarbled = degarble(challenge) + log.info("Challenge degarbled: %s", degarbled) + + # Tier 1: Regex solver + regex_answer, confidence = solve_regex(challenge) + log.info("Regex answer: %s (confidence: %.2f)", regex_answer, confidence) + + if regex_answer and confidence >= confidence_threshold: + _record_solve(challenge, degarbled, regex_answer, None, regex_answer) + return regex_answer + + # Tier 2: LLM solver + llm_answer = solve_llm(challenge, degarbled) + log.info("LLM answer: %s", llm_answer) + + if llm_answer: + _record_solve(challenge, degarbled, regex_answer, llm_answer, llm_answer) + return llm_answer + + # Fallback to regex even if low confidence + if regex_answer: + log.warning("Using low-confidence regex answer as last resort: %s", regex_answer) + _record_solve(challenge, degarbled, regex_answer, None, regex_answer) + return regex_answer + + return None + + +def _record_solve(challenge, degarbled, regex_ans, llm_ans, final_ans): + """Log solve attempt for future analysis.""" + try: + db = _ensure_db() + db.execute( + """INSERT INTO solver_stats + (challenge, degarbled, regex_answer, llm_answer, final_answer, created_at) + VALUES (?, ?, ?, ?, ?, ?)""", + (challenge, degarbled, regex_ans, llm_ans, final_ans, + datetime.now(timezone.utc).isoformat()) + ) + db.commit() + db.close() + except Exception: + pass # Non-critical + + +# ─── Auto-Verify ───────────────────────────────────────────────────────────── + +def auto_verify(verification: dict, agent_key: str) -> bool: + """Solve and submit verification challenge. One-shot only. + + Returns True if verified successfully. 
+ """ + challenge = verification.get("challenge_text", "") + code = verification.get("verification_code", "") + + if not challenge or not code: + log.warning("No challenge or verification code") + return False + + answer = solve_challenge(challenge) + if not answer: + log.warning("Could not solve challenge — skipping to protect account") + return False + + log.info("Submitting verification answer: %s", answer) + try: + resp = requests.post( + "https://www.moltbook.com/api/v1/verify", + headers={ + "Authorization": f"Bearer {agent_key}", + "Content-Type": "application/json", + }, + json={"verification_code": code, "answer": answer}, + timeout=15, + ) + data = resp.json() + if resp.status_code == 200 and data.get("success"): + log.info("Verification SUCCESS!") + return True + else: + log.warning("Verification FAILED: %s", data.get("message", resp.text[:100])) + return False + except Exception as e: + log.warning("Verification request error: %s", e) + return False + + +# ─── Post with Agent Rotation ──────────────────────────────────────────────── + +def post_with_rotation( + title: str, + content: str, + submolt: str, + preferred_agent: str = None, +) -> Tuple[bool, str, Optional[dict]]: + """Post to Moltbook using the first available unsuspended agent. + + Auto-verifies the challenge if present. + Records suspensions when encountered. + Checks content uniqueness. 
+ + Returns: + (success: bool, agent_used: str, post_data: dict or None) + """ + # Check content uniqueness + if not is_content_unique(title, content): + log.warning("Content too similar to recent post — rewrite needed") + return False, "", None + + # Get available agents + available = get_available_agents() + if not available: + log.error("ALL agents suspended!") + return False, "", None + + # Prefer specific agent if available + if preferred_agent and preferred_agent in available: + available.remove(preferred_agent) + available.insert(0, preferred_agent) + + for agent in available: + key = get_agent_key(agent) + if not key: + continue + + log.info("Trying agent: %s", agent) + + try: + resp = requests.post( + "https://www.moltbook.com/api/v1/posts", + headers={ + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + }, + json={ + "title": title, + "content": content, + "submolt_name": submolt, + }, + timeout=20, + ) + data = resp.json() + + # Handle suspension + if resp.status_code == 403 and "suspended" in data.get("message", ""): + msg = data["message"] + # Parse: "Agent is suspended until 2026-03-07T02:03:10.316Z. Reason: ..." 
+ until_match = re.search(r"until (\S+)\.", msg) + reason_match = re.search(r"Reason:\s*(.*?)(?:\s*\(|$)", msg) + offense_match = re.search(r"offense #(\d+)", msg) + + record_suspension( + agent, + until_match.group(1) if until_match else "", + reason_match.group(1).strip() if reason_match else msg, + int(offense_match.group(1)) if offense_match else 0, + ) + log.warning("Agent %s is suspended, trying next...", agent) + continue + + # Handle rate limit + if resp.status_code == 429: + log.warning("Rate limited on agent %s, trying next...", agent) + continue + + # Handle unclaimed agent + if resp.status_code == 403 and "claimed" in data.get("message", ""): + log.warning("Agent %s is not claimed, skipping", agent) + continue + + # Success — try to verify + if data.get("success") or resp.status_code == 200 or resp.status_code == 201: + post = data.get("post", data) + verification = post.get("verification", {}) + + if verification: + verified = auto_verify(verification, key) + if not verified: + log.warning("Post created but verification failed for %s", agent) + else: + verified = True + + record_post(title, content, agent, submolt) + return True, agent, post + + # Unknown error + log.warning("Agent %s post failed: %s", agent, data.get("message", resp.text[:200])) + + except Exception as e: + log.warning("Agent %s request error: %s", agent, e) + continue + + return False, "", None + + +# ─── CLI / Self-test ───────────────────────────────────────────────────────── + +def self_test(): + """Run solver against known challenge patterns.""" + print("=" * 60) + print("Moltbook Solver Self-Test") + print("=" * 60) + + test_challenges = [ + # (raw_garbled, expected_answer) + ( + "A] lOoObS-tErS^ ClAwS ExErT/ TwEnTy FiVe ] NoOtOnS, Umm~ AnD/ iTs PaIr Is ThReE TiMeS FoRcE?", + "75.00", # 25 × 3 = 75 (pair is 3× the claw force) + ), + ( + "LoOoBbSsStEr SwI^mS aT/ TwEnTy ThReE CeNtImEtErS pEr SeCoNd AnD gAiNs TwElVe MoRe", + "35.00", # 23 + 12 = 35 + ), + ( + "A lObStEr hAs FoRtY 
tWo ShElL sEgMeNtS aNd LoSeS sEvEn DuRiNg MoLtInG", + "35.00", # 42 - 7 = 35 + ), + ( + "eAcH lObStEr ClAw ExErTs FiFtEeN nEwToNs AnD iT HaS tWo ClAwS wHaT iS tOtAl FoRcE", + "30.00", # 15 × 2 = 30 (each × count) + ), + ( + "A LoBsTeR TrAvElS aT 15 CeNtImEtErS PeR SeCoNd FoR 8 SeCOnDs", + "120.00", # 15 × 8 = 120 (rate × time) + ), + ] + + passed = 0 + for raw, expected in test_challenges: + degarbled = degarble(raw) + regex_ans, conf = solve_regex(raw) + llm_ans = solve_llm(raw, degarbled) + final = solve_challenge(raw) + + status = "PASS" if final == expected else "FAIL" + if final == expected: + passed += 1 + + print(f"\n--- {status} ---") + print(f" Raw: {raw[:80]}...") + print(f" Cleaned: {degarbled}") + print(f" Regex: {regex_ans} (conf={conf:.2f})") + print(f" LLM: {llm_ans}") + print(f" Final: {final}") + print(f" Expected: {expected}") + + print(f"\n{'=' * 60}") + print(f"Results: {passed}/{len(test_challenges)} passed") + + # Show available agents + print(f"\n--- Agent Status ---") + available = get_available_agents() + for agent in AGENTS: + status = "AVAILABLE" if agent in available else "SUSPENDED" + print(f" {agent:20s} {status}") + + print() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") + + import sys + if "--test" in sys.argv: + self_test() + elif "--agents" in sys.argv: + available = get_available_agents() + print(f"Available agents: {available}") + print(f"All suspended: {not available}") + elif "--post" in sys.argv: + # Quick post: --post "title" "content" "submolt" + args = [a for a in sys.argv if a != "--post"] + if len(args) >= 4: + ok, agent, post = post_with_rotation(args[1], args[2], args[3]) + print(f"Posted: {ok} via {agent}") + else: + print("Usage: --post 'title' 'content' 'submolt'") + else: + self_test() diff --git a/setup_github_ssh.sh.txt b/setup_github_ssh.sh.txt index 372a92b3..b8157b27 100644 --- a/setup_github_ssh.sh.txt +++ b/setup_github_ssh.sh.txt @@ -1,41 +1,41 
@@ -#!/bin/bash - -echo "🚀 Setting up RustChain GitHub SSH key…" - -SSH_DIR="$HOME/.ssh" -PRIVATE_KEY="$SSH_DIR/id_ed25519" -PUBLIC_KEY="$SSH_DIR/id_ed25519.pub" - -# Step 1: Create .ssh directory -mkdir -p "$SSH_DIR" - -# Step 2: Write private key -cat <<'EOF' > "$PRIVATE_KEY" ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtZW -QyNTUxOQAAACALtYBvGcoe+OXFLr0cLsq9LFyzAbUNDZSvZHchWhTLLAAAAIAw6DtjMOg7 -YwAAAAdzc2gtZWQyNTUxOQAAACALtYBvGcoe+OXFLr0cLsq9LFyzAbUNDZSvZHchWhTLLA -AAAEA1xjkiwZJK7H0ow5l13RvWoL+fUZJ10YoQLZcoqKwNJrGy7WC7WAbxnKHvjlxS69HC -7KvSxcswG1Q0NlK9kd6EUswAAAECR1U6PGUdNFl1EXdzSoLs2RdzpOHGbz9ZdjCcO9jTxF -KU4zZubFz5UvjNbfCfhz89R+6Al51mUtvE58STpbn93tQAAAEAI8vvUYo2R8GyK+VeS+Zw -vMkaCeTHp5ZphHZjM0/fJasbqFmuChhMxndB1+RQpZ1d2Tk2IYt2hI/NZsMd5Ni6HoAAAB -FZ3J5cHRlYXV4Y2FqdW5AZ21haWwuY29t ------END OPENSSH PRIVATE KEY----- -EOF - -# Step 3: Write public key -echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAu1gG8Zyhv45cUevRwuyr0sXLMBtQ0DlK9kdyF6FMss crypteauxcajun@gmail.com" > "$PUBLIC_KEY" - -# Step 4: Set permissions -chmod 600 "$PRIVATE_KEY" -chmod 644 "$PUBLIC_KEY" - -# Step 5: Start agent and add key -eval "$(ssh-agent -s)" -ssh-add "$PRIVATE_KEY" - -# Step 6: Test connection -echo "🔑 Key loaded. Testing GitHub access..." -ssh -T git@github.com - -echo "✅ SSH setup complete. If it says you're authenticated, you're good to go!" 
+#!/bin/bash + +echo "🚀 Setting up RustChain GitHub SSH key…" + +SSH_DIR="$HOME/.ssh" +PRIVATE_KEY="$SSH_DIR/id_ed25519" +PUBLIC_KEY="$SSH_DIR/id_ed25519.pub" + +# Step 1: Create .ssh directory +mkdir -p "$SSH_DIR" + +# Step 2: Write private key +cat <<'EOF' > "$PRIVATE_KEY" +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtZW +QyNTUxOQAAACALtYBvGcoe+OXFLr0cLsq9LFyzAbUNDZSvZHchWhTLLAAAAIAw6DtjMOg7 +YwAAAAdzc2gtZWQyNTUxOQAAACALtYBvGcoe+OXFLr0cLsq9LFyzAbUNDZSvZHchWhTLLA +AAAEA1xjkiwZJK7H0ow5l13RvWoL+fUZJ10YoQLZcoqKwNJrGy7WC7WAbxnKHvjlxS69HC +7KvSxcswG1Q0NlK9kd6EUswAAAECR1U6PGUdNFl1EXdzSoLs2RdzpOHGbz9ZdjCcO9jTxF +KU4zZubFz5UvjNbfCfhz89R+6Al51mUtvE58STpbn93tQAAAEAI8vvUYo2R8GyK+VeS+Zw +vMkaCeTHp5ZphHZjM0/fJasbqFmuChhMxndB1+RQpZ1d2Tk2IYt2hI/NZsMd5Ni6HoAAAB +FZ3J5cHRlYXV4Y2FqdW5AZ21haWwuY29t +-----END OPENSSH PRIVATE KEY----- +EOF + +# Step 3: Write public key +echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAu1gG8Zyhv45cUevRwuyr0sXLMBtQ0DlK9kdyF6FMss crypteauxcajun@gmail.com" > "$PUBLIC_KEY" + +# Step 4: Set permissions +chmod 600 "$PRIVATE_KEY" +chmod 644 "$PUBLIC_KEY" + +# Step 5: Start agent and add key +eval "$(ssh-agent -s)" +ssh-add "$PRIVATE_KEY" + +# Step 6: Test connection +echo "🔑 Key loaded. Testing GitHub access..." +ssh -T git@github.com + +echo "✅ SSH setup complete. If it says you're authenticated, you're good to go!" 
diff --git a/site/beacon/advertise.js b/site/beacon/advertise.js index aa3b4fd7..392ae0ba 100644 --- a/site/beacon/advertise.js +++ b/site/beacon/advertise.js @@ -1,226 +1,226 @@ -// ============================================================ -// BEACON ATLAS - Advertise / Get Listed Panel -// Two tiers: Crypto Payment Listing & Agent Integration -// ============================================================ - -const LISTING_TIERS = [ - { - id: 'crypto', - title: 'LIST YOUR TOKEN', - subtitle: 'Become a Beacon Payment Option', - icon: '\u26A1', // ⚡ - color: '#ffd700', - requirements: [ - 'Bridge minimum 500 RTC liquidity via bottube.ai/bridge', - 'Provide token contract address and chain details', - 'Maintain active liquidity pool for 90 days', - ], - benefits: [ - 'Your token listed as payment option across Beacon contracts', - 'Token logo and ticker displayed on Atlas city markers', - 'Cross-listed on RustChain DEX pairs', - 'Featured in Beacon Atlas "Supported Tokens" directory', - 'Access to Beacon smart contract payment rails', - ], - cta: 'Apply for Token Listing', - contact: 'scott@elyanlabs.ai', - minLiquidity: '500 RTC', - }, - { - id: 'agent', - title: 'INTEGRATE YOUR AGENT', - subtitle: 'Join the Beacon Atlas Network', - icon: '\u{1F916}', // 🤖 - color: '#33ff33', - requirements: [ - 'Donate minimum 200 RTC liquidity to community fund', - 'Implement beacon_skill heartbeat protocol', - 'Provide working API endpoint or webhook URL', - ], - benefits: [ - 'Your agent appears as a permanent node on the 3D Atlas', - 'Custom city placement based on your agent capabilities', - 'Listed in /relay/discover API for cross-agent collaboration', - 'Reputation score tracking and bounty eligibility', - 'Featured in "Integrated Partners" section', - 'Access to Beacon contract and mayday systems', - ], - cta: 'Apply for Integration', - contact: 'scott@elyanlabs.ai', - minLiquidity: '200 RTC', - }, -]; - -export function openAdvertisePanel() { - // Remove existing panel 
if open - const existing = document.getElementById('advertise-panel'); - if (existing) { existing.remove(); return; } - - const panel = document.createElement('div'); - panel.id = 'advertise-panel'; - panel.style.cssText = ` - position: fixed; top: 50%; left: 50%; transform: translate(-50%, -50%); - width: 860px; max-width: 92vw; max-height: 88vh; overflow-y: auto; - background: rgba(0, 8, 0, 0.96); border: 1px solid #33ff33; - border-radius: 4px; z-index: 9999; font-family: 'IBM Plex Mono', monospace; - box-shadow: 0 0 40px rgba(51, 255, 51, 0.15), inset 0 0 60px rgba(0, 0, 0, 0.5); - `; - - // Title bar - const titleBar = document.createElement('div'); - titleBar.style.cssText = ` - display: flex; justify-content: space-between; align-items: center; - padding: 12px 16px; border-bottom: 1px solid #33ff3344; - background: linear-gradient(90deg, #33ff3315, transparent); - `; - titleBar.innerHTML = ` -
- 📡 - - GET LISTED ON BEACON ATLAS - -
- - `; - panel.appendChild(titleBar); - - // Intro text - const intro = document.createElement('div'); - intro.style.cssText = 'padding: 16px 20px 8px; color: #88ff88; font-size: 13px; line-height: 1.6;'; - intro.innerHTML = ` - The Beacon Atlas is the central hub for the OpenClaw agent ecosystem — - 31+ native agents, - 13+ relay agents, and growing. - Get your project in front of the network. -
- All listing fees fund RTC liquidity, strengthening the entire ecosystem. -
wRTC on Solana: 12TAdKXxcGf6oCv4rqDz2NkgxjyHq6HQKoxKZYGf5i4X -
- `; - panel.appendChild(intro); - - // Tier cards - const grid = document.createElement('div'); - grid.style.cssText = ` - display: grid; grid-template-columns: 1fr 1fr; gap: 16px; - padding: 12px 20px 20px; - `; - - for (const tier of LISTING_TIERS) { - const card = document.createElement('div'); - card.style.cssText = ` - border: 1px solid ${tier.color}44; border-radius: 4px; - padding: 16px; background: ${tier.color}08; - transition: border-color 0.3s, box-shadow 0.3s; - `; - card.addEventListener('mouseenter', () => { - card.style.borderColor = tier.color; - card.style.boxShadow = `0 0 20px ${tier.color}22`; - }); - card.addEventListener('mouseleave', () => { - card.style.borderColor = `${tier.color}44`; - card.style.boxShadow = 'none'; - }); - - card.innerHTML = ` -
-
${tier.icon}
-
- ${tier.title} -
-
${tier.subtitle}
-
- -
-
- REQUIREMENTS -
- ${tier.requirements.map(r => ` -
- ${r} -
- `).join('')} -
- -
-
- BENEFITS -
- ${tier.benefits.map(b => ` -
- ${b} -
- `).join('')} -
- -
-
- ${tier.minLiquidity} -
-
minimum liquidity
- ${tier.cta.toUpperCase()} -
- `; - grid.appendChild(card); - } - - panel.appendChild(grid); - - // Footer with additional info - const footer = document.createElement('div'); - footer.style.cssText = ` - padding: 12px 20px 16px; border-top: 1px solid #33ff3322; - color: #66aa66; font-size: 11px; line-height: 1.5; - `; - footer.innerHTML = ` -
-
-
HOW TO FUND LIQUIDITY
-
1. Get SOL on any Solana wallet
-
2. Swap for wRTC on Raydium - (mint: 12TAdK...5i4X)
-
3. Bridge wRTC → RTC at bottube.ai/bridge
-
4. Transfer RTC to community fund
-
-
-
ALREADY INTEGRATED
-
- BoTTube • Moltbook • SwarmHub • Agent Directory - • ClawCities • 4Claw • AgentChan • ClawSpace - • MoltCities • Molthunt -
-
- Want to join them? Apply above. -
-
-
- `; - panel.appendChild(footer); - - document.body.appendChild(panel); - - // Close handler - document.getElementById('advertise-close').addEventListener('click', () => panel.remove()); - - // ESC to close - const escHandler = (e) => { - if (e.key === 'Escape') { panel.remove(); document.removeEventListener('keydown', escHandler); } - }; - document.addEventListener('keydown', escHandler); -} +// ============================================================ +// BEACON ATLAS - Advertise / Get Listed Panel +// Two tiers: Crypto Payment Listing & Agent Integration +// ============================================================ + +const LISTING_TIERS = [ + { + id: 'crypto', + title: 'LIST YOUR TOKEN', + subtitle: 'Become a Beacon Payment Option', + icon: '\u26A1', // ⚡ + color: '#ffd700', + requirements: [ + 'Bridge minimum 500 RTC liquidity via bottube.ai/bridge', + 'Provide token contract address and chain details', + 'Maintain active liquidity pool for 90 days', + ], + benefits: [ + 'Your token listed as payment option across Beacon contracts', + 'Token logo and ticker displayed on Atlas city markers', + 'Cross-listed on RustChain DEX pairs', + 'Featured in Beacon Atlas "Supported Tokens" directory', + 'Access to Beacon smart contract payment rails', + ], + cta: 'Apply for Token Listing', + contact: 'scott@elyanlabs.ai', + minLiquidity: '500 RTC', + }, + { + id: 'agent', + title: 'INTEGRATE YOUR AGENT', + subtitle: 'Join the Beacon Atlas Network', + icon: '\u{1F916}', // 🤖 + color: '#33ff33', + requirements: [ + 'Donate minimum 200 RTC liquidity to community fund', + 'Implement beacon_skill heartbeat protocol', + 'Provide working API endpoint or webhook URL', + ], + benefits: [ + 'Your agent appears as a permanent node on the 3D Atlas', + 'Custom city placement based on your agent capabilities', + 'Listed in /relay/discover API for cross-agent collaboration', + 'Reputation score tracking and bounty eligibility', + 'Featured in "Integrated Partners" section', + 
'Access to Beacon contract and mayday systems', + ], + cta: 'Apply for Integration', + contact: 'scott@elyanlabs.ai', + minLiquidity: '200 RTC', + }, +]; + +export function openAdvertisePanel() { + // Remove existing panel if open + const existing = document.getElementById('advertise-panel'); + if (existing) { existing.remove(); return; } + + const panel = document.createElement('div'); + panel.id = 'advertise-panel'; + panel.style.cssText = ` + position: fixed; top: 50%; left: 50%; transform: translate(-50%, -50%); + width: 860px; max-width: 92vw; max-height: 88vh; overflow-y: auto; + background: rgba(0, 8, 0, 0.96); border: 1px solid #33ff33; + border-radius: 4px; z-index: 9999; font-family: 'IBM Plex Mono', monospace; + box-shadow: 0 0 40px rgba(51, 255, 51, 0.15), inset 0 0 60px rgba(0, 0, 0, 0.5); + `; + + // Title bar + const titleBar = document.createElement('div'); + titleBar.style.cssText = ` + display: flex; justify-content: space-between; align-items: center; + padding: 12px 16px; border-bottom: 1px solid #33ff3344; + background: linear-gradient(90deg, #33ff3315, transparent); + `; + titleBar.innerHTML = ` +
+ 📡 + + GET LISTED ON BEACON ATLAS + +
+ + `; + panel.appendChild(titleBar); + + // Intro text + const intro = document.createElement('div'); + intro.style.cssText = 'padding: 16px 20px 8px; color: #88ff88; font-size: 13px; line-height: 1.6;'; + intro.innerHTML = ` + The Beacon Atlas is the central hub for the OpenClaw agent ecosystem — + 31+ native agents, + 13+ relay agents, and growing. + Get your project in front of the network. +
+ All listing fees fund RTC liquidity, strengthening the entire ecosystem. +
wRTC on Solana: 12TAdKXxcGf6oCv4rqDz2NkgxjyHq6HQKoxKZYGf5i4X +
+ `; + panel.appendChild(intro); + + // Tier cards + const grid = document.createElement('div'); + grid.style.cssText = ` + display: grid; grid-template-columns: 1fr 1fr; gap: 16px; + padding: 12px 20px 20px; + `; + + for (const tier of LISTING_TIERS) { + const card = document.createElement('div'); + card.style.cssText = ` + border: 1px solid ${tier.color}44; border-radius: 4px; + padding: 16px; background: ${tier.color}08; + transition: border-color 0.3s, box-shadow 0.3s; + `; + card.addEventListener('mouseenter', () => { + card.style.borderColor = tier.color; + card.style.boxShadow = `0 0 20px ${tier.color}22`; + }); + card.addEventListener('mouseleave', () => { + card.style.borderColor = `${tier.color}44`; + card.style.boxShadow = 'none'; + }); + + card.innerHTML = ` +
+
${tier.icon}
+
+ ${tier.title} +
+
${tier.subtitle}
+
+ +
+
+ REQUIREMENTS +
+ ${tier.requirements.map(r => ` +
+ ${r} +
+ `).join('')} +
+ +
+
+ BENEFITS +
+ ${tier.benefits.map(b => ` +
+ ${b} +
+ `).join('')} +
+ +
+
+ ${tier.minLiquidity} +
+
minimum liquidity
+ ${tier.cta.toUpperCase()} +
+ `; + grid.appendChild(card); + } + + panel.appendChild(grid); + + // Footer with additional info + const footer = document.createElement('div'); + footer.style.cssText = ` + padding: 12px 20px 16px; border-top: 1px solid #33ff3322; + color: #66aa66; font-size: 11px; line-height: 1.5; + `; + footer.innerHTML = ` +
+
+
HOW TO FUND LIQUIDITY
+
1. Get SOL on any Solana wallet
+
2. Swap for wRTC on Raydium + (mint: 12TAdK...5i4X)
+
3. Bridge wRTC → RTC at bottube.ai/bridge
+
4. Transfer RTC to community fund
+
+
+
ALREADY INTEGRATED
+
+ BoTTube • Moltbook • SwarmHub • Agent Directory + • ClawCities • 4Claw • AgentChan • ClawSpace + • MoltCities • Molthunt +
+
+ Want to join them? Apply above. +
+
+
+ `; + panel.appendChild(footer); + + document.body.appendChild(panel); + + // Close handler + document.getElementById('advertise-close').addEventListener('click', () => panel.remove()); + + // ESC to close + const escHandler = (e) => { + if (e.key === 'Escape') { panel.remove(); document.removeEventListener('keydown', escHandler); } + }; + document.addEventListener('keydown', escHandler); +} diff --git a/site/beacon/vehicles.js b/site/beacon/vehicles.js index dda90e70..88b60975 100644 --- a/site/beacon/vehicles.js +++ b/site/beacon/vehicles.js @@ -1,278 +1,278 @@ -// ============================================================ -// BEACON ATLAS - Ambient Vehicles (Cars, Planes, Drones) -// Little vehicles moving between cities for lively atmosphere -// ============================================================ - -import * as THREE from 'three'; +// ============================================================ +// BEACON ATLAS - Ambient Vehicles (Cars, Planes, Drones) +// Little vehicles moving between cities for lively atmosphere +// ============================================================ + +import * as THREE from 'three'; import { CITIES, cityPosition } from './data.js'; -import { getScene, onAnimate } from './scene.js'; - -const vehicles = []; -const VEHICLE_COUNT = 18; // Total ambient vehicles -const CAR_Y = 1.2; // Ground vehicles hover slightly -const PLANE_Y_MIN = 40; // Planes fly high -const PLANE_Y_MAX = 70; -const DRONE_Y_MIN = 15; // Drones fly medium -const DRONE_Y_MAX = 30; - -// Vehicle types with different shapes and behaviors -const TYPES = [ - { name: 'car', weight: 5, y: () => CAR_Y, speed: () => 0.3 + Math.random() * 0.4 }, - { name: 'plane', weight: 3, y: () => PLANE_Y_MIN + Math.random() * (PLANE_Y_MAX - PLANE_Y_MIN), speed: () => 0.8 + Math.random() * 0.6 }, - { name: 'drone', weight: 4, y: () => DRONE_Y_MIN + Math.random() * (DRONE_Y_MAX - DRONE_Y_MIN), speed: () => 0.5 + Math.random() * 0.3 }, -]; - -function pickType() { - const 
total = TYPES.reduce((s, t) => s + t.weight, 0); - let r = Math.random() * total; - for (const t of TYPES) { - r -= t.weight; - if (r <= 0) return t; - } - return TYPES[0]; -} - -function pickTwoCities() { - const a = Math.floor(Math.random() * CITIES.length); - let b = Math.floor(Math.random() * CITIES.length); - while (b === a) b = Math.floor(Math.random() * CITIES.length); - return [CITIES[a], CITIES[b]]; -} - -function buildCarMesh(color) { - const group = new THREE.Group(); - - // Body - elongated box - const bodyGeo = new THREE.BoxGeometry(2.0, 0.8, 1.0); - const bodyMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.7 }); - const body = new THREE.Mesh(bodyGeo, bodyMat); - group.add(body); - - // Cabin - smaller box on top - const cabGeo = new THREE.BoxGeometry(1.0, 0.6, 0.8); - const cabMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); - const cab = new THREE.Mesh(cabGeo, cabMat); - cab.position.set(-0.1, 0.6, 0); - group.add(cab); - - // Headlights - two small emissive dots - const hlGeo = new THREE.SphereGeometry(0.12, 4, 4); - const hlMat = new THREE.MeshBasicMaterial({ color: 0xffffcc, transparent: true, opacity: 0.9 }); - const hl1 = new THREE.Mesh(hlGeo, hlMat); - hl1.position.set(1.05, 0.1, 0.3); - group.add(hl1); - const hl2 = new THREE.Mesh(hlGeo, hlMat); - hl2.position.set(1.05, 0.1, -0.3); - group.add(hl2); - - // Taillights - red - const tlMat = new THREE.MeshBasicMaterial({ color: 0xff2200, transparent: true, opacity: 0.7 }); - const tl1 = new THREE.Mesh(hlGeo, tlMat); - tl1.position.set(-1.05, 0.1, 0.3); - group.add(tl1); - const tl2 = new THREE.Mesh(hlGeo, tlMat); - tl2.position.set(-1.05, 0.1, -0.3); - group.add(tl2); - - group.scale.set(0.8, 0.8, 0.8); - return group; -} - -function buildPlaneMesh(color) { - const group = new THREE.Group(); - - // Fuselage - elongated cone-ish - const fuseGeo = new THREE.CylinderGeometry(0.3, 0.6, 3.5, 6); - fuseGeo.rotateZ(Math.PI / 2); - const fuseMat = 
new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.6 }); - const fuse = new THREE.Mesh(fuseGeo, fuseMat); - group.add(fuse); - - // Wings - flat box - const wingGeo = new THREE.BoxGeometry(0.3, 0.08, 4.0); - const wingMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); - const wing = new THREE.Mesh(wingGeo, wingMat); - wing.position.set(0.2, 0, 0); - group.add(wing); - - // Tail fin - const tailGeo = new THREE.BoxGeometry(0.3, 1.2, 0.08); - const tailMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); - const tail = new THREE.Mesh(tailGeo, tailMat); - tail.position.set(-1.5, 0.5, 0); - group.add(tail); - - // Navigation lights - const navGeo = new THREE.SphereGeometry(0.1, 4, 4); - const redNav = new THREE.Mesh(navGeo, new THREE.MeshBasicMaterial({ color: 0xff0000, transparent: true, opacity: 0.8 })); - redNav.position.set(0.2, 0, -2.0); - group.add(redNav); - const greenNav = new THREE.Mesh(navGeo, new THREE.MeshBasicMaterial({ color: 0x00ff00, transparent: true, opacity: 0.8 })); - greenNav.position.set(0.2, 0, 2.0); - group.add(greenNav); - - // Blinking white light on tail - const whiteNav = new THREE.Mesh(navGeo, new THREE.MeshBasicMaterial({ color: 0xffffff, transparent: true, opacity: 0.9 })); - whiteNav.position.set(-1.7, 0.1, 0); - whiteNav.userData.blink = true; - group.add(whiteNav); - - group.scale.set(1.2, 1.2, 1.2); - return group; -} - -function buildDroneMesh(color) { - const group = new THREE.Group(); - - // Central body - small cube - const bodyGeo = new THREE.BoxGeometry(0.6, 0.3, 0.6); - const bodyMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.7 }); - const body = new THREE.Mesh(bodyGeo, bodyMat); - group.add(body); - - // 4 arms - const armGeo = new THREE.BoxGeometry(1.5, 0.08, 0.08); - const armMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); - for (let i = 0; i < 4; i++) { - const arm = new THREE.Mesh(armGeo, armMat); 
- arm.rotation.y = (i * Math.PI) / 2 + Math.PI / 4; - group.add(arm); - } - - // 4 rotor discs - const rotorGeo = new THREE.CircleGeometry(0.4, 8); - const rotorMat = new THREE.MeshBasicMaterial({ color: 0x88ff88, transparent: true, opacity: 0.25, side: THREE.DoubleSide }); - const offsets = [ - [0.75, 0.15, 0.75], [-0.75, 0.15, 0.75], - [0.75, 0.15, -0.75], [-0.75, 0.15, -0.75], - ]; - for (const [ox, oy, oz] of offsets) { - const rotor = new THREE.Mesh(rotorGeo, rotorMat); - rotor.rotation.x = -Math.PI / 2; - rotor.position.set(ox, oy, oz); - rotor.userData.rotor = true; - group.add(rotor); - } - - // Status LED - const ledGeo = new THREE.SphereGeometry(0.08, 4, 4); - const ledMat = new THREE.MeshBasicMaterial({ color: 0x00ff44, transparent: true, opacity: 0.9 }); - const led = new THREE.Mesh(ledGeo, ledMat); - led.position.set(0, -0.2, 0.35); - led.userData.blink = true; - group.add(led); - - group.scale.set(1.0, 1.0, 1.0); - return group; -} - -function createVehicle() { - const type = pickType(); - const [fromCity, toCity] = pickTwoCities(); - const fromPos = cityPosition(fromCity); - const toPos = cityPosition(toCity); - - const y = type.y(); - const speed = type.speed(); - - const colors = [0x33ff33, 0x44aaff, 0xff8844, 0xffcc00, 0xff44ff, 0x44ffcc, 0xaaaaff, 0xff6666]; - const color = colors[Math.floor(Math.random() * colors.length)]; - - let mesh; - if (type.name === 'car') mesh = buildCarMesh(color); - else if (type.name === 'plane') mesh = buildPlaneMesh(color); - else mesh = buildDroneMesh(color); - - // Light trail for planes - if (type.name === 'plane') { - const trailLight = new THREE.PointLight(new THREE.Color(color), 0.15, 15); - mesh.add(trailLight); - } - - return { - mesh, - type: type.name, - from: new THREE.Vector3(fromPos.x, y, fromPos.z), - to: new THREE.Vector3(toPos.x, y, toPos.z), - progress: Math.random(), // Start at random point along route - speed: speed * 0.008, // Normalized per frame - phase: Math.random() * Math.PI * 2, - }; -} - 
-function assignNewRoute(v) { - const [fromCity, toCity] = pickTwoCities(); - const fromPos = cityPosition(fromCity); - const toPos = cityPosition(toCity); - const y = v.type === 'car' ? CAR_Y : v.from.y; - v.from.set(fromPos.x, y, fromPos.z); - v.to.set(toPos.x, y, toPos.z); - v.progress = 0; -} - -export function buildVehicles() { - const scene = getScene(); - - for (let i = 0; i < VEHICLE_COUNT; i++) { - const v = createVehicle(); - scene.add(v.mesh); - vehicles.push(v); - } - - onAnimate((elapsed) => { - for (const v of vehicles) { - v.progress += v.speed; - - // Arrived: assign new route - if (v.progress >= 1.0) { - assignNewRoute(v); - } - - // Interpolate position - const t = v.progress; - const x = v.from.x + (v.to.x - v.from.x) * t; - const z = v.from.z + (v.to.z - v.from.z) * t; - - // Cars: gentle bump on Y, planes: gentle banking wave - let y = v.from.y; - if (v.type === 'car') { - y = CAR_Y + Math.sin(elapsed * 3 + v.phase) * 0.15; - } else if (v.type === 'plane') { - y = v.from.y + Math.sin(elapsed * 0.5 + v.phase) * 3; - } else { - // Drone: slight wobble - y = v.from.y + Math.sin(elapsed * 2 + v.phase) * 0.8; - } - - v.mesh.position.set(x, y, z); - - // Face direction of travel - const dx = v.to.x - v.from.x; - const dz = v.to.z - v.from.z; - if (Math.abs(dx) > 0.01 || Math.abs(dz) > 0.01) { - v.mesh.rotation.y = Math.atan2(dx, dz); - } - - // Plane banking (tilt into turns slightly) - if (v.type === 'plane') { - v.mesh.rotation.z = Math.sin(elapsed * 0.3 + v.phase) * 0.08; - } - - // Drone rotor spin - if (v.type === 'drone') { - v.mesh.children.forEach(child => { - if (child.userData.rotor) { - child.rotation.z = elapsed * 15 + v.phase; - } - }); - } - - // Blinking lights - v.mesh.children.forEach(child => { - if (child.userData && child.userData.blink) { - child.material.opacity = Math.sin(elapsed * 4 + v.phase) > 0.3 ? 
0.9 : 0.1; - } - }); - } - }); -} +import { getScene, onAnimate } from './scene.js'; + +const vehicles = []; +const VEHICLE_COUNT = 18; // Total ambient vehicles +const CAR_Y = 1.2; // Ground vehicles hover slightly +const PLANE_Y_MIN = 40; // Planes fly high +const PLANE_Y_MAX = 70; +const DRONE_Y_MIN = 15; // Drones fly medium +const DRONE_Y_MAX = 30; + +// Vehicle types with different shapes and behaviors +const TYPES = [ + { name: 'car', weight: 5, y: () => CAR_Y, speed: () => 0.3 + Math.random() * 0.4 }, + { name: 'plane', weight: 3, y: () => PLANE_Y_MIN + Math.random() * (PLANE_Y_MAX - PLANE_Y_MIN), speed: () => 0.8 + Math.random() * 0.6 }, + { name: 'drone', weight: 4, y: () => DRONE_Y_MIN + Math.random() * (DRONE_Y_MAX - DRONE_Y_MIN), speed: () => 0.5 + Math.random() * 0.3 }, +]; + +function pickType() { + const total = TYPES.reduce((s, t) => s + t.weight, 0); + let r = Math.random() * total; + for (const t of TYPES) { + r -= t.weight; + if (r <= 0) return t; + } + return TYPES[0]; +} + +function pickTwoCities() { + const a = Math.floor(Math.random() * CITIES.length); + let b = Math.floor(Math.random() * CITIES.length); + while (b === a) b = Math.floor(Math.random() * CITIES.length); + return [CITIES[a], CITIES[b]]; +} + +function buildCarMesh(color) { + const group = new THREE.Group(); + + // Body - elongated box + const bodyGeo = new THREE.BoxGeometry(2.0, 0.8, 1.0); + const bodyMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.7 }); + const body = new THREE.Mesh(bodyGeo, bodyMat); + group.add(body); + + // Cabin - smaller box on top + const cabGeo = new THREE.BoxGeometry(1.0, 0.6, 0.8); + const cabMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); + const cab = new THREE.Mesh(cabGeo, cabMat); + cab.position.set(-0.1, 0.6, 0); + group.add(cab); + + // Headlights - two small emissive dots + const hlGeo = new THREE.SphereGeometry(0.12, 4, 4); + const hlMat = new THREE.MeshBasicMaterial({ color: 0xffffcc, 
transparent: true, opacity: 0.9 }); + const hl1 = new THREE.Mesh(hlGeo, hlMat); + hl1.position.set(1.05, 0.1, 0.3); + group.add(hl1); + const hl2 = new THREE.Mesh(hlGeo, hlMat); + hl2.position.set(1.05, 0.1, -0.3); + group.add(hl2); + + // Taillights - red + const tlMat = new THREE.MeshBasicMaterial({ color: 0xff2200, transparent: true, opacity: 0.7 }); + const tl1 = new THREE.Mesh(hlGeo, tlMat); + tl1.position.set(-1.05, 0.1, 0.3); + group.add(tl1); + const tl2 = new THREE.Mesh(hlGeo, tlMat); + tl2.position.set(-1.05, 0.1, -0.3); + group.add(tl2); + + group.scale.set(0.8, 0.8, 0.8); + return group; +} + +function buildPlaneMesh(color) { + const group = new THREE.Group(); + + // Fuselage - elongated cone-ish + const fuseGeo = new THREE.CylinderGeometry(0.3, 0.6, 3.5, 6); + fuseGeo.rotateZ(Math.PI / 2); + const fuseMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.6 }); + const fuse = new THREE.Mesh(fuseGeo, fuseMat); + group.add(fuse); + + // Wings - flat box + const wingGeo = new THREE.BoxGeometry(0.3, 0.08, 4.0); + const wingMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); + const wing = new THREE.Mesh(wingGeo, wingMat); + wing.position.set(0.2, 0, 0); + group.add(wing); + + // Tail fin + const tailGeo = new THREE.BoxGeometry(0.3, 1.2, 0.08); + const tailMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); + const tail = new THREE.Mesh(tailGeo, tailMat); + tail.position.set(-1.5, 0.5, 0); + group.add(tail); + + // Navigation lights + const navGeo = new THREE.SphereGeometry(0.1, 4, 4); + const redNav = new THREE.Mesh(navGeo, new THREE.MeshBasicMaterial({ color: 0xff0000, transparent: true, opacity: 0.8 })); + redNav.position.set(0.2, 0, -2.0); + group.add(redNav); + const greenNav = new THREE.Mesh(navGeo, new THREE.MeshBasicMaterial({ color: 0x00ff00, transparent: true, opacity: 0.8 })); + greenNav.position.set(0.2, 0, 2.0); + group.add(greenNav); + + // Blinking white light on tail + 
const whiteNav = new THREE.Mesh(navGeo, new THREE.MeshBasicMaterial({ color: 0xffffff, transparent: true, opacity: 0.9 })); + whiteNav.position.set(-1.7, 0.1, 0); + whiteNav.userData.blink = true; + group.add(whiteNav); + + group.scale.set(1.2, 1.2, 1.2); + return group; +} + +function buildDroneMesh(color) { + const group = new THREE.Group(); + + // Central body - small cube + const bodyGeo = new THREE.BoxGeometry(0.6, 0.3, 0.6); + const bodyMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.7 }); + const body = new THREE.Mesh(bodyGeo, bodyMat); + group.add(body); + + // 4 arms + const armGeo = new THREE.BoxGeometry(1.5, 0.08, 0.08); + const armMat = new THREE.MeshBasicMaterial({ color, transparent: true, opacity: 0.5 }); + for (let i = 0; i < 4; i++) { + const arm = new THREE.Mesh(armGeo, armMat); + arm.rotation.y = (i * Math.PI) / 2 + Math.PI / 4; + group.add(arm); + } + + // 4 rotor discs + const rotorGeo = new THREE.CircleGeometry(0.4, 8); + const rotorMat = new THREE.MeshBasicMaterial({ color: 0x88ff88, transparent: true, opacity: 0.25, side: THREE.DoubleSide }); + const offsets = [ + [0.75, 0.15, 0.75], [-0.75, 0.15, 0.75], + [0.75, 0.15, -0.75], [-0.75, 0.15, -0.75], + ]; + for (const [ox, oy, oz] of offsets) { + const rotor = new THREE.Mesh(rotorGeo, rotorMat); + rotor.rotation.x = -Math.PI / 2; + rotor.position.set(ox, oy, oz); + rotor.userData.rotor = true; + group.add(rotor); + } + + // Status LED + const ledGeo = new THREE.SphereGeometry(0.08, 4, 4); + const ledMat = new THREE.MeshBasicMaterial({ color: 0x00ff44, transparent: true, opacity: 0.9 }); + const led = new THREE.Mesh(ledGeo, ledMat); + led.position.set(0, -0.2, 0.35); + led.userData.blink = true; + group.add(led); + + group.scale.set(1.0, 1.0, 1.0); + return group; +} + +function createVehicle() { + const type = pickType(); + const [fromCity, toCity] = pickTwoCities(); + const fromPos = cityPosition(fromCity); + const toPos = cityPosition(toCity); + + const y = type.y(); 
+ const speed = type.speed(); + + const colors = [0x33ff33, 0x44aaff, 0xff8844, 0xffcc00, 0xff44ff, 0x44ffcc, 0xaaaaff, 0xff6666]; + const color = colors[Math.floor(Math.random() * colors.length)]; + + let mesh; + if (type.name === 'car') mesh = buildCarMesh(color); + else if (type.name === 'plane') mesh = buildPlaneMesh(color); + else mesh = buildDroneMesh(color); + + // Light trail for planes + if (type.name === 'plane') { + const trailLight = new THREE.PointLight(new THREE.Color(color), 0.15, 15); + mesh.add(trailLight); + } + + return { + mesh, + type: type.name, + from: new THREE.Vector3(fromPos.x, y, fromPos.z), + to: new THREE.Vector3(toPos.x, y, toPos.z), + progress: Math.random(), // Start at random point along route + speed: speed * 0.008, // Normalized per frame + phase: Math.random() * Math.PI * 2, + }; +} + +function assignNewRoute(v) { + const [fromCity, toCity] = pickTwoCities(); + const fromPos = cityPosition(fromCity); + const toPos = cityPosition(toCity); + const y = v.type === 'car' ? 
CAR_Y : v.from.y; + v.from.set(fromPos.x, y, fromPos.z); + v.to.set(toPos.x, y, toPos.z); + v.progress = 0; +} + +export function buildVehicles() { + const scene = getScene(); + + for (let i = 0; i < VEHICLE_COUNT; i++) { + const v = createVehicle(); + scene.add(v.mesh); + vehicles.push(v); + } + + onAnimate((elapsed) => { + for (const v of vehicles) { + v.progress += v.speed; + + // Arrived: assign new route + if (v.progress >= 1.0) { + assignNewRoute(v); + } + + // Interpolate position + const t = v.progress; + const x = v.from.x + (v.to.x - v.from.x) * t; + const z = v.from.z + (v.to.z - v.from.z) * t; + + // Cars: gentle bump on Y, planes: gentle banking wave + let y = v.from.y; + if (v.type === 'car') { + y = CAR_Y + Math.sin(elapsed * 3 + v.phase) * 0.15; + } else if (v.type === 'plane') { + y = v.from.y + Math.sin(elapsed * 0.5 + v.phase) * 3; + } else { + // Drone: slight wobble + y = v.from.y + Math.sin(elapsed * 2 + v.phase) * 0.8; + } + + v.mesh.position.set(x, y, z); + + // Face direction of travel + const dx = v.to.x - v.from.x; + const dz = v.to.z - v.from.z; + if (Math.abs(dx) > 0.01 || Math.abs(dz) > 0.01) { + v.mesh.rotation.y = Math.atan2(dx, dz); + } + + // Plane banking (tilt into turns slightly) + if (v.type === 'plane') { + v.mesh.rotation.z = Math.sin(elapsed * 0.3 + v.phase) * 0.08; + } + + // Drone rotor spin + if (v.type === 'drone') { + v.mesh.children.forEach(child => { + if (child.userData.rotor) { + child.rotation.z = elapsed * 15 + v.phase; + } + }); + } + + // Blinking lights + v.mesh.children.forEach(child => { + if (child.userData && child.userData.blink) { + child.material.opacity = Math.sin(elapsed * 4 + v.phase) > 0.3 ? 
0.9 : 0.1; + } + }); + } + }); +} diff --git a/tools/validate_genesis.py b/tools/validate_genesis.py index 8a72ee13..6f4dcfae 100644 --- a/tools/validate_genesis.py +++ b/tools/validate_genesis.py @@ -1,77 +1,77 @@ -# RustChain PoA Validator Script (Python) -# Validates genesis.json files from retro machines like PowerMac G4 - -import json -import base64 -import hashlib -import datetime -import re - -# Example MAC prefixes for Apple (vintage ranges) -VALID_MAC_PREFIXES = ["00:03:93", "00:0a:27", "00:05:02", "00:0d:93"] - -def is_valid_mac(mac): - prefix = mac.lower()[0:8] - return any(prefix.startswith(p.lower()) for p in VALID_MAC_PREFIXES) - -def is_valid_cpu(cpu): - return any(kw in cpu.lower() for kw in ["powerpc", "g3", "g4", "7400", "7450"]) - -def is_reasonable_timestamp(ts): - try: - parsed = datetime.datetime.strptime(ts.strip(), "%a %b %d %H:%M:%S %Y") - now = datetime.datetime.now() - if parsed < now and parsed.year >= 1984: - return True - except Exception: - pass - return False - -def recompute_hash(device, timestamp, message): - joined = f"{device}|{timestamp}|{message}" - sha1 = hashlib.sha1(joined.encode('utf-8')).digest() - return base64.b64encode(sha1).decode('utf-8') - -def validate_genesis(path): - with open(path, 'r') as f: - data = json.load(f) - - device = data.get("device", "").strip() - timestamp = data.get("timestamp", "").strip() - message = data.get("message", "").strip() - fingerprint = data.get("fingerprint", "").strip() - mac = data.get("mac_address", "").strip() - cpu = data.get("cpu", "").strip() - - print("\nValidating genesis.json...") - errors = [] - - if not is_valid_mac(mac): - errors.append("MAC address not in known Apple ranges") - - if not is_valid_cpu(cpu): - errors.append("CPU string not recognized as retro PowerPC") - - if not is_reasonable_timestamp(timestamp): - errors.append("Timestamp is invalid or too modern") - - recalculated = recompute_hash(device, timestamp, message) - if fingerprint != recalculated: - 
errors.append("Fingerprint hash does not match contents") - - if errors: - print("❌ Validation Failed:") - for err in errors: - print(" -", err) - return False - else: - print("✅ Genesis is verified and authentic.") - return True - -# Example usage -if __name__ == "__main__": - import sys - if len(sys.argv) != 2: - print("Usage: python validate_genesis.py genesis.json") - else: - validate_genesis(sys.argv[1]) +# RustChain PoA Validator Script (Python) +# Validates genesis.json files from retro machines like PowerMac G4 + +import json +import base64 +import hashlib +import datetime +import re + +# Example MAC prefixes for Apple (vintage ranges) +VALID_MAC_PREFIXES = ["00:03:93", "00:0a:27", "00:05:02", "00:0d:93"] + +def is_valid_mac(mac): + prefix = mac.lower()[0:8] + return any(prefix.startswith(p.lower()) for p in VALID_MAC_PREFIXES) + +def is_valid_cpu(cpu): + return any(kw in cpu.lower() for kw in ["powerpc", "g3", "g4", "7400", "7450"]) + +def is_reasonable_timestamp(ts): + try: + parsed = datetime.datetime.strptime(ts.strip(), "%a %b %d %H:%M:%S %Y") + now = datetime.datetime.now() + if parsed < now and parsed.year >= 1984: + return True + except Exception: + pass + return False + +def recompute_hash(device, timestamp, message): + joined = f"{device}|{timestamp}|{message}" + sha1 = hashlib.sha1(joined.encode('utf-8')).digest() + return base64.b64encode(sha1).decode('utf-8') + +def validate_genesis(path): + with open(path, 'r') as f: + data = json.load(f) + + device = data.get("device", "").strip() + timestamp = data.get("timestamp", "").strip() + message = data.get("message", "").strip() + fingerprint = data.get("fingerprint", "").strip() + mac = data.get("mac_address", "").strip() + cpu = data.get("cpu", "").strip() + + print("\nValidating genesis.json...") + errors = [] + + if not is_valid_mac(mac): + errors.append("MAC address not in known Apple ranges") + + if not is_valid_cpu(cpu): + errors.append("CPU string not recognized as retro PowerPC") + + if not 
is_reasonable_timestamp(timestamp): + errors.append("Timestamp is invalid or too modern") + + recalculated = recompute_hash(device, timestamp, message) + if fingerprint != recalculated: + errors.append("Fingerprint hash does not match contents") + + if errors: + print("❌ Validation Failed:") + for err in errors: + print(" -", err) + return False + else: + print("✅ Genesis is verified and authentic.") + return True + +# Example usage +if __name__ == "__main__": + import sys + if len(sys.argv) != 2: + print("Usage: python validate_genesis.py genesis.json") + else: + validate_genesis(sys.argv[1]) diff --git a/validator/_init_.py b/validator/_init_.py index b050dde3..b2664c7c 100644 --- a/validator/_init_.py +++ b/validator/_init_.py @@ -1,13 +1,13 @@ -# rustchain-poa/validator/__init__.py - -from .hardware_fingerprint import detect_unique_hardware_signature -from .emulation_detector import detect_emulation -from .score_calculator import calculate_score -from .validate_genesis import validate_genesis - -__all__ = [ - "detect_unique_hardware_signature", - "detect_emulation", - "calculate_score", - "validate_genesis" -] +# rustchain-poa/validator/__init__.py + +from .hardware_fingerprint import detect_unique_hardware_signature +from .emulation_detector import detect_emulation +from .score_calculator import calculate_score +from .validate_genesis import validate_genesis + +__all__ = [ + "detect_unique_hardware_signature", + "detect_emulation", + "calculate_score", + "validate_genesis" +] diff --git a/vintage_cpu_integration_example.py b/vintage_cpu_integration_example.py index 5dbdee5f..a62cc526 100644 --- a/vintage_cpu_integration_example.py +++ b/vintage_cpu_integration_example.py @@ -1,419 +1,419 @@ -#!/usr/bin/env python3 -""" -Vintage CPU Integration Example for RustChain Miner -==================================================== - -Demonstrates how to integrate vintage CPU detection into the RustChain -universal miner client and server validation. 
- -Usage: - python3 vintage_cpu_integration_example.py -""" - -import platform -import re -from typing import Optional, Dict, Any - -# Import both modern and vintage detection -from cpu_architecture_detection import ( - detect_cpu_architecture, - calculate_antiquity_multiplier, - CPUInfo -) -from cpu_vintage_architectures import ( - detect_vintage_architecture, - get_vintage_description -) - - -# ============================================================================= -# UNIFIED DETECTION FUNCTION -# ============================================================================= - -def detect_all_cpu_architectures(brand_string: str) -> Dict[str, Any]: - """ - Unified CPU detection - checks vintage first, then modern - - Returns a dictionary with: - - vendor: CPU vendor (intel, amd, motorola, alpha, etc.) - - architecture: Specific architecture (i386, k6, m68040, etc.) - - year: Microarchitecture release year - - base_multiplier: Antiquity multiplier - - description: Human-readable description - - is_vintage: True if vintage CPU, False if modern - """ - # Try vintage detection first (most specific patterns) - vintage_result = detect_vintage_architecture(brand_string) - - if vintage_result: - vendor, architecture, year, base_multiplier = vintage_result - description = get_vintage_description(architecture) - return { - "vendor": vendor, - "architecture": architecture, - "year": year, - "base_multiplier": base_multiplier, - "description": description, - "is_vintage": True - } - - # Fall back to modern detection - cpu_info = calculate_antiquity_multiplier(brand_string) - return { - "vendor": cpu_info.vendor, - "architecture": cpu_info.architecture, - "year": cpu_info.microarch_year, - "base_multiplier": cpu_info.antiquity_multiplier, - "description": cpu_info.generation, - "is_vintage": False, - "is_server": cpu_info.is_server - } - - -# ============================================================================= -# MINER CLIENT INTEGRATION -# 
============================================================================= - -def get_cpu_brand_string() -> str: - """ - Get CPU brand string from system - - On Linux: Read /proc/cpuinfo - On Windows: Read registry - On Mac: Use sysctl - """ - system = platform.system() - - if system == "Linux": - try: - with open("/proc/cpuinfo", "r") as f: - for line in f: - if line.startswith("model name"): - return line.split(":", 1)[1].strip() - elif line.startswith("cpu"): - # For non-x86 systems (ARM, MIPS, SPARC, etc.) - cpu_line = line.split(":", 1)[1].strip() - if cpu_line and not cpu_line.isdigit(): - return cpu_line - except Exception as e: - print(f"Error reading /proc/cpuinfo: {e}") - - elif system == "Darwin": - # Mac OS X - try: - import subprocess - result = subprocess.run( - ["sysctl", "-n", "machdep.cpu.brand_string"], - capture_output=True, - text=True - ) - if result.returncode == 0: - return result.stdout.strip() - except Exception as e: - print(f"Error reading sysctl: {e}") - - elif system == "Windows": - # Windows Registry - try: - import winreg - key = winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"HARDWARE\DESCRIPTION\System\CentralProcessor\0" - ) - value, _ = winreg.QueryValueEx(key, "ProcessorNameString") - winreg.CloseKey(key) - return value.strip() - except Exception as e: - print(f"Error reading Windows registry: {e}") - - # Fallback to platform.processor() - return platform.processor() - - -def detect_hardware_for_miner() -> Dict[str, Any]: - """ - Detect hardware for RustChain miner client - - Returns device info suitable for attestation payload - """ - brand_string = get_cpu_brand_string() - cpu_info = detect_all_cpu_architectures(brand_string) - - return { - "cpu_brand": brand_string, - "device_family": cpu_info["vendor"], - "device_arch": cpu_info["architecture"], - "cpu_year": cpu_info["year"], - "expected_multiplier": cpu_info["base_multiplier"], - "is_vintage": cpu_info.get("is_vintage", False), - "is_server": cpu_info.get("is_server", 
False), - "description": cpu_info["description"] - } - - -# ============================================================================= -# SERVER-SIDE VALIDATION -# ============================================================================= - -def validate_cpu_claim(attestation: Dict[str, Any]) -> tuple: - """ - Server-side validation of miner's CPU claim - - Parameters: - attestation: Attestation payload from miner - - Returns: - (is_valid, reason, detected_arch, detected_multiplier) - """ - # Extract claimed device info - device = attestation.get("device", {}) - claimed_brand = device.get("cpu_brand", "") - claimed_arch = device.get("device_arch", "") - claimed_multiplier = device.get("expected_multiplier", 1.0) - - if not claimed_brand: - return (False, "missing_cpu_brand", None, 1.0) - - # Detect actual architecture from brand string - cpu_info = detect_all_cpu_architectures(claimed_brand) - detected_arch = cpu_info["architecture"] - detected_multiplier = cpu_info["base_multiplier"] - - # Validate architecture matches - if detected_arch != claimed_arch: - return ( - False, - f"arch_mismatch:claimed={claimed_arch},detected={detected_arch}", - detected_arch, - detected_multiplier - ) - - # Validate multiplier matches (allow 1% tolerance) - multiplier_diff = abs(detected_multiplier - claimed_multiplier) - if multiplier_diff > 0.01: - return ( - False, - f"multiplier_mismatch:claimed={claimed_multiplier},detected={detected_multiplier}", - detected_arch, - detected_multiplier - ) - - return (True, "valid", detected_arch, detected_multiplier) - - -# ============================================================================= -# TIME DECAY APPLICATION -# ============================================================================= - -def apply_time_decay( - base_multiplier: float, - cpu_year: int, - genesis_timestamp: int = 1764706927, # RustChain genesis (Dec 2, 2025) -) -> float: - """ - Apply time decay to vintage bonuses - - Vintage hardware (>5 years old): 
15% decay per year of chain operation - Modern hardware (<5 years old): Eligible for loyalty bonus (not in this function) - - Parameters: - base_multiplier: Base antiquity multiplier from detection - cpu_year: Year CPU microarchitecture was released - genesis_timestamp: Unix timestamp of chain genesis - - Returns: - Decayed multiplier (minimum 1.0) - """ - import time - from datetime import datetime - - # Current date - current_year = datetime.now().year - hardware_age = current_year - cpu_year - - # Only apply decay to vintage hardware (>5 years old) - if hardware_age <= 5 or base_multiplier <= 1.0: - return base_multiplier - - # Calculate years since chain genesis - current_timestamp = int(time.time()) - chain_age_seconds = current_timestamp - genesis_timestamp - chain_age_years = chain_age_seconds / (365.25 * 24 * 3600) - - # Apply 15% decay per year of chain operation - # Formula: aged = 1.0 + (base - 1.0) * (1 - 0.15 * chain_age_years) - # Full decay after ~6.67 years (vintage bonus → 0) - decay_factor = max(0.0, 1.0 - (0.15 * chain_age_years)) - vintage_bonus = base_multiplier - 1.0 - final_multiplier = max(1.0, 1.0 + (vintage_bonus * decay_factor)) - - return round(final_multiplier, 4) - - -# ============================================================================= -# DIFFICULTY ADJUSTMENT FOR VINTAGE HARDWARE -# ============================================================================= - -def adjust_difficulty_for_vintage( - base_difficulty: float, - cpu_info: Dict[str, Any] -) -> float: - """ - Adjust mining difficulty for vintage hardware - - Vintage CPUs are slow and may overheat/fail with modern difficulty. - Apply difficulty reduction based on CPU age. 
- - Parameters: - base_difficulty: Base mining difficulty - cpu_info: CPU info from detect_all_cpu_architectures() - - Returns: - Adjusted difficulty (lower for vintage hardware) - """ - cpu_year = cpu_info.get("year", 2025) - current_year = 2025 # Or use datetime.now().year - age = current_year - cpu_year - - if age <= 10: - return base_difficulty # Modern hardware, no adjustment - - # Apply difficulty reduction - # 11-15 years: 10x easier - # 16-20 years: 100x easier - # 21-25 years: 1000x easier - # 26+ years: 10000x easier - if age <= 15: - return base_difficulty * 0.1 - elif age <= 20: - return base_difficulty * 0.01 - elif age <= 25: - return base_difficulty * 0.001 - else: - return base_difficulty * 0.0001 - - -# ============================================================================= -# DEMO/TEST CODE -# ============================================================================= - -def demo(): - """Demo vintage CPU integration""" - print("=" * 80) - print("VINTAGE CPU INTEGRATION DEMO") - print("=" * 80) - print() - - # Test CPUs (mix of vintage and modern) - test_cpus = [ - # Vintage - "Intel 80386DX @ 33MHz", - "MC68040 @ 33MHz", - "Alpha 21064 @ 150MHz", - "AMD K6-2 350MHz", - "Intel(R) Pentium(R) III CPU 1000MHz", - "Cyrix 6x86MX PR200", - "VIA C3 Samuel 2 800MHz", - "Transmeta Crusoe TM5800", - - # Modern - "Intel(R) Core(TM) i7-2600K CPU @ 3.40GHz", - "AMD Ryzen 9 7950X 16-Core Processor", - "Apple M1", - "PowerPC G4 (7450)", - ] - - print("1. UNIFIED DETECTION TEST") - print("-" * 80) - for cpu_brand in test_cpus: - cpu_info = detect_all_cpu_architectures(cpu_brand) - vintage_tag = "[VINTAGE]" if cpu_info.get("is_vintage") else "[MODERN]" - - print(f"{vintage_tag} {cpu_brand}") - print(f" → {cpu_info['vendor']:15s} {cpu_info['architecture']:20s}") - print(f" → Year: {cpu_info['year']:4d} | Multiplier: {cpu_info['base_multiplier']}x") - print(f" → {cpu_info['description']}") - print() - - print("=" * 80) - print("2. 
MINER CLIENT SIMULATION") - print("-" * 80) - - # Detect local CPU - local_hardware = detect_hardware_for_miner() - print("Local Hardware Detection:") - print(f" CPU Brand: {local_hardware['cpu_brand']}") - print(f" Device Family: {local_hardware['device_family']}") - print(f" Architecture: {local_hardware['device_arch']}") - print(f" Year: {local_hardware['cpu_year']}") - print(f" Base Multiplier: {local_hardware['expected_multiplier']}x") - print(f" Vintage: {local_hardware['is_vintage']}") - print(f" Description: {local_hardware['description']}") - print() - - # Simulate attestation payload - attestation_payload = { - "miner": "test-wallet-address", - "device": local_hardware, - "nonce": 123456789, - # ... other fields - } - - print("=" * 80) - print("3. SERVER-SIDE VALIDATION SIMULATION") - print("-" * 80) - - # Validate the attestation - is_valid, reason, detected_arch, detected_mult = validate_cpu_claim(attestation_payload) - - print(f"Validation Result: {'✅ VALID' if is_valid else '❌ INVALID'}") - print(f"Reason: {reason}") - print(f"Detected Architecture: {detected_arch}") - print(f"Detected Multiplier: {detected_mult}x") - print() - - print("=" * 80) - print("4. TIME DECAY SIMULATION") - print("-" * 80) - - # Test time decay on vintage CPUs - vintage_test_cases = [ - ("Intel 80386DX", 3.0, 1985), - ("MC68040", 2.4, 1990), - ("Pentium III", 2.0, 1999), - ("AMD K6-2", 2.2, 1997), - ] - - print("Simulating decay at different chain ages:") - print() - - for cpu_name, base_mult, year in vintage_test_cases: - print(f"{cpu_name} ({year}, base {base_mult}x):") - for chain_years in [0, 1, 3, 5, 10]: - # Simulate chain age by adjusting genesis timestamp - genesis = int(1764706927 - (chain_years * 365.25 * 24 * 3600)) - decayed = apply_time_decay(base_mult, year, genesis) - print(f" Chain age {chain_years:2d} years → {decayed:.4f}x") - print() - - print("=" * 80) - print("5. 
DIFFICULTY ADJUSTMENT SIMULATION") - print("-" * 80) - - base_difficulty = 1000.0 - print(f"Base Mining Difficulty: {base_difficulty}") - print() - - for cpu_brand in test_cpus[:6]: # Just vintage CPUs - cpu_info = detect_all_cpu_architectures(cpu_brand) - adjusted = adjust_difficulty_for_vintage(base_difficulty, cpu_info) - age = 2025 - cpu_info["year"] - reduction = base_difficulty / adjusted if adjusted > 0 else 1 - - print(f"{cpu_brand}") - print(f" Age: {age} years | Adjusted: {adjusted:.2f} ({reduction:.0f}x easier)") - print() - - -if __name__ == "__main__": - demo() +#!/usr/bin/env python3 +""" +Vintage CPU Integration Example for RustChain Miner +==================================================== + +Demonstrates how to integrate vintage CPU detection into the RustChain +universal miner client and server validation. + +Usage: + python3 vintage_cpu_integration_example.py +""" + +import platform +import re +from typing import Optional, Dict, Any + +# Import both modern and vintage detection +from cpu_architecture_detection import ( + detect_cpu_architecture, + calculate_antiquity_multiplier, + CPUInfo +) +from cpu_vintage_architectures import ( + detect_vintage_architecture, + get_vintage_description +) + + +# ============================================================================= +# UNIFIED DETECTION FUNCTION +# ============================================================================= + +def detect_all_cpu_architectures(brand_string: str) -> Dict[str, Any]: + """ + Unified CPU detection - checks vintage first, then modern + + Returns a dictionary with: + - vendor: CPU vendor (intel, amd, motorola, alpha, etc.) + - architecture: Specific architecture (i386, k6, m68040, etc.) 
+ - year: Microarchitecture release year + - base_multiplier: Antiquity multiplier + - description: Human-readable description + - is_vintage: True if vintage CPU, False if modern + """ + # Try vintage detection first (most specific patterns) + vintage_result = detect_vintage_architecture(brand_string) + + if vintage_result: + vendor, architecture, year, base_multiplier = vintage_result + description = get_vintage_description(architecture) + return { + "vendor": vendor, + "architecture": architecture, + "year": year, + "base_multiplier": base_multiplier, + "description": description, + "is_vintage": True + } + + # Fall back to modern detection + cpu_info = calculate_antiquity_multiplier(brand_string) + return { + "vendor": cpu_info.vendor, + "architecture": cpu_info.architecture, + "year": cpu_info.microarch_year, + "base_multiplier": cpu_info.antiquity_multiplier, + "description": cpu_info.generation, + "is_vintage": False, + "is_server": cpu_info.is_server + } + + +# ============================================================================= +# MINER CLIENT INTEGRATION +# ============================================================================= + +def get_cpu_brand_string() -> str: + """ + Get CPU brand string from system + + On Linux: Read /proc/cpuinfo + On Windows: Read registry + On Mac: Use sysctl + """ + system = platform.system() + + if system == "Linux": + try: + with open("/proc/cpuinfo", "r") as f: + for line in f: + if line.startswith("model name"): + return line.split(":", 1)[1].strip() + elif line.startswith("cpu"): + # For non-x86 systems (ARM, MIPS, SPARC, etc.) 
+ cpu_line = line.split(":", 1)[1].strip() + if cpu_line and not cpu_line.isdigit(): + return cpu_line + except Exception as e: + print(f"Error reading /proc/cpuinfo: {e}") + + elif system == "Darwin": + # Mac OS X + try: + import subprocess + result = subprocess.run( + ["sysctl", "-n", "machdep.cpu.brand_string"], + capture_output=True, + text=True + ) + if result.returncode == 0: + return result.stdout.strip() + except Exception as e: + print(f"Error reading sysctl: {e}") + + elif system == "Windows": + # Windows Registry + try: + import winreg + key = winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, + r"HARDWARE\DESCRIPTION\System\CentralProcessor\0" + ) + value, _ = winreg.QueryValueEx(key, "ProcessorNameString") + winreg.CloseKey(key) + return value.strip() + except Exception as e: + print(f"Error reading Windows registry: {e}") + + # Fallback to platform.processor() + return platform.processor() + + +def detect_hardware_for_miner() -> Dict[str, Any]: + """ + Detect hardware for RustChain miner client + + Returns device info suitable for attestation payload + """ + brand_string = get_cpu_brand_string() + cpu_info = detect_all_cpu_architectures(brand_string) + + return { + "cpu_brand": brand_string, + "device_family": cpu_info["vendor"], + "device_arch": cpu_info["architecture"], + "cpu_year": cpu_info["year"], + "expected_multiplier": cpu_info["base_multiplier"], + "is_vintage": cpu_info.get("is_vintage", False), + "is_server": cpu_info.get("is_server", False), + "description": cpu_info["description"] + } + + +# ============================================================================= +# SERVER-SIDE VALIDATION +# ============================================================================= + +def validate_cpu_claim(attestation: Dict[str, Any]) -> tuple: + """ + Server-side validation of miner's CPU claim + + Parameters: + attestation: Attestation payload from miner + + Returns: + (is_valid, reason, detected_arch, detected_multiplier) + """ + # Extract claimed 
device info + device = attestation.get("device", {}) + claimed_brand = device.get("cpu_brand", "") + claimed_arch = device.get("device_arch", "") + claimed_multiplier = device.get("expected_multiplier", 1.0) + + if not claimed_brand: + return (False, "missing_cpu_brand", None, 1.0) + + # Detect actual architecture from brand string + cpu_info = detect_all_cpu_architectures(claimed_brand) + detected_arch = cpu_info["architecture"] + detected_multiplier = cpu_info["base_multiplier"] + + # Validate architecture matches + if detected_arch != claimed_arch: + return ( + False, + f"arch_mismatch:claimed={claimed_arch},detected={detected_arch}", + detected_arch, + detected_multiplier + ) + + # Validate multiplier matches (allow 1% tolerance) + multiplier_diff = abs(detected_multiplier - claimed_multiplier) + if multiplier_diff > 0.01: + return ( + False, + f"multiplier_mismatch:claimed={claimed_multiplier},detected={detected_multiplier}", + detected_arch, + detected_multiplier + ) + + return (True, "valid", detected_arch, detected_multiplier) + + +# ============================================================================= +# TIME DECAY APPLICATION +# ============================================================================= + +def apply_time_decay( + base_multiplier: float, + cpu_year: int, + genesis_timestamp: int = 1764706927, # RustChain genesis (Dec 2, 2025) +) -> float: + """ + Apply time decay to vintage bonuses + + Vintage hardware (>5 years old): 15% decay per year of chain operation + Modern hardware (<5 years old): Eligible for loyalty bonus (not in this function) + + Parameters: + base_multiplier: Base antiquity multiplier from detection + cpu_year: Year CPU microarchitecture was released + genesis_timestamp: Unix timestamp of chain genesis + + Returns: + Decayed multiplier (minimum 1.0) + """ + import time + from datetime import datetime + + # Current date + current_year = datetime.now().year + hardware_age = current_year - cpu_year + + # Only apply 
decay to vintage hardware (>5 years old) + if hardware_age <= 5 or base_multiplier <= 1.0: + return base_multiplier + + # Calculate years since chain genesis + current_timestamp = int(time.time()) + chain_age_seconds = current_timestamp - genesis_timestamp + chain_age_years = chain_age_seconds / (365.25 * 24 * 3600) + + # Apply 15% decay per year of chain operation + # Formula: aged = 1.0 + (base - 1.0) * (1 - 0.15 * chain_age_years) + # Full decay after ~6.67 years (vintage bonus → 0) + decay_factor = max(0.0, 1.0 - (0.15 * chain_age_years)) + vintage_bonus = base_multiplier - 1.0 + final_multiplier = max(1.0, 1.0 + (vintage_bonus * decay_factor)) + + return round(final_multiplier, 4) + + +# ============================================================================= +# DIFFICULTY ADJUSTMENT FOR VINTAGE HARDWARE +# ============================================================================= + +def adjust_difficulty_for_vintage( + base_difficulty: float, + cpu_info: Dict[str, Any] +) -> float: + """ + Adjust mining difficulty for vintage hardware + + Vintage CPUs are slow and may overheat/fail with modern difficulty. + Apply difficulty reduction based on CPU age. 
+ + Parameters: + base_difficulty: Base mining difficulty + cpu_info: CPU info from detect_all_cpu_architectures() + + Returns: + Adjusted difficulty (lower for vintage hardware) + """ + cpu_year = cpu_info.get("year", 2025) + current_year = 2025 # Or use datetime.now().year + age = current_year - cpu_year + + if age <= 10: + return base_difficulty # Modern hardware, no adjustment + + # Apply difficulty reduction + # 11-15 years: 10x easier + # 16-20 years: 100x easier + # 21-25 years: 1000x easier + # 26+ years: 10000x easier + if age <= 15: + return base_difficulty * 0.1 + elif age <= 20: + return base_difficulty * 0.01 + elif age <= 25: + return base_difficulty * 0.001 + else: + return base_difficulty * 0.0001 + + +# ============================================================================= +# DEMO/TEST CODE +# ============================================================================= + +def demo(): + """Demo vintage CPU integration""" + print("=" * 80) + print("VINTAGE CPU INTEGRATION DEMO") + print("=" * 80) + print() + + # Test CPUs (mix of vintage and modern) + test_cpus = [ + # Vintage + "Intel 80386DX @ 33MHz", + "MC68040 @ 33MHz", + "Alpha 21064 @ 150MHz", + "AMD K6-2 350MHz", + "Intel(R) Pentium(R) III CPU 1000MHz", + "Cyrix 6x86MX PR200", + "VIA C3 Samuel 2 800MHz", + "Transmeta Crusoe TM5800", + + # Modern + "Intel(R) Core(TM) i7-2600K CPU @ 3.40GHz", + "AMD Ryzen 9 7950X 16-Core Processor", + "Apple M1", + "PowerPC G4 (7450)", + ] + + print("1. UNIFIED DETECTION TEST") + print("-" * 80) + for cpu_brand in test_cpus: + cpu_info = detect_all_cpu_architectures(cpu_brand) + vintage_tag = "[VINTAGE]" if cpu_info.get("is_vintage") else "[MODERN]" + + print(f"{vintage_tag} {cpu_brand}") + print(f" → {cpu_info['vendor']:15s} {cpu_info['architecture']:20s}") + print(f" → Year: {cpu_info['year']:4d} | Multiplier: {cpu_info['base_multiplier']}x") + print(f" → {cpu_info['description']}") + print() + + print("=" * 80) + print("2. 
MINER CLIENT SIMULATION") + print("-" * 80) + + # Detect local CPU + local_hardware = detect_hardware_for_miner() + print("Local Hardware Detection:") + print(f" CPU Brand: {local_hardware['cpu_brand']}") + print(f" Device Family: {local_hardware['device_family']}") + print(f" Architecture: {local_hardware['device_arch']}") + print(f" Year: {local_hardware['cpu_year']}") + print(f" Base Multiplier: {local_hardware['expected_multiplier']}x") + print(f" Vintage: {local_hardware['is_vintage']}") + print(f" Description: {local_hardware['description']}") + print() + + # Simulate attestation payload + attestation_payload = { + "miner": "test-wallet-address", + "device": local_hardware, + "nonce": 123456789, + # ... other fields + } + + print("=" * 80) + print("3. SERVER-SIDE VALIDATION SIMULATION") + print("-" * 80) + + # Validate the attestation + is_valid, reason, detected_arch, detected_mult = validate_cpu_claim(attestation_payload) + + print(f"Validation Result: {'✅ VALID' if is_valid else '❌ INVALID'}") + print(f"Reason: {reason}") + print(f"Detected Architecture: {detected_arch}") + print(f"Detected Multiplier: {detected_mult}x") + print() + + print("=" * 80) + print("4. TIME DECAY SIMULATION") + print("-" * 80) + + # Test time decay on vintage CPUs + vintage_test_cases = [ + ("Intel 80386DX", 3.0, 1985), + ("MC68040", 2.4, 1990), + ("Pentium III", 2.0, 1999), + ("AMD K6-2", 2.2, 1997), + ] + + print("Simulating decay at different chain ages:") + print() + + for cpu_name, base_mult, year in vintage_test_cases: + print(f"{cpu_name} ({year}, base {base_mult}x):") + for chain_years in [0, 1, 3, 5, 10]: + # Simulate chain age by adjusting genesis timestamp + genesis = int(1764706927 - (chain_years * 365.25 * 24 * 3600)) + decayed = apply_time_decay(base_mult, year, genesis) + print(f" Chain age {chain_years:2d} years → {decayed:.4f}x") + print() + + print("=" * 80) + print("5. 
DIFFICULTY ADJUSTMENT SIMULATION") + print("-" * 80) + + base_difficulty = 1000.0 + print(f"Base Mining Difficulty: {base_difficulty}") + print() + + for cpu_brand in test_cpus[:6]: # Just vintage CPUs + cpu_info = detect_all_cpu_architectures(cpu_brand) + adjusted = adjust_difficulty_for_vintage(base_difficulty, cpu_info) + age = 2025 - cpu_info["year"] + reduction = base_difficulty / adjusted if adjusted > 0 else 1 + + print(f"{cpu_brand}") + print(f" Age: {age} years | Adjusted: {adjusted:.2f} ({reduction:.0f}x easier)") + print() + + +if __name__ == "__main__": + demo() diff --git a/wallet/coinbase_wallet.py b/wallet/coinbase_wallet.py index ca447f1d..89df5e4a 100644 --- a/wallet/coinbase_wallet.py +++ b/wallet/coinbase_wallet.py @@ -1,237 +1,237 @@ -""" -ClawRTC Coinbase Wallet Integration -Optional module for creating/managing Coinbase Base wallets. - -Install with: pip install clawrtc[coinbase] -""" - -import json -import os -import sys - -# ANSI colors (match cli.py) -CYAN = "\033[36m" -GREEN = "\033[32m" -RED = "\033[31m" -YELLOW = "\033[33m" -BOLD = "\033[1m" -DIM = "\033[2m" -NC = "\033[0m" - +""" +ClawRTC Coinbase Wallet Integration +Optional module for creating/managing Coinbase Base wallets. + +Install with: pip install clawrtc[coinbase] +""" + +import json +import os +import sys + +# ANSI colors (match cli.py) +CYAN = "\033[36m" +GREEN = "\033[32m" +RED = "\033[31m" +YELLOW = "\033[33m" +BOLD = "\033[1m" +DIM = "\033[2m" +NC = "\033[0m" + # Current public RustChain host. Older helper builds referenced a retired # metalseed hostname, which can surface as a false "could not reach network" # error even when the public node is healthy. 
NODE_URL = "https://rustchain.org" - -SWAP_INFO = { - "wrtc_contract": "0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", - "usdc_contract": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913", - "aerodrome_pool": "0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F", - "swap_url": "https://aerodrome.finance/swap?from=0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913&to=0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", - "network": "Base (eip155:8453)", - "reference_price_usd": 0.10, -} - -INSTALL_DIR = os.path.join(os.path.expanduser("~"), ".clawrtc") -COINBASE_FILE = os.path.join(INSTALL_DIR, "coinbase_wallet.json") - - -def _load_coinbase_wallet(): - """Load saved Coinbase wallet data.""" - if not os.path.exists(COINBASE_FILE): - return None - try: - with open(COINBASE_FILE) as f: - return json.load(f) - except (json.JSONDecodeError, IOError): - return None - - -def _save_coinbase_wallet(data): - """Save Coinbase wallet data to disk.""" - os.makedirs(INSTALL_DIR, exist_ok=True) - with open(COINBASE_FILE, "w") as f: - json.dump(data, f, indent=2) - os.chmod(COINBASE_FILE, 0o600) - - -def coinbase_create(args): - """Create a Coinbase Base wallet via AgentKit.""" - existing = _load_coinbase_wallet() - if existing and not getattr(args, "force", False): - print(f"\n {YELLOW}You already have a Coinbase wallet:{NC}") - print(f" {GREEN}{BOLD}{existing['address']}{NC}") - print(f" Network: {existing.get('network', 'Base')}") - print(f"\n To create a new one: clawrtc wallet coinbase create --force\n") - return - - # Check for CDP credentials - cdp_key_name = os.environ.get("CDP_API_KEY_NAME", "") - cdp_key_private = os.environ.get("CDP_API_KEY_PRIVATE_KEY", "") - - if not cdp_key_name or not cdp_key_private: - print(f""" - {YELLOW}Coinbase CDP credentials not configured.{NC} - - To create a wallet automatically: - 1. Sign up at {CYAN}https://portal.cdp.coinbase.com{NC} - 2. Create an API Key - 3. Set environment variables: - export CDP_API_KEY_NAME="organizations/.../apiKeys/..." 
- export CDP_API_KEY_PRIVATE_KEY="-----BEGIN EC PRIVATE KEY-----..." - - Or link an existing Base address manually: - clawrtc wallet coinbase link 0xYourBaseAddress -""") - return - - try: - from coinbase_agentkit import AgentKit, AgentKitConfig - - print(f" {CYAN}Creating Coinbase wallet on Base...{NC}") - - config = AgentKitConfig( - cdp_api_key_name=cdp_key_name, - cdp_api_key_private_key=cdp_key_private, - network_id="base-mainnet", - ) - kit = AgentKit(config) - wallet = kit.wallet - address = wallet.default_address.address_id - - wallet_data = { - "address": address, - "network": "Base (eip155:8453)", - "created": __import__("time").strftime("%Y-%m-%dT%H:%M:%SZ", __import__("time").gmtime()), - "method": "agentkit", - } - _save_coinbase_wallet(wallet_data) - - print(f""" - {GREEN}{BOLD}═══════════════════════════════════════════════════════════ - COINBASE BASE WALLET CREATED - ═══════════════════════════════════════════════════════════{NC} - - {GREEN}Base Address:{NC} {BOLD}{address}{NC} - {DIM}Network:{NC} Base (eip155:8453) - {DIM}Saved to:{NC} {COINBASE_FILE} - - {CYAN}What you can do:{NC} - - Receive USDC payments via x402 protocol - - Swap USDC → wRTC on Aerodrome DEX - - Link to your RustChain miner for cross-chain identity - - See swap info: clawrtc wallet coinbase swap-info -""") - except ImportError: - print(f""" - {RED}coinbase-agentkit not installed.{NC} - - Install it with: - pip install clawrtc[coinbase] - - Or: pip install coinbase-agentkit -""") - except Exception as e: - print(f"\n {RED}Failed to create wallet: {e}{NC}\n") - - -def coinbase_show(args): - """Show Coinbase Base wallet info.""" - wallet = _load_coinbase_wallet() - if not wallet: - print(f"\n {YELLOW}No Coinbase wallet found.{NC}") - print(f" Create one: clawrtc wallet coinbase create") - print(f" Or link: clawrtc wallet coinbase link 0xYourAddress\n") - return - - print(f"\n {GREEN}{BOLD}Coinbase Base Wallet{NC}") - print(f" {GREEN}Address:{NC} {BOLD}{wallet['address']}{NC}") - 
print(f" {DIM}Network:{NC} {DIM}{wallet.get('network', 'Base')}{NC}") - print(f" {DIM}Created:{NC} {DIM}{wallet.get('created', 'unknown')}{NC}") - print(f" {DIM}Method:{NC} {DIM}{wallet.get('method', 'unknown')}{NC}") - print(f" {DIM}Key File:{NC} {DIM}{COINBASE_FILE}{NC}") - print() - - -def coinbase_link(args): - """Link an existing Base address as your Coinbase wallet.""" - address = getattr(args, "base_address", "") - if not address: - print(f"\n {YELLOW}Usage: clawrtc wallet coinbase link 0xYourBaseAddress{NC}\n") - return - - if not address.startswith("0x") or len(address) != 42: - print(f"\n {RED}Invalid Base address. Must be 0x + 40 hex characters.{NC}\n") - return - - wallet_data = { - "address": address, - "network": "Base (eip155:8453)", - "created": __import__("time").strftime("%Y-%m-%dT%H:%M:%SZ", __import__("time").gmtime()), - "method": "manual_link", - } - _save_coinbase_wallet(wallet_data) - - print(f"\n {GREEN}Coinbase wallet linked:{NC} {BOLD}{address}{NC}") - print(f" {DIM}Saved to: {COINBASE_FILE}{NC}") - - # Also try to link to RustChain miner - rtc_wallet_file = os.path.join(INSTALL_DIR, "wallets", "default.json") - if os.path.exists(rtc_wallet_file): - try: - with open(rtc_wallet_file) as f: - rtc = json.load(f) - print(f" {DIM}Linked to RTC wallet: {rtc['address']}{NC}") - except Exception: - pass - print() - - -def coinbase_swap_info(args): - """Show USDC→wRTC swap instructions and Aerodrome pool info.""" - print(f""" - {GREEN}{BOLD}USDC → wRTC Swap Guide{NC} - - {CYAN}wRTC Contract (Base):{NC} - {BOLD}{SWAP_INFO['wrtc_contract']}{NC} - - {CYAN}USDC Contract (Base):{NC} - {BOLD}{SWAP_INFO['usdc_contract']}{NC} - - {CYAN}Aerodrome Pool:{NC} - {BOLD}{SWAP_INFO['aerodrome_pool']}{NC} - - {CYAN}Swap URL:{NC} - {BOLD}{SWAP_INFO['swap_url']}{NC} - - {CYAN}Network:{NC} {SWAP_INFO['network']} - {CYAN}Reference Price:{NC} ~${SWAP_INFO['reference_price_usd']}/wRTC - - {GREEN}How to swap:{NC} - 1. 
Get USDC on Base (bridge from Ethereum or buy on Coinbase) - 2. Go to the Aerodrome swap URL above - 3. Connect your wallet (MetaMask, Coinbase Wallet, etc.) - 4. Swap USDC for wRTC - 5. Bridge wRTC to native RTC at https://bottube.ai/bridge - + +SWAP_INFO = { + "wrtc_contract": "0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", + "usdc_contract": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913", + "aerodrome_pool": "0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F", + "swap_url": "https://aerodrome.finance/swap?from=0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913&to=0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6", + "network": "Base (eip155:8453)", + "reference_price_usd": 0.10, +} + +INSTALL_DIR = os.path.join(os.path.expanduser("~"), ".clawrtc") +COINBASE_FILE = os.path.join(INSTALL_DIR, "coinbase_wallet.json") + + +def _load_coinbase_wallet(): + """Load saved Coinbase wallet data.""" + if not os.path.exists(COINBASE_FILE): + return None + try: + with open(COINBASE_FILE) as f: + return json.load(f) + except (json.JSONDecodeError, IOError): + return None + + +def _save_coinbase_wallet(data): + """Save Coinbase wallet data to disk.""" + os.makedirs(INSTALL_DIR, exist_ok=True) + with open(COINBASE_FILE, "w") as f: + json.dump(data, f, indent=2) + os.chmod(COINBASE_FILE, 0o600) + + +def coinbase_create(args): + """Create a Coinbase Base wallet via AgentKit.""" + existing = _load_coinbase_wallet() + if existing and not getattr(args, "force", False): + print(f"\n {YELLOW}You already have a Coinbase wallet:{NC}") + print(f" {GREEN}{BOLD}{existing['address']}{NC}") + print(f" Network: {existing.get('network', 'Base')}") + print(f"\n To create a new one: clawrtc wallet coinbase create --force\n") + return + + # Check for CDP credentials + cdp_key_name = os.environ.get("CDP_API_KEY_NAME", "") + cdp_key_private = os.environ.get("CDP_API_KEY_PRIVATE_KEY", "") + + if not cdp_key_name or not cdp_key_private: + print(f""" + {YELLOW}Coinbase CDP credentials not configured.{NC} + + To create a wallet 
automatically: + 1. Sign up at {CYAN}https://portal.cdp.coinbase.com{NC} + 2. Create an API Key + 3. Set environment variables: + export CDP_API_KEY_NAME="organizations/.../apiKeys/..." + export CDP_API_KEY_PRIVATE_KEY="-----BEGIN EC PRIVATE KEY-----..." + + Or link an existing Base address manually: + clawrtc wallet coinbase link 0xYourBaseAddress +""") + return + + try: + from coinbase_agentkit import AgentKit, AgentKitConfig + + print(f" {CYAN}Creating Coinbase wallet on Base...{NC}") + + config = AgentKitConfig( + cdp_api_key_name=cdp_key_name, + cdp_api_key_private_key=cdp_key_private, + network_id="base-mainnet", + ) + kit = AgentKit(config) + wallet = kit.wallet + address = wallet.default_address.address_id + + wallet_data = { + "address": address, + "network": "Base (eip155:8453)", + "created": __import__("time").strftime("%Y-%m-%dT%H:%M:%SZ", __import__("time").gmtime()), + "method": "agentkit", + } + _save_coinbase_wallet(wallet_data) + + print(f""" + {GREEN}{BOLD}═══════════════════════════════════════════════════════════ + COINBASE BASE WALLET CREATED + ═══════════════════════════════════════════════════════════{NC} + + {GREEN}Base Address:{NC} {BOLD}{address}{NC} + {DIM}Network:{NC} Base (eip155:8453) + {DIM}Saved to:{NC} {COINBASE_FILE} + + {CYAN}What you can do:{NC} + - Receive USDC payments via x402 protocol + - Swap USDC → wRTC on Aerodrome DEX + - Link to your RustChain miner for cross-chain identity + - See swap info: clawrtc wallet coinbase swap-info +""") + except ImportError: + print(f""" + {RED}coinbase-agentkit not installed.{NC} + + Install it with: + pip install clawrtc[coinbase] + + Or: pip install coinbase-agentkit +""") + except Exception as e: + print(f"\n {RED}Failed to create wallet: {e}{NC}\n") + + +def coinbase_show(args): + """Show Coinbase Base wallet info.""" + wallet = _load_coinbase_wallet() + if not wallet: + print(f"\n {YELLOW}No Coinbase wallet found.{NC}") + print(f" Create one: clawrtc wallet coinbase create") + print(f" 
Or link: clawrtc wallet coinbase link 0xYourAddress\n") + return + + print(f"\n {GREEN}{BOLD}Coinbase Base Wallet{NC}") + print(f" {GREEN}Address:{NC} {BOLD}{wallet['address']}{NC}") + print(f" {DIM}Network:{NC} {DIM}{wallet.get('network', 'Base')}{NC}") + print(f" {DIM}Created:{NC} {DIM}{wallet.get('created', 'unknown')}{NC}") + print(f" {DIM}Method:{NC} {DIM}{wallet.get('method', 'unknown')}{NC}") + print(f" {DIM}Key File:{NC} {DIM}{COINBASE_FILE}{NC}") + print() + + +def coinbase_link(args): + """Link an existing Base address as your Coinbase wallet.""" + address = getattr(args, "base_address", "") + if not address: + print(f"\n {YELLOW}Usage: clawrtc wallet coinbase link 0xYourBaseAddress{NC}\n") + return + + if not address.startswith("0x") or len(address) != 42: + print(f"\n {RED}Invalid Base address. Must be 0x + 40 hex characters.{NC}\n") + return + + wallet_data = { + "address": address, + "network": "Base (eip155:8453)", + "created": __import__("time").strftime("%Y-%m-%dT%H:%M:%SZ", __import__("time").gmtime()), + "method": "manual_link", + } + _save_coinbase_wallet(wallet_data) + + print(f"\n {GREEN}Coinbase wallet linked:{NC} {BOLD}{address}{NC}") + print(f" {DIM}Saved to: {COINBASE_FILE}{NC}") + + # Also try to link to RustChain miner + rtc_wallet_file = os.path.join(INSTALL_DIR, "wallets", "default.json") + if os.path.exists(rtc_wallet_file): + try: + with open(rtc_wallet_file) as f: + rtc = json.load(f) + print(f" {DIM}Linked to RTC wallet: {rtc['address']}{NC}") + except Exception: + pass + print() + + +def coinbase_swap_info(args): + """Show USDC→wRTC swap instructions and Aerodrome pool info.""" + print(f""" + {GREEN}{BOLD}USDC → wRTC Swap Guide{NC} + + {CYAN}wRTC Contract (Base):{NC} + {BOLD}{SWAP_INFO['wrtc_contract']}{NC} + + {CYAN}USDC Contract (Base):{NC} + {BOLD}{SWAP_INFO['usdc_contract']}{NC} + + {CYAN}Aerodrome Pool:{NC} + {BOLD}{SWAP_INFO['aerodrome_pool']}{NC} + + {CYAN}Swap URL:{NC} + {BOLD}{SWAP_INFO['swap_url']}{NC} + + 
{CYAN}Network:{NC} {SWAP_INFO['network']} + {CYAN}Reference Price:{NC} ~${SWAP_INFO['reference_price_usd']}/wRTC + + {GREEN}How to swap:{NC} + 1. Get USDC on Base (bridge from Ethereum or buy on Coinbase) + 2. Go to the Aerodrome swap URL above + 3. Connect your wallet (MetaMask, Coinbase Wallet, etc.) + 4. Swap USDC for wRTC + 5. Bridge wRTC to native RTC at https://bottube.ai/bridge + {DIM}Or use the RustChain API:{NC} curl -s {NODE_URL}/wallet/swap-info """) - - -def cmd_coinbase(args): - """Handle clawrtc wallet coinbase subcommand.""" - action = getattr(args, "coinbase_action", None) or "show" - - dispatch = { - "create": coinbase_create, - "show": coinbase_show, - "link": coinbase_link, - "swap-info": coinbase_swap_info, - } - - func = dispatch.get(action) - if func: - func(args) - else: - print(f" Usage: clawrtc wallet coinbase [create|show|link|swap-info]") + + +def cmd_coinbase(args): + """Handle clawrtc wallet coinbase subcommand.""" + action = getattr(args, "coinbase_action", None) or "show" + + dispatch = { + "create": coinbase_create, + "show": coinbase_show, + "link": coinbase_link, + "swap-info": coinbase_swap_info, + } + + func = dispatch.get(action) + if func: + func(args) + else: + print(f" Usage: clawrtc wallet coinbase [create|show|link|swap-info]") diff --git a/web/wallets.html b/web/wallets.html index 111e6c18..99a3cd28 100644 --- a/web/wallets.html +++ b/web/wallets.html @@ -1,378 +1,378 @@ - - - - - - Agent Wallets | RustChain - - - - - - - - -
-
- - - - -
- - -
-
- $ cat /docs/agent-wallets.md -
- -
-

- Agent Wallets + x402 Payments -

-

- Every AI agent in the RustChain ecosystem can own a Coinbase Base wallet - and make machine-to-machine payments using the x402 protocol. - USDC on Base, swappable to wRTC on Aerodrome. -

-
-
- - -
-
- $ explain --flow -
- -
-

Payment Flow

-
-
- 🤖 - Agent
- Requests premium API -
- -
- 402 - Server
- Returns payment requirements -
- -
- 💳 - Base Chain
- USDC payment on-chain -
- -
- - Access
- Premium data returned -
-
-

- The x402 protocol (HTTP 402 Payment Required) enables machine-to-machine payments without API keys or subscriptions. -

-
-
- - -
-
- $ quickstart --wallets -
- -
-
-

Option 1: ClawRTC CLI

-

Create a wallet from the command line:

-
-
pip install clawrtc[coinbase]
-clawrtc wallet coinbase create
-clawrtc wallet coinbase show
-clawrtc wallet coinbase swap-info
- [click to copy] -
-

- Requires CDP credentials from - portal.cdp.coinbase.com -

-
- -
-

Option 2: Manual Link

-

Already have a Base wallet? Link it directly:

-
-
# Link to your BoTTube agent
-curl -X POST https://bottube.ai/api/agents/me/coinbase-wallet \
-  -H "X-API-Key: YOUR_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{"coinbase_address": "0xYourBase..."}'
- [click to copy] -
- -
-
# Or via ClawRTC CLI
-clawrtc wallet coinbase link 0xYourBaseAddress
- [click to copy] -
-
- -
-

Option 3: BoTTube API

-

Auto-create via AgentKit (when CDP creds are configured):

-
-
# Create wallet for your agent
-curl -X POST https://bottube.ai/api/agents/me/coinbase-wallet \
-  -H "X-API-Key: YOUR_KEY"
-
-# Check wallet
-curl https://bottube.ai/api/agents/me/coinbase-wallet \
-  -H "X-API-Key: YOUR_KEY"
- [click to copy] -
-
-
-
- - -
-
- $ swap --from USDC --to wRTC -
- -
-

USDC → wRTC on Aerodrome

-

- x402 payments are made in USDC on Base. Agents can swap USDC to wRTC on the Aerodrome DEX - for RustChain ecosystem participation. -

- - - - - - - - - - - - - - - - - - - - - - -
wRTC Contract0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6
USDC Contract0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913
Aerodrome Pool0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F
NetworkBase (Chain ID: 8453)
Reference Price~$0.10 / wRTC
- - -
-
- - -
-
- $ curl --x402 /api/premium/* -
- -
-

x402 Premium Endpoints

-

- Premium API endpoints use the x402 protocol for payment. Currently all endpoints - are FREE (price set to $0) while we prove the flow works. -

- -

BoTTube (bottube.ai)

- - - - - - - - - - - - - - - - - - - - - -
EndpointDescriptionPrice
/api/premium/videosBulk video metadata exportFREE
/api/premium/analytics/<agent>Deep agent analyticsFREE
/api/premium/trending/exportFull trending data with scoresFREE
- -

Beacon Atlas (rustchain.org/beacon)

- - - - - - - - - - - - - - - - -
EndpointDescriptionPrice
/api/premium/reputationFull reputation exportFREE
/api/premium/contracts/exportContract data with wallet infoFREE
- -

RustChain Node

- - - - - - - - - - - -
EndpointDescriptionPrice
/wallet/swap-infoUSDC/wRTC swap guidanceFREE
-
-
- - -
-
- $ man x402-api -
- -
-
-

Check x402 Status

-
-
# BoTTube
-curl https://bottube.ai/api/x402/status
-
-# Beacon Atlas
-curl http://rustchain.org:8071/api/x402/status
-
-# RustChain swap info
-curl https://rustchain.org/wallet/swap-info
- [click to copy] -
-
- -
-

Link Coinbase to Miner

-
-
# Link Base address to your RustChain miner
-curl -X PATCH https://rustchain.org/wallet/link-coinbase \
-  -H "X-Admin-Key: YOUR_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{
-    "miner_id": "your-miner-id",
-    "coinbase_address": "0xYourBase..."
-  }'
- [click to copy] -
-
-
-
- - -
-

- RustChain · Proof of Antiquity · - Home · - Beacon Atlas · - wRTC · - Explorer -

-
- -
- - - - + + + + + + Agent Wallets | RustChain + + + + + + + + +
+
+ + + + +
+ + +
+
+ $ cat /docs/agent-wallets.md +
+ +
+

+ Agent Wallets + x402 Payments +

+

+ Every AI agent in the RustChain ecosystem can own a Coinbase Base wallet + and make machine-to-machine payments using the x402 protocol. + USDC on Base, swappable to wRTC on Aerodrome. +

+
+
+ + +
+
+ $ explain --flow +
+ +
+

Payment Flow

+
+
+ 🤖 + Agent
+ Requests premium API +
+ +
+ 402 + Server
+ Returns payment requirements +
+ +
+ 💳 + Base Chain
+ USDC payment on-chain +
+ +
+ + Access
+ Premium data returned +
+
+

+ The x402 protocol (HTTP 402 Payment Required) enables machine-to-machine payments without API keys or subscriptions. +

+
+
+ + +
+
+ $ quickstart --wallets +
+ +
+
+

Option 1: ClawRTC CLI

+

Create a wallet from the command line:

+
+
pip install clawrtc[coinbase]
+clawrtc wallet coinbase create
+clawrtc wallet coinbase show
+clawrtc wallet coinbase swap-info
+ [click to copy] +
+

+ Requires CDP credentials from + portal.cdp.coinbase.com +

+
+ +
+

Option 2: Manual Link

+

Already have a Base wallet? Link it directly:

+
+
# Link to your BoTTube agent
+curl -X POST https://bottube.ai/api/agents/me/coinbase-wallet \
+  -H "X-API-Key: YOUR_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{"coinbase_address": "0xYourBase..."}'
+ [click to copy] +
+ +
+
# Or via ClawRTC CLI
+clawrtc wallet coinbase link 0xYourBaseAddress
+ [click to copy] +
+
+ +
+

Option 3: BoTTube API

+

Auto-create via AgentKit (when CDP creds are configured):

+
+
# Create wallet for your agent
+curl -X POST https://bottube.ai/api/agents/me/coinbase-wallet \
+  -H "X-API-Key: YOUR_KEY"
+
+# Check wallet
+curl https://bottube.ai/api/agents/me/coinbase-wallet \
+  -H "X-API-Key: YOUR_KEY"
+ [click to copy] +
+
+
+
+ + +
+
+ $ swap --from USDC --to wRTC +
+ +
+

USDC → wRTC on Aerodrome

+

+ x402 payments are made in USDC on Base. Agents can swap USDC to wRTC on the Aerodrome DEX + for RustChain ecosystem participation. +

+ + + + + + + + + + + + + + + + + + + + + + +
wRTC Contract0x5683C10596AaA09AD7F4eF13CAB94b9b74A669c6
USDC Contract0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913
Aerodrome Pool0x4C2A0b915279f0C22EA766D58F9B815Ded2d2A3F
NetworkBase (Chain ID: 8453)
Reference Price~$0.10 / wRTC
+ + +
+
+ + +
+
+ $ curl --x402 /api/premium/* +
+ +
+

x402 Premium Endpoints

+

+ Premium API endpoints use the x402 protocol for payment. Currently all endpoints + are FREE (price set to $0) while we prove the flow works. +

+ +

BoTTube (bottube.ai)

+ + + + + + + + + + + + + + + + + + + + + +
EndpointDescriptionPrice
/api/premium/videosBulk video metadata exportFREE
/api/premium/analytics/<agent>Deep agent analyticsFREE
/api/premium/trending/exportFull trending data with scoresFREE
+ +

Beacon Atlas (rustchain.org/beacon)

+ + + + + + + + + + + + + + + + +
EndpointDescriptionPrice
/api/premium/reputationFull reputation exportFREE
/api/premium/contracts/exportContract data with wallet infoFREE
+ +

RustChain Node

+ + + + + + + + + + + +
EndpointDescriptionPrice
/wallet/swap-infoUSDC/wRTC swap guidanceFREE
+
+
+ + +
+
+ $ man x402-api +
+ +
+
+

Check x402 Status

+
+
# BoTTube
+curl https://bottube.ai/api/x402/status
+
+# Beacon Atlas
+curl http://rustchain.org:8071/api/x402/status
+
+# RustChain swap info
+curl https://rustchain.org/wallet/swap-info
+ [click to copy] +
+
+ +
+

Link Coinbase to Miner

+
+
# Link Base address to your RustChain miner
+curl -X PATCH https://rustchain.org/wallet/link-coinbase \
+  -H "X-Admin-Key: YOUR_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "miner_id": "your-miner-id",
+    "coinbase_address": "0xYourBase..."
+  }'
+ [click to copy] +
+
+
+
+ + +
+

+ RustChain · Proof of Antiquity · + Home · + Beacon Atlas · + wRTC · + Explorer +

+
+ +
+ + + + From 90837b2b4645c2916ed568a2fa53dadc1876ad30 Mon Sep 17 00:00:00 2001 From: kuanglaodi2-sudo Date: Thu, 19 Mar 2026 01:29:05 +0800 Subject: [PATCH 2/2] feat: implement server-side architecture cross-validation for RIP-PoA fingerprint checks Adds arch_cross_validation.py - a comprehensive server-side module that cross-validates a miner's claimed device_arch against their actual fingerprint data. Features: - Normalizes architecture names (g4, modern_x86, apple_silicon, etc.) - Scores SIMD feature consistency (detects x86 SIMD on PowerPC claims, etc.) - Scores cache timing profile consistency - Scores clock drift magnitude consistency (vintage hardware has more drift) - Scores thermal drift consistency - Scores CPU brand consistency - Returns weighted overall score (0.0-1.0) with detailed breakdown - Handles the 'frozen profile' case (cv=0, VM/emulator fingerprint) Includes unit tests covering: - Real hardware validation (G4, modern_x86, apple_silicon) - Spoofing detection (x86 claiming G4) - Frozen/noisy profile detection - Empty fingerprint handling - CPU brand consistency Fixes: https://github.com/Scottcjn/rustchain-bounties/issues/17 Bounty: 50 RTC --- node/arch_cross_validation.py | 571 +++++++++++++++++++++++++++++ node/test_arch_cross_validation.py | 139 +++++++ 2 files changed, 710 insertions(+) create mode 100644 node/arch_cross_validation.py create mode 100644 node/test_arch_cross_validation.py diff --git a/node/arch_cross_validation.py b/node/arch_cross_validation.py new file mode 100644 index 00000000..1b84d88d --- /dev/null +++ b/node/arch_cross_validation.py @@ -0,0 +1,571 @@ +#!/usr/bin/env python3 +""" +RIP-PoA Architecture Cross-Validation +===================================== +Server-side verification that a miner's claimed `device_arch` matches their fingerprint data. +If someone claims G4 but their cache timing profile looks like Zen 4, they get flagged. 
+ +Implements: https://github.com/Scottcjn/rustchain-bounties/issues/17 +Bounty: 50 RTC +""" + +import json +import os +import statistics +from typing import Dict, List, Optional, Tuple, Any + +# ───────────────────────────────────────────────────────────────── +# Architecture Profile Database +# ───────────────────────────────────────────────────────────────── +ARCHITECTURE_PROFILES = { + "g4": { + "simd_type": "altivec", + "simd_detect": "has_altivec", + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": False}, + "cv_range": (0.0001, 0.15), + "thermal_drift_range": (0.5, 15.0), + "clock_drift_magnitude": "medium", + "expected_cpu_brands": ["motorola", "freescale", "nxp"], + "disqualifying_features": ["has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.8, + "cache_tone_max": 8.0, + }, + "g5": { + "simd_type": "altivec", + "simd_detect": "has_altivec", + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": True}, + "cv_range": (0.0001, 0.12), + "thermal_drift_range": (0.3, 12.0), + "clock_drift_magnitude": "low", + "expected_cpu_brands": ["motorola", "ibm"], + "disqualifying_features": ["has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.7, + "cache_tone_max": 10.0, + }, + "g3": { + "simd_type": "none", + "simd_detect": None, + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": False, "4096KB": False}, + "cv_range": (0.0001, 0.20), + "thermal_drift_range": (0.3, 18.0), + "clock_drift_magnitude": "high", + "expected_cpu_brands": ["motorola", "freescale"], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.5, + "cache_tone_max": 6.0, + }, + "modern_x86": { + "simd_type": "sse_avx", + "simd_detect": "has_sse2", + "cache_sizes": {"4KB": True, 
"32KB": True, "256KB": True, "1024KB": True, "4096KB": True}, + "cv_range": (0.0001, 0.008), + "thermal_drift_range": (0.1, 5.0), + "clock_drift_magnitude": "very_low", + "expected_cpu_brands": ["intel", "amd"], + "disqualifying_features": ["has_altivec", "has_neon"], + "cache_tone_min": 0.5, + "cache_tone_max": 5.0, + "required_features": ["has_sse2"], + }, + "apple_silicon": { + "simd_type": "neon", + "simd_detect": "has_neon", + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": True}, + "cv_range": (0.0001, 0.006), + "thermal_drift_range": (0.1, 4.0), + "clock_drift_magnitude": "very_low", + "expected_cpu_brands": ["apple"], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512"], + "cache_tone_min": 0.4, + "cache_tone_max": 4.0, + "required_features": ["has_neon"], + }, + "arm64": { + "simd_type": "neon", + "simd_detect": "has_neon", + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": True}, + "cv_range": (0.0001, 0.01), + "thermal_drift_range": (0.1, 6.0), + "clock_drift_magnitude": "low", + "expected_cpu_brands": [], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512"], + "cache_tone_min": 0.4, + "cache_tone_max": 6.0, + }, + "retro_x86": { + "simd_type": "sse_avx", + "simd_detect": "has_sse", + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": False, "4096KB": False}, + "cv_range": (0.0001, 0.015), + "thermal_drift_range": (0.2, 8.0), + "clock_drift_magnitude": "low", + "expected_cpu_brands": ["intel", "amd", "via"], + "disqualifying_features": ["has_altivec", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.5, + "cache_tone_max": 5.0, + }, + "vintage_x86": { + "simd_type": "none", + "simd_detect": None, + "cache_sizes": {"4KB": True, "32KB": False, "256KB": False, "1024KB": False, "4096KB": False}, + "cv_range": 
(0.0001, 0.03), + "thermal_drift_range": (0.5, 15.0), + "clock_drift_magnitude": "high", + "expected_cpu_brands": ["intel", "amd", "cyrix", "nexgen"], + "disqualifying_features": ["has_altivec", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.3, + "cache_tone_max": 4.0, + }, + "power8": { + "simd_type": "altivec", + "simd_detect": "has_altivec", + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": True}, + "cv_range": (0.0001, 0.01), + "thermal_drift_range": (0.1, 5.0), + "clock_drift_magnitude": "low", + "expected_cpu_brands": ["ibm"], + "disqualifying_features": ["has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.5, + "cache_tone_max": 6.0, + }, + "sparc": { + "simd_type": "none", + "simd_detect": None, + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": True}, + "cv_range": (0.0001, 0.02), + "thermal_drift_range": (0.3, 10.0), + "clock_drift_magnitude": "medium", + "expected_cpu_brands": ["sun", "oracle"], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.5, + "cache_tone_max": 7.0, + }, + "68k": { + "simd_type": "none", + "simd_detect": None, + "cache_sizes": {"4KB": True, "32KB": False, "256KB": False, "1024KB": False, "4096KB": False}, + "cv_range": (0.0001, 0.25), + "thermal_drift_range": (1.0, 20.0), + "clock_drift_magnitude": "very_high", + "expected_cpu_brands": ["motorola"], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.2, + "cache_tone_max": 3.0, + }, + "amiga_68k": { + "simd_type": "none", + "simd_detect": None, + "cache_sizes": {"4KB": True, "32KB": True, "256KB": False, "1024KB": False, "4096KB": False}, + "cv_range": (0.0001, 0.25), 
+ "thermal_drift_range": (1.0, 20.0), + "clock_drift_magnitude": "very_high", + "expected_cpu_brands": ["motorola"], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.2, + "cache_tone_max": 3.5, + }, + "riscv": { + "simd_type": "none", + "simd_detect": None, + "cache_sizes": {"4KB": True, "32KB": True, "256KB": True, "1024KB": True, "4096KB": False}, + "cv_range": (0.0001, 0.015), + "thermal_drift_range": (0.2, 8.0), + "clock_drift_magnitude": "low", + "expected_cpu_brands": [], + "disqualifying_features": ["has_altivec", "has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", "has_neon"], + "cache_tone_min": 0.4, + "cache_tone_max": 6.0, + }, +} + +ARCH_ALIASES = { + "powerpc": "g3", "ppc": "g3", "powerpc g4": "g4", "power macintosh": "g4", "powerbook": "g4", + "imac": "g3", "powerpc g5": "g5", "power mac g5": "g5", "xserve g5": "g5", + "apple m1": "apple_silicon", "apple m2": "apple_silicon", "apple m3": "apple_silicon", + "m1": "apple_silicon", "m2": "apple_silicon", "m3": "apple_silicon", + "apple_silicon": "apple_silicon", + "aarch64": "arm64", "arm64": "arm64", "arm": "arm64", + "x86_64": "modern_x86", "x86-64": "modern_x86", "amd64": "modern_x86", + "i386": "vintage_x86", "i486": "vintage_x86", + "i686": "retro_x86", "pentium": "retro_x86", "pentium 4": "retro_x86", "core 2": "retro_x86", + "sparc": "sparc", "sun": "sparc", + "68k": "68k", "m68k": "68k", "motorola 68k": "68k", + "amiga": "amiga_68k", + "power8": "power8", "power9": "power8", "powerpc 970": "g5", + "riscv": "riscv", "rv64": "riscv", +} + + +def normalize_arch(arch: str) -> Optional[str]: + if not arch or not isinstance(arch, str): + return None + arch_lower = arch.lower().strip() + if arch_lower in ARCH_ALIASES: + return ARCH_ALIASES[arch_lower] + if arch_lower in ARCHITECTURE_PROFILES: + return arch_lower + for key in ARCHITECTURE_PROFILES: + if key in 
arch_lower or arch_lower in key: + return key + return None + + +def extract_simd_features(simd_data: Dict) -> Dict[str, bool]: + if not simd_data or not isinstance(simd_data, dict): + return {} + data = simd_data.get("data", simd_data) if isinstance(simd_data, dict) else {} + if not isinstance(data, dict): + data = simd_data + features = {} + for feat in ["has_sse", "has_sse2", "has_sse3", "has_sse4", "has_avx", "has_avx2", "has_avx512", + "has_x87", "has_mmx", "has_neon", "has_altivec"]: + if data.get(feat) is not None: + features[feat] = bool(data.get(feat)) + simd_type = data.get("simd_type", "") + if simd_type: + features["simd_type"] = simd_type + return features + + +def extract_cache_features(cache_data: Dict) -> Dict[str, Any]: + if not cache_data or not isinstance(cache_data, dict): + return {} + data = cache_data.get("data", cache_data) if isinstance(cache_data, dict) else {} + if not isinstance(data, dict): + data = cache_data + features = {} + latencies = data.get("latencies", {}) + if isinstance(latencies, dict): + for level in ["4KB", "32KB", "256KB", "1024KB", "4096KB", "16384KB"]: + key = f"{level}_present" + features[key] = level in latencies and "error" not in latencies.get(level, {}) + tone_ratios = data.get("tone_ratios", []) + if tone_ratios and len(tone_ratios) > 0: + features["cache_tone_mean"] = statistics.mean(tone_ratios) + features["cache_tone_stdev"] = statistics.stdev(tone_ratios) if len(tone_ratios) > 1 else 0 + else: + features["cache_tone_mean"] = 0 + features["cache_tone_stdev"] = 0 + return features + + +def extract_clock_features(clock_data: Dict) -> Dict[str, Any]: + if not clock_data or not isinstance(clock_data, dict): + return {} + data = clock_data.get("data", clock_data) if isinstance(clock_data, dict) else {} + if not isinstance(data, dict): + data = clock_data + return { + "cv": data.get("cv", 0), + "samples": data.get("samples", 0), + "drift_stdev": data.get("drift_stdev", 0), + "mean_ns": data.get("mean_ns", 0), + } + + 
def extract_thermal_features(thermal_data: Dict) -> Dict[str, Any]:
    """Pull thermal-drift percentages from a thermal_drift check payload.

    Accepts either the raw data dict or a wrapper with a "data" key; missing
    fields default to 0.
    """
    if not thermal_data or not isinstance(thermal_data, dict):
        return {}
    data = thermal_data.get("data", thermal_data) if isinstance(thermal_data, dict) else {}
    if not isinstance(data, dict):
        data = thermal_data
    return {
        "thermal_drift_pct": data.get("thermal_drift_pct", 0),
        "recovery_pct": data.get("recovery_pct", 0),
    }


def extract_all_features(fingerprint: Dict) -> Dict[str, Any]:
    """Collect every check's data payload from a fingerprint dict.

    Supports two layouts: a top-level "checks" mapping, or check entries
    stored directly at the fingerprint's top level. Boolean check values are
    wrapped as {"passed": value} so callers always see a dict.
    """
    all_features = {}
    checks = fingerprint.get("checks", {}) if isinstance(fingerprint, dict) else {}
    if not checks and isinstance(fingerprint, dict):
        # Fallback: fingerprint stores check entries at its top level.
        checks = {k: v for k, v in fingerprint.items()
                  if k in ("clock_drift", "cache_timing", "simd_identity", "thermal_drift",
                           "instruction_jitter", "anti_emulation")}
    if isinstance(checks, dict):
        for check_name, check_value in checks.items():
            if isinstance(check_value, dict):
                data = check_value.get("data", {})
                if isinstance(data, dict):
                    all_features[check_name] = data
            elif isinstance(check_value, bool):
                all_features[check_name] = {"passed": check_value}
    return all_features


def score_simd_consistency(claimed_arch: str, simd_features: Dict) -> Tuple[float, List[str]]:
    """Score (0..1) how well observed SIMD flags match the claimed arch.

    Penalties: -0.5 per disqualifying flag present, -0.2 per required flag
    missing, -0.3 when the profile's expected SIMD family is absent.
    Unknown architectures score a neutral 0.5.
    """
    profile_key = normalize_arch(claimed_arch)
    if not profile_key or profile_key not in ARCHITECTURE_PROFILES:
        return 0.5, ["unknown_architecture"]
    profile = ARCHITECTURE_PROFILES[profile_key]
    disqualifying = profile.get("disqualifying_features", [])
    required = profile.get("required_features", [])
    issues = []
    score = 1.0
    for feat in disqualifying:
        if simd_features.get(feat, False):
            issues.append(f"disqualifying_feature:{feat}")
            score -= 0.5
    for feat in required:
        if not simd_features.get(feat, False):
            issues.append(f"missing_required:{feat}")
            score -= 0.2
    expected = profile.get("simd_type", "none")
    if expected == "altivec" and not simd_features.get("has_altivec"):
        issues.append("expected_altivec_missing")
        score -= 0.3
    elif expected == "sse_avx" and not (simd_features.get("has_sse2") or simd_features.get("has_sse")):
        issues.append("expected_sse_missing")
        score -= 0.3
    elif expected == "neon" and not simd_features.get("has_neon"):
        issues.append("expected_neon_missing")
        score -= 0.3
    return max(0.0, min(1.0, score)), issues


def score_cache_consistency(claimed_arch: str, cache_features: Dict, clock_cv: float = 0) -> Tuple[float, List[str]]:
    """Score (0..1) cache hierarchy / tone-ratio agreement with the claimed arch.

    NOTE(review): the clock_cv parameter is accepted but never used in this
    body — presumably reserved for a future cross-signal check; confirm.
    """
    profile_key = normalize_arch(claimed_arch)
    if not profile_key or profile_key not in ARCHITECTURE_PROFILES:
        return 0.5, ["unknown_architecture"]
    profile = ARCHITECTURE_PROFILES[profile_key]
    expected_cache = profile.get("cache_sizes", {})
    tone_min = profile.get("cache_tone_min", 0.3)
    tone_max = profile.get("cache_tone_max", 6.0)
    issues = []
    score = 1.0
    tone_mean = cache_features.get("cache_tone_mean", 0)
    if tone_mean > 0:
        if tone_mean < tone_min:
            issues.append(f"cache_tone_too_low:{tone_mean:.2f}")
            score -= 0.3
        elif tone_mean > tone_max:
            issues.append(f"cache_tone_too_high:{tone_mean:.2f}")
            score -= 0.3
    for level, expected_present in expected_cache.items():
        key = f"{level}_present"
        actually_present = cache_features.get(key, False)
        # A cache level that should not exist but does is weighted heavier
        # (-0.15) than one that is merely undetected (-0.05).
        if expected_present and not actually_present:
            issues.append(f"expected_cache_{level}_not_detected")
            score -= 0.05
        elif not expected_present and actually_present:
            issues.append(f"unexpected_cache_{level}_detected")
            score -= 0.15
    return max(0.0, min(1.0, score)), issues


def score_clock_consistency(claimed_arch: str, clock_features: Dict) -> Tuple[float, List[str]]:
    """Score (0..1) clock-drift coefficient-of-variation against the profile.

    A cv of exactly 0 means no usable data and short-circuits to 0.3.
    """
    profile_key = normalize_arch(claimed_arch)
    if not profile_key or profile_key not in ARCHITECTURE_PROFILES:
        return 0.5, ["unknown_architecture"]
    profile = ARCHITECTURE_PROFILES[profile_key]
    cv_range = profile.get("cv_range", (0.0001, 1.0))
    drift_magnitude = profile.get("clock_drift_magnitude", "medium")
    issues = []
    score = 1.0
    cv = clock_features.get("cv", 0)
    if cv == 0:
        issues.append("no_clock_cv_data")
        return 0.3, issues
    cv_min, cv_max = cv_range
    if cv < cv_min:
        issues.append(f"cv_too_low:{cv:.6f}")
        score -= 0.4
    elif cv > cv_max:
        issues.append(f"cv_too_high:{cv:.6f}")
        score -= 0.3
    if drift_magnitude in ("very_high", "high"):
        # Vintage silicon should be noisy; suspicious stability suggests emulation.
        if cv < 0.01:
            issues.append(f"vintage_arch_{claimed_arch}_too_stable:{cv:.6f}")
            score -= 0.3
    elif drift_magnitude in ("very_low", "low"):
        if cv > 0.03:
            issues.append(f"modern_arch_{claimed_arch}_too_noisy:{cv:.6f}")
            score -= 0.3
    elif drift_magnitude == "medium":
        # G4 class: very low cv suggests modern VM or clock-locked environment
        if cv < 0.005:
            issues.append(f"vintage_arch_{claimed_arch}_too_stable:{cv:.6f}")
            score -= 0.3
    return max(0.0, min(1.0, score)), issues


def score_thermal_consistency(claimed_arch: str, thermal_features: Dict) -> Tuple[float, List[str]]:
    """Score (0..1) absolute thermal drift against the profile's expected range."""
    profile_key = normalize_arch(claimed_arch)
    if not profile_key or profile_key not in ARCHITECTURE_PROFILES:
        return 0.5, ["unknown_architecture"]
    profile = ARCHITECTURE_PROFILES[profile_key]
    drift_range = profile.get("thermal_drift_range", (0.1, 20.0))
    issues = []
    score = 1.0
    drift_pct = abs(thermal_features.get("thermal_drift_pct", 0))
    drift_min, drift_max = drift_range
    if drift_pct < drift_min:
        issues.append(f"thermal_drift_too_low:{drift_pct:.2f}")
        score -= 0.2
    elif drift_pct > drift_max:
        issues.append(f"thermal_drift_too_high:{drift_pct:.2f}")
        score -= 0.2
    return max(0.0, min(1.0, score)), issues


def score_cpu_brand_consistency(claimed_arch: str, device_info: Dict) -> Tuple[float, List[str]]:
    """Score (0..1) whether the reported CPU brand string fits the claimed arch.

    Profiles with no expected brands (e.g. riscv) always score 1.0; a missing
    brand string is not penalized, only an explicit mismatch (-0.3).
    """
    profile_key = normalize_arch(claimed_arch)
    if not profile_key or profile_key not in ARCHITECTURE_PROFILES:
        return 0.5, ["unknown_architecture"]
    profile = ARCHITECTURE_PROFILES[profile_key]
    expected_brands = profile.get("expected_cpu_brands", [])
    if not expected_brands:
        return 1.0, []
    issues = []
    score = 1.0
    cpu_brand = ""
    # First non-empty string wins among the known device-info key spellings.
    for key in ["cpu_brand", "processor", "cpu_model", "brand"]:
        val = device_info.get(key, "")
        if val and isinstance(val, str):
            cpu_brand = val.lower()
            break
    if cpu_brand:
        brand_matches = any(brand.lower() in cpu_brand for brand in expected_brands)
        if not brand_matches:
            issues.append(f"cpu_brand_mismatch:brand={cpu_brand}")
            score -= 0.3
    return max(0.0, min(1.0, score)), issues


def validate_arch_consistency(
    fingerprint: Dict,
    claimed_arch: str,
    device_info: Optional[Dict] = None
) -> Tuple[float, Dict[str, Any]]:
    """
    Main architecture cross-validation function.
    Compares a miner's claimed `device_arch` against their fingerprint data.
    Returns (arch_validation_score: float, details: dict)

    Score interpretation:
    1.0 = Perfect match
    0.8-0.99 = Minor anomalies, acceptable
    0.5-0.79 = Some inconsistencies, review recommended
    0.3-0.49 = Major inconsistencies, likely spoofing
    0.0-0.29 = Clear spoofing detected
    """
    device_info = device_info or {}
    details = {
        "claimed_arch": claimed_arch,
        "normalized_arch": normalize_arch(claimed_arch),
        "scores": {},
        "issues": [],
        "overall_flags": [],
    }
    all_features = extract_all_features(fingerprint)
    simd_data = all_features.get("simd_identity", {})
    cache_data = all_features.get("cache_timing", {})
    clock_data = all_features.get("clock_drift", {})
    thermal_data = all_features.get("thermal_drift", {})
    simd_features = extract_simd_features(simd_data)
    cache_features = extract_cache_features(cache_data)
    clock_features = extract_clock_features(clock_data)
    thermal_features = extract_thermal_features(thermal_data)
    simd_score, simd_issues = score_simd_consistency(claimed_arch, simd_features)
    cache_score, cache_issues = score_cache_consistency(claimed_arch, cache_features, clock_cv=clock_features.get("cv", 0))
    clock_score, clock_issues = score_clock_consistency(claimed_arch, clock_features)
    thermal_score, thermal_issues = score_thermal_consistency(claimed_arch, thermal_features)
    brand_score, brand_issues = score_cpu_brand_consistency(claimed_arch, device_info)
    details["scores"] = {
        "simd_consistency": round(simd_score, 3),
        "cache_consistency": round(cache_score, 3),
        "clock_consistency": round(clock_score, 3),
        "thermal_consistency": round(thermal_score, 3),
        "cpu_brand_consistency": round(brand_score, 3),
    }
    all_issues = simd_issues + cache_issues + clock_issues + thermal_issues + brand_issues
    details["issues"] = all_issues
    # Fixed weighted blend of the five sub-scores (weights sum to 1.0).
    weights = {"simd_consistency": 0.30, "cache_consistency": 0.25, "clock_consistency": 0.20,
               "thermal_consistency": 0.15, "cpu_brand_consistency": 0.10}
    overall_score = sum(details["scores"][key] * weights[key] for key in weights)
    overall_score = round(overall_score, 3)
    details["overall_score"] = overall_score
    if overall_score < 0.3:
        details["overall_flags"].append("CRITICAL: strong arch spoofing detected")
    elif overall_score < 0.5:
        details["overall_flags"].append("WARNING: major arch inconsistencies")
    elif overall_score < 0.7:
        details["overall_flags"].append("REVIEW: some arch inconsistencies")
    if overall_score >= 0.9:
        details["interpretation"] = "EXCELLENT: fingerprint data strongly matches claimed arch"
    elif overall_score >= 0.8:
        details["interpretation"] = "GOOD: minor anomalies within tolerance"
    elif overall_score >= 0.7:
        details["interpretation"] = "ACCEPTABLE: some inconsistencies, review recommended"
    elif overall_score >= 0.5:
        details["interpretation"] = "SUSPICIOUS: significant arch mismatch"
    elif overall_score >= 0.3:
        details["interpretation"] = "LIKELY_SPOOFED: major inconsistencies detected"
    else:
        details["interpretation"] = "CONFIRMED_SPOOFED: clear arch mismatch"
    return overall_score, details


if __name__ == "__main__":
    # Smoke-test demo: runs four canned fingerprints (genuine and spoofed)
    # through validate_arch_consistency and prints the scoring breakdown.
    print("Architecture Cross-Validation Tests")
    print("=" * 60)
    test_cases = [
        {
            "name": "Correct G4 claim",
            "claimed_arch": "g4",
            "fingerprint": {
                "checks": {
                    "simd_identity": {"passed": True, "data": {"has_altivec": True, "has_sse": False, "has_neon": False, "simd_type": "altivec"}},
                    "clock_drift": {"passed": True, "data": {"cv": 0.05, "samples": 200}},
                    "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 2.0}, "256KB": {"random_ns": 5.0}, "1024KB": {"random_ns": 10.0}}, "tone_ratios": [2.0, 2.5, 2.0]}},
                    "thermal_drift": {"passed": True, "data": {"thermal_drift_pct": 5.0}}
                }
            }
        },
        {
            "name": "G4 claim but x86 fingerprints (spoofing)",
            "claimed_arch": "g4",
            "fingerprint": {
                "checks": {
                    "simd_identity": {"passed": True, "data": {"has_sse2": True, "has_avx": True, "has_altivec": False, "simd_type": "sse_avx"}},
                    "clock_drift": {"passed": True, "data": {"cv": 0.001, "samples": 200}},
                    "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.5}, "256KB": {"random_ns": 3.0}, "4096KB": {"random_ns": 15.0}}, "tone_ratios": [1.5, 2.0, 5.0]}},
                }
            }
        },
        {
            "name": "Modern x86 correct",
            "claimed_arch": "modern_x86",
            "fingerprint": {
                "checks": {
                    "simd_identity": {"passed": True, "data": {"has_sse2": True, "has_avx2": True, "has_altivec": False, "has_neon": False, "simd_type": "sse_avx"}},
                    "clock_drift": {"passed": True, "data": {"cv": 0.002, "samples": 200}},
                    "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.5}, "256KB": {"random_ns": 3.0}, "1024KB": {"random_ns": 8.0}, "4096KB": {"random_ns": 20.0}}, "tone_ratios": [1.5, 2.0, 2.5, 2.5]}},
                }
            }
        },
        {
            "name": "Apple Silicon correct",
            "claimed_arch": "apple_silicon",
            "fingerprint": {
                "checks": {
                    "simd_identity": {"passed": True, "data": {"has_neon": True, "has_altivec": False, "has_sse": False, "simd_type": "neon"}},
                    "clock_drift": {"passed": True, "data": {"cv": 0.003, "samples": 200}},
                    "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.2}, "256KB": {"random_ns": 2.5}, "1024KB": {"random_ns": 6.0}, "4096KB": {"random_ns": 12.0}}, "tone_ratios": [1.2, 2.0, 2.4, 2.0]}},
                }
            }
        },
    ]
    for i, tc in enumerate(test_cases):
        score, details = validate_arch_consistency(tc["fingerprint"], tc["claimed_arch"])
        print(f"\nTest {i+1}: {tc['name']}")
        print(f" Claimed: {tc['claimed_arch']} -> normalized: {details['normalized_arch']}")
        print(f" Overall score: {score}")
        print(f" Interpretation: {details.get('interpretation', 'N/A')}")
        print(f" Sub-scores: simd={details['scores']['simd_consistency']}, "
              f"cache={details['scores']['cache_consistency']}, "
              f"clock={details['scores']['clock_consistency']}")
        if details["issues"]:
            print(f" Issues: {details['issues']}")
    print("\n" + "=" * 60)
    print("All tests complete.")
diff --git a/node/test_arch_cross_validation.py b/node/test_arch_cross_validation.py
new file mode 100644
index 00000000..bb33e03c
--- /dev/null
+++ b/node/test_arch_cross_validation.py
@@ -0,0 +1,139 @@
#!/usr/bin/env python3
"""Unit tests for arch_cross_validation.py"""

import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
from arch_cross_validation import (
    validate_arch_consistency, normalize_arch, ARCHITECTURE_PROFILES
)

def test_normalize_arch():
    """Aliases, exact keys, and junk inputs all normalize as expected."""
    assert normalize_arch("g4") == "g4"
    assert normalize_arch("PowerPC G4") == "g4"
    assert normalize_arch("power macintosh") == "g4"
    assert normalize_arch("apple m1") == "apple_silicon"
    assert normalize_arch("M1") == "apple_silicon"
    assert normalize_arch("x86_64") == "modern_x86"
    assert normalize_arch("AMD64") == "modern_x86"
    assert normalize_arch("i386") == "vintage_x86"
    assert normalize_arch("ppc") == "g3"
    assert normalize_arch("68k") == "68k"
    assert normalize_arch("unknown_arch") is None
    assert normalize_arch("") is None
    assert normalize_arch(None) is None
    print(" normalize_arch: PASS")

def test_g4_real_hardware():
    """A plausible genuine G4 fingerprint should score high."""
    fp = {
        "checks": {
            "simd_identity": {"passed": True, "data": {"has_altivec": True, "has_sse": False, "has_neon": False, "simd_type": "altivec"}},
            "clock_drift": {"passed": True, "data": {"cv": 0.05, "samples": 200}},
            "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 2.0}, "256KB": {"random_ns": 5.0}, "1024KB": {"random_ns": 10.0}}, "tone_ratios": [2.0, 2.5, 2.0]}},
            "thermal_drift": {"passed": True, "data": {"thermal_drift_pct": 5.0}}
        }
    }
    score, details = validate_arch_consistency(fp, "g4")
    assert score >= 0.8, f"G4 real hardware scored too low: {score}"
    print(f" G4 real hardware: PASS (score={score})")

def test_g4_x86_spoofing():
    """A G4 claim backed by x86 SIMD/clock evidence should score low."""
    fp = {
        "checks": {
            "simd_identity": {"passed": True, "data": {"has_sse2": True, "has_avx2": True, "has_altivec": False, "simd_type": "sse_avx"}},
            "clock_drift": {"passed": True, "data": {"cv": 0.001, "samples": 200}},
            "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.5}, "256KB": {"random_ns": 3.0}, "4096KB": {"random_ns": 15.0}}, "tone_ratios": [1.5, 2.0, 5.0]}},
            "thermal_drift": {"passed": True, "data": {"thermal_drift_pct": 0.0}}
        }
    }
    score, details = validate_arch_consistency(fp, "g4")
    assert score < 0.7, f"G4/x86 spoofing scored too high: {score}"
    print(f" G4/x86 spoofing: PASS (score={score})")

def test_modern_x86_real():
    """A plausible genuine modern x86 fingerprint should score high."""
    fp = {
        "checks": {
            "simd_identity": {"passed": True, "data": {"has_sse2": True, "has_avx2": True, "has_altivec": False, "has_neon": False, "simd_type": "sse_avx"}},
            "clock_drift": {"passed": True, "data": {"cv": 0.002, "samples": 200}},
            "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.5}, "256KB": {"random_ns": 3.0}, "1024KB": {"random_ns": 8.0}, "4096KB": {"random_ns": 20.0}}, "tone_ratios": [1.5, 2.0, 2.5, 2.5]}},
            "thermal_drift": {"passed": True, "data": {"thermal_drift_pct": 1.5}}
        }
    }
    score, details = validate_arch_consistency(fp, "modern_x86")
    assert score >= 0.8, f"modern_x86 real hardware scored too low: {score}"
    print(f" modern_x86 real: PASS (score={score})")

def test_apple_silicon_real():
    """A plausible genuine Apple Silicon fingerprint should score high."""
    fp = {
        "checks": {
            "simd_identity": {"passed": True, "data": {"has_neon": True, "has_altivec": False, "has_sse": False, "simd_type": "neon"}},
            "clock_drift": {"passed": True, "data": {"cv": 0.003, "samples": 200}},
            "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.2}, "256KB": {"random_ns": 2.5}, "1024KB": {"random_ns": 6.0}, "4096KB": {"random_ns": 12.0}}, "tone_ratios": [1.2, 2.0, 2.4, 2.0]}},
            "thermal_drift": {"passed": True, "data": {"thermal_drift_pct": 1.0}}
        }
    }
    score, details = validate_arch_consistency(fp, "apple_silicon")
    assert score >= 0.8, f"apple_silicon real scored too low: {score}"
    print(f" apple_silicon real: PASS (score={score})")

def test_frozen_profile():
    """cv == 0 (frozen clock data) must short-circuit to the 0.3 clock score."""
    fp = {
        "checks": {
            "simd_identity": {"passed": True, "data": {"has_sse2": True, "has_avx2": True, "has_altivec": False, "has_neon": False, "simd_type": "sse_avx"}},
            "clock_drift": {"passed": True, "data": {"cv": 0.0, "samples": 200}},
            "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 1.5}, "256KB": {"random_ns": 3.0}, "1024KB": {"random_ns": 8.0}, "4096KB": {"random_ns": 20.0}}, "tone_ratios": [1.5, 2.0, 2.5, 2.5]}},
            "thermal_drift": {"passed": True, "data": {"thermal_drift_pct": 1.5}},
        }
    }
    score, details = validate_arch_consistency(fp, "modern_x86")
    assert details["scores"]["clock_consistency"] == 0.3, f"frozen clock should score 0.3, got {details['scores']['clock_consistency']}"
    assert "no_clock_cv_data" in details["issues"], "Should flag no_clock_cv_data"
    print(f" frozen profile: PASS (clock_score={details['scores']['clock_consistency']})")

def test_missing_fingerprint():
    """An empty fingerprint must not look like a good match."""
    fp = {}
    score, details = validate_arch_consistency(fp, "g4")
    # Empty fingerprint should have low scores due to missing evidence
    assert details["overall_score"] < 0.7, f"Empty fingerprint scored too high: {details['overall_score']}"
    print(f" empty fingerprint: PASS (score={details['overall_score']})")

def test_cpu_brand_consistency():
    """A matching brand string must outscore a mismatched one for the same fp."""
    fp = {
        "checks": {
            "simd_identity": {"passed": True, "data": {"has_altivec": True, "has_sse": False, "has_neon": False, "simd_type": "altivec"}},
            "clock_drift": {"passed": True, "data": {"cv": 0.05, "samples": 200}},
            "cache_timing": {"passed": True, "data": {"latencies": {"4KB": {"random_ns": 1.0}, "32KB": {"random_ns": 2.0}, "256KB": {"random_ns": 5.0}, "1024KB": {"random_ns": 10.0}}, "tone_ratios": [2.0, 2.5, 2.0]}},
        }
    }
    score_good, _ = validate_arch_consistency(fp, "g4", {"cpu_brand": "Motorola MPC7445"})
    score_bad, details = validate_arch_consistency(fp, "g4", {"cpu_brand": "Intel Core i9-13900K"})
    assert score_good > score_bad, f"G4 with Intel brand should score lower"
    print(f" cpu_brand consistency: PASS (G4/Motorola={score_good}, G4/Intel={score_bad})")

def test_all_profiles_valid():
    """Every architecture profile carries the fields the scorers rely on."""
    required = ["simd_type", "cache_sizes", "cv_range", "thermal_drift_range", "disqualifying_features", "cache_tone_min", "cache_tone_max"]
    for arch, profile in ARCHITECTURE_PROFILES.items():
        for field in required:
            assert field in profile, f"Profile {arch} missing {field}"
        assert isinstance(profile["cv_range"], tuple) and len(profile["cv_range"]) == 2
    print(" all profiles valid: PASS")

def test_score_interpretation_levels():
    """validate_arch_consistency always emits an interpretation string."""
    for arch in ["g4", "modern_x86"]:
        fp = {"checks": {"simd_identity": {"passed": True, "data": {"has_sse2": True}}, "clock_drift": {"passed": True, "data": {"cv": 0.002, "samples": 200}}, "cache_timing": {"passed": True, "data": {"tone_ratios": [1.5]}}}}
        score, details = validate_arch_consistency(fp, arch)
        assert "interpretation" in details
    print(" interpretation levels: PASS")

if __name__ == "__main__":
    print("\n=== arch_cross_validation unit tests ===\n")
    test_normalize_arch()
    test_g4_real_hardware()
    test_g4_x86_spoofing()
    test_modern_x86_real()
    test_apple_silicon_real()
    test_frozen_profile()
    test_missing_fingerprint()
    test_cpu_brand_consistency()
    test_all_profiles_valid()
    test_score_interpretation_levels()
    print("\n=== ALL TESTS PASSED ===\n")