diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index 12cb59f985..fd442efb0e 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -19,6 +19,9 @@ jobs: - os: ubuntu-24.04-arm platform_pair: linux-arm64 runs-on: ${{ matrix.os }} + permissions: + contents: read + packages: write steps: - name: Checkout @@ -51,7 +54,7 @@ jobs: with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} - password: ${{ secrets.PACKAGE_TOKEN }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push id: build @@ -76,6 +79,9 @@ jobs: retention-days: 1 merge: runs-on: ubuntu-latest + permissions: + contents: read + packages: write needs: - build steps: @@ -91,7 +97,7 @@ jobs: with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} - password: ${{ secrets.PACKAGE_TOKEN }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 diff --git a/.gitignore b/.gitignore index bc5edba6dc..c5e55989f2 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,8 @@ *un~ .DS_Store */**/.DS_Store +.claude/ +.gocache/ #* .#* diff --git a/PQ_PLAN.md b/PQ_PLAN.md new file mode 100644 index 0000000000..e17b46258d --- /dev/null +++ b/PQ_PLAN.md @@ -0,0 +1,308 @@ +# Post-Quantum BSC — Implementation Plan + +**Last updated:** 2026-04-13 +**Branch:** `post_quantum_dev` +**Fork activation:** Timestamp-based via `PQForkTime` / `IsPQFork(num, time)` + +--- + +## Status Overview + +| Phase | Name | Status | Notes | +|-------|------|--------|-------| +| 0 | PQ Crypto Primitives | **Complete** | ML-DSA-44, ML-KEM-768, XMSS stubs | +| 1 | PQ Transaction Signing | **Complete** | Tx type `0x05`, `PQSigner`, `pqRecover` precompile | +| 1.5 | PQ Public Key Registry | **Complete** | `pqKeyRegistry` precompile at `0x70` | +| 2 | P2P Handshake: ML-KEM-768 | Pending | Design ready, not yet implemented | +| 3 | Fast Finality Voting: STARK Aggregation | **Complete** (placeholder 
prover) | STARK prover is a structural placeholder | +| 4 | Integration & Full Benchmark | Pending | Depends on production STARK prover | + +--- + +## Phase 0 — PQ Crypto Primitives (Complete) + +### Deliverables + +| File | Description | +|------|-------------| +| `crypto/pq/mldsa/mldsa.go` | ML-DSA-44 (Dilithium): `GenerateKey`, `Sign`, `Verify`, `PublicKeyFromPrivate`, `PubKeyToAddress` | +| `crypto/pq/mldsa/mldsa_test.go` | Unit tests | +| `crypto/pq/mlkem/mlkem.go` | ML-KEM-768: `GenerateKey`, `Encapsulate`, `Decapsulate` | +| `crypto/pq/mlkem/mlkem_test.go` | Unit tests | +| `crypto/pq/xmss/xmss.go` | XMSS stubs (Sign, Aggregate, VerifyProof — returns "not implemented") | +| `crypto/crypto.go` | Top-level wrappers: `SignPQ`, `VerifyPQ`, `PQPubkeyToAddress` | +| `crypto/pq_signing_test.go` | Integration tests for crypto wrappers | + +### Key Constants + +| Parameter | Value | +|-----------|-------| +| ML-DSA-44 public key | 1312 bytes | +| ML-DSA-44 signature | 2420 bytes | +| ML-KEM-768 ciphertext | ~1088 bytes | + +--- + +## Phase 1 — PQ Transaction Signing (Complete) + +### Deliverables + +| File | Description | +|------|-------------| +| `core/types/pq_transaction.go` | `PQTxData` (type `0x05`) with `From`, `PQSignature` fields | +| `core/types/transaction_signing_pq.go` | `PQSigner`, `pqDispatchSigner`, `SignPQTx` | +| `core/types/pq_transaction_test.go` | Sign/verify/sender recovery test | +| `core/pq_e2e_test.go` | Full chain E2E: genesis → PQ tx → block insert → balance check | +| `params/config.go` | `PQForkTime`, `IsPQFork()`, fork ordering | + +### Design + +- New tx type `0x05` (`PQTxData`) carries an explicit `From` field (20 bytes) and `PQSignature` (2420 bytes) +- `PQSigner.Sender()` looks up `From` in the PQ key registry, verifies ML-DSA-44 signature +- `pqDispatchSigner` wraps the base signer; dispatches PQ txs to `PQSigner`, delegates others to the base +- `MakeSigner()` and `LatestSigner()` auto-wrap with `pqDispatchSigner` when `IsPQFork` 
is active + +### Precompile: `pqRecover` (`0x68`) + +- Input: `[hash(32) || signature(2420) || pubkey(1312)]` → total 3764 bytes +- Output: address (32 bytes, zero-padded, address at bytes 12-31) +- Gas: 30,000 +- In pre-PQFork maps: coexists with `doubleSignEvidence` via `pqRecoverCompat` (routes by input length) + +--- + +## Phase 1.5 — PQ Public Key Registry (Complete) + +### Precompile: `pqKeyRegistry` (`0x70`) + +- **Register** (input = 1312 bytes): `caller → pubkey` write-once to state trie (41 x 32-byte slots) + - Gas: `SstoreSetGas * 41` (~820k gas, one-time per address) + - Rejects if already registered +- **Lookup** (input = 20 bytes): `query address → 1312-byte pubkey` + - Gas: `SloadGas * 41` (~2.7k gas) + - Returns zeros if unregistered +- State storage: `statedb.SetState(registryAddr, slot, value)` where `slot_i = keccak256(addr ++ i)` for `i in 0..40` +- Stateful precompile: implements `RunStateful(input, caller, stateDB, readOnly)` + +### `PQSigner` Integration + +`PQSigner.Sender(tx)` calls `pqKeyRegistryLookup(From)` to retrieve the pubkey, then verifies the ML-DSA-44 signature against the tx hash. + +### Migration Path + +1. User sends registration tx to `0x70` (one-time, ~820k gas) +2. Node writes `addr → pubkey` into registry state +3. User sends PQ txs (`PQTxData`) — no embedded pubkey, only 2440 bytes overhead vs 65 for secp256k1 + +--- + +## Phase 2 — P2P Handshake: ML-KEM-768 (Pending) + +**Touch points:** `p2p/rlpx/rlpx.go`, `crypto/pq/mlkem/` + +### 2.1 ML-KEM-768 Session Key Exchange + +Replace ECDH inside `handshakeState` only. AES-CTR framing, MAC, multiplexing unchanged. 
+ +| Step | Current | Replacement | +|------|---------|-------------| +| Initiator key | ephemeral secp256k1 keypair | ML-KEM-768 encapsulation key | +| Auth message | ECIES-encrypted token + sig | ML-KEM ciphertext (1088 bytes) + identity sig | +| Shared secret | `ECDH(eph_priv, eph_pub)` | `Decapsulate(ciphertext)` → 32-byte shared secret | +| Derived secrets | `aesSecret`, `macSecret` | Same HKDF derivation | + +**Files to create/modify:** +- `p2p/rlpx/rlpx.go`: add `runInitiatorPQ` / `runRecipientPQ`; `Conn.isPQ bool`; fallback to legacy if peer does not advertise `pq-rlpx` +- `crypto/pq/mlkem/mlkem.go`: already exists (Phase 0) + +### 2.2 Node Identity + +Node discovery (`p2p/enode/`) keeps secp256k1 node IDs — unchanged. Only session key establishment is replaced. + +### 2.3 Benchmark Gate + +- Handshake latency: ML-KEM vs ECDH under simulated 45-peer mesh +- Ciphertext size overhead on first-packet RTT + +--- + +## Phase 3 — Fast Finality Voting: STARK Aggregation (Complete — Placeholder Prover) + +**Touch points:** `core/vote/`, `core/types/vote.go`, `consensus/parlia/`, `core/vm/contracts.go`, `crypto/pq/proofs/` + +### 3.1 STARK Proof System + +| File | Description | +|------|-------------| +| `crypto/pq/proofs/stark_prover.go` | `STARKProver`, `GenerateSTARKProof`, `VerifySTARKProof`, Merkle tree utilities | + +**Current state:** Placeholder implementation that builds Merkle commitments, FRI layer stubs, and query responses, but does **NOT** perform real STARK proving (no polynomial interpolation, no FRI commitment, no constraint evaluation). Marked with `TODO: Replace with a production STARK/leanVM prover`. 
+ +### 3.2 Data Structure Changes — `core/types/vote.go` + +| Type | Description | +|------|-------------| +| `PQPublicKey [1312]byte` | ML-DSA-44 public key type | +| `PQSignature [2420]byte` | ML-DSA-44 signature type | +| `PQVoteEnvelope` | Individual validator PQ vote (VoteAddress, Signature, Data) | +| `PQVoteAttestation` | Aggregated PQ attestation (VoteAddressSet, AggProof, Data, Extra) | +| `PQVoteEnvelope.Verify()` | Verifies ML-DSA-44 signature against `Data.Hash()` | +| `PQVoteEnvelope.Hash()` | Computes envelope hash | + +All existing BLS types (`VoteEnvelope`, `VoteAttestation`, `BLSPublicKey`, `BLSSignature`) are preserved for backward compatibility. + +### 3.3 Vote Signing — `core/vote/pq_vote_signer.go` + +| Function | Description | +|----------|-------------| +| `NewPQVoteSigner(pqKeyPath)` | Creates signer from private key file | +| `NewPQVoteSignerFromRawKey(privKey)` | Creates signer from raw bytes (for testing) | +| `SignVote(vote *PQVoteEnvelope)` | Signs with ML-DSA-44, populates VoteAddress + Signature | + +### 3.4 STARK Signature Aggregation — `consensus/parlia/pq_stark_aggregation.go` + +| Type/Function | Description | +|---------------|-------------| +| `PQVoteData` | Per-vote input: target/source block info + PQ sig/pubkey + validator index | +| `STARKSignatureAggregation` | Output: AggregateProof + CommitteeRoot + VoteDataHash + NumValidators | +| `STARKSignatureAggregator` | Stateful aggregator with mutex-protected prover | +| `Aggregate(votes, voteDataHash)` | Builds execution trace (7 columns), generates STARK proof, computes committee root | +| `Verify(agg, pubkeys, expectedVoteDataHash)` | Checks vote data hash binding → verifies STARK proof → validates committee root | +| `MarshalSTARKAggregation(agg)` | Binary serialization for header storage | +| `UnmarshalSTARKAggregation(data)` | Deserialization with bounds checks (H3 fix: numValidators≤1000, numFRI≤64, numQ≤1024, numAuth≤64) | +| `computeCommitteeRoot(pubkeys)` | SHA-256 
Merkle tree over validator public keys | +| `hashSignatureData(sig, pubkey)` | SHA-256 commitment over signature + public key | + +### 3.5 Vote Attestation Assembly & Verification — `consensus/parlia/pq_vote_attestation.go` + +| Function | Description | +|----------|-------------| +| `pqAssembleVoteAttestation(chain, header)` | Collects PQ votes from pool, validates quorum (2/3), STARK aggregates, RLP-encodes into header Extra | +| `pqVerifyVoteAttestation(chain, header, parents)` | Extracts PQVoteAttestation from header, validates source/target blocks, checks quorum, verifies STARK proof | +| `getPQVoteAttestationFromHeader(header, chainConfig, epochLength)` | Decodes PQVoteAttestation from header Extra (handles epoch and non-epoch blocks) | + +### 3.6 Consensus Fork Gating — `consensus/parlia/parlia.go` + +Three fork-gated integration points: + +| Line | Context | Logic | +|------|---------|-------| +| 439 | `getVoteAttestationFromHeader` | Post-PQFork: tries PQ decode first, converts `PQVoteAttestation → VoteAttestation` for downstream consumers | +| 765 | `verifyHeader` | `IsPQFork` → `pqVerifyVoteAttestation()` else `verifyVoteAttestation()` | +| 1770 | `Seal` (block production) | `IsPQFork` → `pqAssembleVoteAttestation()` else `assembleVoteAttestation()` | + +### 3.7 Precompile: `pqAttestationVerify` (`0x6a`) + +- Input: `[proof_len(4)] [proof_bytes] [vote_data_hash(32)] [num_pubkeys(4)] [{pubkey(1312)}...]` +- Full STARK verification: unmarshal → verify STARK proof → validate committee root → check vote data hash binding +- Registered in Hertz+ fork maps (not in Luban/Plato early fork maps) + +### 3.8 Tests + +| File | Tests | +|------|-------| +| `consensus/parlia/pq_stark_aggregation_test.go` | 8 unit tests + 1 benchmark: Basic, Verify, MismatchedPubkeys, EmptyVotes, MarshalUnmarshal, SingleVote, NilVerify, VoteDataHashMismatch, BenchmarkSTARKAggregation_21Validators | +| `consensus/parlia/pq_e2e_test.go` | 4 E2E tests (10 cases): FullFlow (21 validators 
full pipeline), NegativeCases (6 sub-tests), CommitteeRootDeterminism, IndividualSignatureVerification | + +### 3.9 Review Fixes Applied + +| ID | Issue | Fix | +|----|-------|-----| +| C3 | VoteDataHash not verified during Verify | Added `expectedVoteDataHash` param, enforced binding check (replay protection) | +| C4 | Non-deterministic committee root | Assembly now uses `snap.validators()` sorted order | +| C5 | Downstream consumers break post-fork | `getVoteAttestationFromHeader` converts PQVoteAttestation → VoteAttestation | +| H1 | Epoch blocks skipped in header extraction | Added epoch block handling mirroring legacy function | +| H2 | `BytesToHash` truncation for vote address | Changed to `crypto.Keccak256Hash` | +| H3 | Unmarshal OOM DoS | Added upper bounds on all array lengths | +| H4 | Precompile only checked committee root | Rewrote with full STARK verify flow | +| H5 | Precompile in early fork maps | Removed from Luban/Plato maps | + +### 3.10 Known Limitations + +- **STARK prover is a placeholder.** Structural correctness is ensured (Merkle commitments, data flow) but not cryptographic soundness. Must be replaced with a production STARK/leanVM prover before any public network deployment. +- **PQ assembly path consumes BLS-typed `[]*VoteEnvelope`.** The `VotePool` and `VoteManager` still deal in BLS-typed envelopes (`VoteEnvelope` with 48-byte pubkey / 96-byte sig). `pqAssembleVoteAttestation` adapts by reading `.Signature[:]` and `.VoteAddress[:]` — this works for the current bridge because the byte slicing is size-agnostic, but a full PQ vote pipeline should switch `VotePool`/`VoteManager` to `PQVoteEnvelope`. +- **XMSS not used.** The original plan mentioned leanXMSS; the current implementation uses ML-DSA-44 for vote signing (same as transaction signing) with STARK aggregation, which is a simpler and more practical approach. 
+ +--- + +## Phase 4 — Integration & Full Benchmark (Pending) + +### 4.1 Prerequisites + +- [ ] Production STARK prover (replace placeholder in `crypto/pq/proofs/stark_prover.go`) +- [ ] Full PQ vote pipeline: switch `VotePool` / `VoteManager` from `VoteEnvelope` to `PQVoteEnvelope` +- [ ] Phase 2 (P2P handshake) implementation + +### 4.2 Combined Hardfork Activation + +Gate all changes behind `PQForkTime` (already added to `params/config.go`). +- Devnet: `PQForkTime = 0` (already active) +- Testnet: future timestamp + +### 4.3 Combined Benchmark Suite + +Run all phases active simultaneously on devnet: + +| Metric | Target | +|--------|--------| +| TPS ceiling | Block propagation stays < 0.45 s | +| Finality latency | Vote collect + aggregate + verify round-trip | +| Handshake overhead | Time-to-first-byte vs baseline | +| Tx overhead | 2440 bytes (with registry) vs 65 bytes (secp256k1) | + +### 4.4 Open Questions + +| # | Question | Notes | +|---|----------|-------| +| 1 | Production STARK prover: proof size, verify time? 
| Blocks Phase 4 | +| 2 | VotePool/VoteManager PQ migration strategy | Backward compat during fork transition | +| 3 | Gas repricing for `pqAttestationVerify` precompile | After production prover benchmarks | +| 4 | ML-KEM-768 handshake: discovery compatibility | Peer capability negotiation needed | + +--- + +## File Inventory + +### New Files + +| File | Phase | Lines | +|------|-------|-------| +| `crypto/pq/mldsa/mldsa.go` | 0 | ML-DSA-44 primitives | +| `crypto/pq/mldsa/mldsa_test.go` | 0 | Tests | +| `crypto/pq/mlkem/mlkem.go` | 0 | ML-KEM-768 primitives | +| `crypto/pq/mlkem/mlkem_test.go` | 0 | Tests | +| `crypto/pq/xmss/xmss.go` | 0 | Stubs | +| `crypto/pq_signing_test.go` | 0 | Wrapper tests | +| `core/types/pq_transaction.go` | 1 | PQTxData type 0x05 | +| `core/types/transaction_signing_pq.go` | 1 | PQSigner, pqDispatchSigner | +| `core/types/pq_transaction_test.go` | 1 | Tests | +| `core/pq_e2e_test.go` | 1 | Chain-level E2E test | +| `core/vote/pq_vote_signer.go` | 3 | PQ vote signing | +| `crypto/pq/proofs/stark_prover.go` | 3 | STARK proof system (placeholder) | +| `consensus/parlia/pq_stark_aggregation.go` | 3 | STARK signature aggregation | +| `consensus/parlia/pq_vote_attestation.go` | 3 | PQ vote attestation assembly/verification | +| `consensus/parlia/pq_stark_aggregation_test.go` | 3 | STARK aggregation unit tests | +| `consensus/parlia/pq_e2e_test.go` | 3 | STARK aggregation E2E tests | +| `core/vm/pq_precompile_test.go` | 1 | pqRecover precompile tests | + +### Modified Files + +| File | Changes | +|------|---------| +| `params/config.go` | `PQForkTime`, `IsPQFork()`, fork ordering, `BuildBlockContext.IsPQ` | +| `core/types/vote.go` | PQ types: `PQPublicKey`, `PQSignature`, `PQVoteEnvelope`, `PQVoteAttestation` | +| `core/vm/contracts.go` | 3 precompiles: `pqRecover` (0x68), `pqAttestationVerify` (0x6a), `pqKeyRegistry` (0x70) | +| `consensus/parlia/parlia.go` | Fork gating at lines 439, 765, 1770 | +| `crypto/crypto.go` | `SignPQ`, 
`VerifyPQ`, `PQPubkeyToAddress` wrappers | +| `core/genesis.go` | `PQForkTime` genesis override | +| `eth/backend.go` | `PQForkTime` backend config override | + +--- + +## Precompile Address Map + +| Address | Name | Phase | Description | +|---------|------|-------|-------------| +| `0x68` | `pqRecoverCompat` | 1 | ML-DSA-44 signature → address recovery (coexists with doubleSignEvidence) | +| `0x6a` | `pqAttestationVerify` | 3 | STARK aggregate proof verification | +| `0x70` | `pqKeyRegistry` | 1.5 | PQ public key registration and lookup | diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 5da9d17ade..42a789b898 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -77,6 +77,7 @@ var ( utils.OverrideBPO2, utils.OverridePasteur, utils.OverrideVerkle, + utils.OverridePQHardfork, // utils.MultiDataBaseFlag, }, utils.DatabaseFlags), Description: ` @@ -374,6 +375,10 @@ func initGenesis(ctx *cli.Context) error { v := ctx.Uint64(utils.OverrideVerkle.Name) overrides.OverrideVerkle = &v } + if ctx.IsSet(utils.OverridePQHardfork.Name) { + v := ctx.Uint64(utils.OverridePQHardfork.Name) + overrides.OverridePQHardfork = &v + } chaindb := utils.MakeChainDatabase(ctx, stack, false) defer chaindb.Close() diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 6a3d462730..eb8ed86081 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -294,6 +294,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { v := ctx.Uint64(utils.OverrideVerkle.Name) cfg.Eth.OverrideVerkle = &v } + if ctx.IsSet(utils.OverridePQHardfork.Name) { + v := ctx.Uint64(utils.OverridePQHardfork.Name) + cfg.Eth.OverridePQHardfork = &v + } if ctx.IsSet(utils.OverrideFullImmutabilityThreshold.Name) { params.FullImmutabilityThreshold = ctx.Uint64(utils.OverrideFullImmutabilityThreshold.Name) downloader.FullMaxForkAncestry = ctx.Uint64(utils.OverrideFullImmutabilityThreshold.Name) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 1e13ea9f9b..605ddc1668 100644 --- 
a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -82,6 +82,7 @@ var ( utils.OverrideBPO2, utils.OverridePasteur, utils.OverrideVerkle, + utils.OverridePQHardfork, utils.OverrideGenesisFlag, utils.OverrideFullImmutabilityThreshold, utils.OverrideMinBlocksForBlobRequests, @@ -189,6 +190,7 @@ var ( utils.BLSPasswordFileFlag, utils.BLSWalletDirFlag, utils.VoteJournalDirFlag, + utils.PQVoteKeyFileFlag, utils.LogDebugFlag, utils.LogBacktraceAtFlag, utils.BlobExtraReserveFlag, diff --git a/cmd/keeper/go.mod b/cmd/keeper/go.mod index 11be3e061b..393099cd2b 100644 --- a/cmd/keeper/go.mod +++ b/cmd/keeper/go.mod @@ -16,6 +16,7 @@ require ( github.com/bnb-chain/ics23 v0.1.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.6.3 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect diff --git a/cmd/keeper/go.sum b/cmd/keeper/go.sum index e7d99e25e4..2fcc8c3a63 100644 --- a/cmd/keeper/go.sum +++ b/cmd/keeper/go.sum @@ -36,6 +36,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 
h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 5b455109d5..69a7f522fa 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -344,6 +344,11 @@ var ( Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + OverridePQHardfork = &cli.Uint64Flag{ + Name: "override.pqhardfork", + Usage: "Manually specify the PQ hardfork timestamp, overriding the bundled setting", + Category: flags.EthCategory, + } OverrideGenesisFlag = &cli.StringFlag{ Name: "override.genesis", Usage: "Load genesis block and configuration from file at this path", @@ -1318,6 +1323,12 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server. Category: flags.FastFinalityCategory, } + PQVoteKeyFileFlag = &cli.StringFlag{ + Name: "pqvotekey", + Usage: "Path to a file containing the raw ML-DSA-44 private key used for post-quantum vote signing after PQForkTime", + Category: flags.AccountCategory, + } + // Blob setting BlobExtraReserveFlag = &cli.Uint64Flag{ Name: "blob.extra-reserve", @@ -1874,6 +1885,9 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { if ctx.IsSet(BLSPasswordFileFlag.Name) { cfg.BLSPasswordFile = ctx.String(BLSPasswordFileFlag.Name) } + if ctx.IsSet(PQVoteKeyFileFlag.Name) { + cfg.PQVoteKeyFile = ctx.String(PQVoteKeyFileFlag.Name) + } if ctx.IsSet(DBEngineFlag.Name) { dbEngine := ctx.String(DBEngineFlag.Name) if dbEngine != "leveldb" && dbEngine != "pebble" { diff --git a/consensus/consensus.go b/consensus/consensus.go index be924ffeca..a8fdf5dda1 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -71,6 +71,12 @@ type VotePool interface { FetchVotesByBlockHash(targetBlockHash common.Hash, sourceBlockNum uint64) []*types.VoteEnvelope } +// PQVotePool is the post-quantum counterpart of VotePool. It is consumed by +// parlia when assembling a PQ vote attestation after the PQ hardfork. 
+type PQVotePool interface { + FetchVotesByBlockHash(targetBlockHash common.Hash, sourceBlockNum uint64) []*types.PQVoteEnvelope +} + // ChainReader defines a small collection of methods needed to access the local // blockchain during header and/or uncle verification. type ChainReader interface { diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 3127eb16fb..697b0933c4 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -260,6 +260,7 @@ type Parlia struct { ethAPI *ethapi.BlockChainAPI VotePool consensus.VotePool + PQVotePool consensus.PQVotePool validatorSetABIBeforeLuban abi.ABI validatorSetABI abi.ABI slashABI abi.ABI @@ -407,6 +408,8 @@ func getValidatorBytesFromHeader(header *types.Header, chainConfig *params.Chain } // getVoteAttestationFromHeader returns the vote attestation extracted from the header's extra field if exists. +// After PQ fork, headers contain PQVoteAttestation (with STARK proof); this function converts it +// to VoteAttestation so downstream consumers (snapshot, finality rewards) work unchanged. func getVoteAttestationFromHeader(header *types.Header, chainConfig *params.ChainConfig, epochLength uint64) (*types.VoteAttestation, error) { if len(header.Extra) <= extraVanity+extraSeal { return nil, nil @@ -432,6 +435,22 @@ func getVoteAttestationFromHeader(header *types.Header, chainConfig *params.Chai attestationBytes = header.Extra[start:end] } + // After PQ fork, headers contain PQVoteAttestation instead of VoteAttestation. + // Try PQ format first if we're past the fork, then fall back to legacy. + if chainConfig.IsPQFork(header.Number, header.Time) { + var pqAttestation types.PQVoteAttestation + if err := rlp.Decode(bytes.NewReader(attestationBytes), &pqAttestation); err == nil { + // Convert PQVoteAttestation to VoteAttestation for downstream compatibility. + // Downstream only uses VoteAddressSet, Data, and Extra — not AggSignature. 
+ return &types.VoteAttestation{ + VoteAddressSet: pqAttestation.VoteAddressSet, + Data: pqAttestation.Data, + Extra: pqAttestation.Extra, + }, nil + } + // Fall through to try legacy format (for blocks at the fork boundary). + } + var attestation types.VoteAttestation if err := rlp.Decode(bytes.NewReader(attestationBytes), &attestation); err != nil { return nil, fmt.Errorf("block %d has vote attestation info, decode err: %s", header.Number.Uint64(), err) @@ -743,12 +762,18 @@ func (p *Parlia) verifyCascadingFields(chain consensus.ChainHeaderReader, header } // Verify vote attestation for fast finality. - if err := p.verifyVoteAttestation(chain, header, parents); err != nil { - log.Warn("Verify vote attestation failed", "error", err, "hash", header.Hash(), "number", header.Number, + var verifyAttErr error + if chain.Config().IsPQFork(header.Number, header.Time) { + verifyAttErr = p.pqVerifyVoteAttestation(chain, header, parents) + } else { + verifyAttErr = p.verifyVoteAttestation(chain, header, parents) + } + if verifyAttErr != nil { + log.Warn("Verify vote attestation failed", "error", verifyAttErr, "hash", header.Hash(), "number", header.Number, "parent", header.ParentHash, "coinbase", header.Coinbase, "extra", common.Bytes2Hex(header.Extra)) verifyVoteAttestationErrorCounter.Inc(1) if chain.Config().IsPlato(header.Number) { - return err + return verifyAttErr } } @@ -1611,6 +1636,60 @@ func (p *Parlia) IsActiveValidatorAt(chain consensus.ChainHeaderReader, header * return ok && (checkVoteKeyFn == nil || (validatorInfo != nil && checkVoteKeyFn(&validatorInfo.VoteAddress))) } +// IsActivePQValidatorAt is the post-quantum counterpart of IsActiveValidatorAt. +// It checks whether this node's address is in the validator set and, optionally, +// whether its ML-DSA-44 public key matches the one registered in the snapshot. 
+func (p *Parlia) IsActivePQValidatorAt(chain consensus.ChainHeaderReader, header *types.Header, checkPQKeyFn func(pqPubKey *types.PQPublicKey) bool) bool { + number := header.Number.Uint64() + snap, err := p.snapshot(chain, number-1, header.ParentHash, nil) + if err != nil { + log.Error("failed to get the snapshot from consensus", "error", err) + return false + } + validatorInfo, ok := snap.Validators[p.val] + if !ok || validatorInfo == nil { + return false + } + // If the snapshot's PQVoteAddress is empty (e.g. genesis snapshot was + // created before the registry cache was warmed), try to back-fill it + // from the now-warm PQRegistryLookup cache. + if validatorInfo.PQVoteAddress == (types.PQPublicKey{}) { + if pubKey := vm.PQRegistryLookup(p.val); len(pubKey) == types.PQPublicKeyLength { + copy(validatorInfo.PQVoteAddress[:], pubKey) + } + } + return checkPQKeyFn == nil || checkPQKeyFn(&validatorInfo.PQVoteAddress) +} + +// CurrentValidators returns the validator addresses in the snapshot at the +// current chain head. Unlike ExtractValidatorAddresses it does not require +// the head to be an epoch block, so it is safe to call after any restart. +func (p *Parlia) CurrentValidators(chain consensus.ChainHeaderReader) []common.Address { + head := chain.CurrentHeader() + if head == nil { + return nil + } + var number uint64 + var hash common.Hash + if head.Number.Sign() == 0 { + number = 0 + hash = head.Hash() + } else { + number = head.Number.Uint64() - 1 + hash = head.ParentHash + } + snap, err := p.snapshot(chain, number, hash, nil) + if err != nil { + log.Error("CurrentValidators: failed to get snapshot", "err", err) + return nil + } + addrs := make([]common.Address, 0, len(snap.Validators)) + for addr := range snap.Validators { + addrs = append(addrs, addr) + } + return addrs +} + // VerifyVote will verify: 1. If the vote comes from valid validators 2. 
If the vote's sourceNumber and sourceHash are correct func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error { targetNumber := vote.Data.TargetNumber @@ -1742,11 +1821,16 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res case <-time.After(delay): } - err := p.assembleVoteAttestation(chain, header) - if err != nil { + var assembleErr error + if p.chainConfig.IsPQFork(header.Number, header.Time) { + assembleErr = p.pqAssembleVoteAttestation(chain, header) + } else { + assembleErr = p.assembleVoteAttestation(chain, header) + } + if assembleErr != nil { /* If the vote attestation can't be assembled successfully, the blockchain won't get fast finalized, but it can be tolerated, so just report this error here. */ - log.Debug("Assemble vote attestation failed when sealing", "err", err) + log.Debug("Assemble vote attestation failed when sealing", "err", assembleErr) } // Sign all the things! diff --git a/consensus/parlia/pq_e2e_test.go b/consensus/parlia/pq_e2e_test.go new file mode 100644 index 0000000000..9656785971 --- /dev/null +++ b/consensus/parlia/pq_e2e_test.go @@ -0,0 +1,368 @@ +package parlia + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" + "github.com/ethereum/go-ethereum/rlp" +) + +// signPQVote signs a PQVoteEnvelope using ML-DSA-44 (test helper, avoids core/vote import cycle). 
+// signPQVote signs env.Data with an ML-DSA-44 private key and records the
+// resulting signature together with the signer's public key on the envelope.
+func signPQVote(env *types.PQVoteEnvelope, privKey, pubKey []byte) error {
+	digest := env.Data.Hash()
+	sig, err := mldsa.Sign(privKey, digest[:])
+	if err != nil {
+		return err
+	}
+	copy(env.VoteAddress[:], pubKey)
+	copy(env.Signature[:], sig)
+	return nil
+}
+
+// TestPQE2E_FullFlow tests the complete post-quantum vote attestation pipeline:
+// ML-DSA-44 key generation → PQ vote signing → individual signature verification →
+// STARK aggregation → marshal/unmarshal → STARK verification → RLP round-trip.
+func TestPQE2E_FullFlow(t *testing.T) {
+	const numValidators = 21
+
+	// Step 1: Generate ML-DSA-44 keypairs for validators.
+	type validatorKeys struct {
+		privKey []byte
+		pubKey  []byte
+	}
+	signers := make([]validatorKeys, numValidators)
+	for i := range signers {
+		pub, priv, err := mldsa.GenerateKey()
+		if err != nil {
+			t.Fatalf("GenerateKey[%d]: %v", i, err)
+		}
+		signers[i] = validatorKeys{privKey: priv, pubKey: pub}
+	}
+
+	// Step 2: Create vote data (simulating source/target blocks).
+	voteData := &types.VoteData{
+		SourceNumber: 99,
+		SourceHash:   common.BytesToHash([]byte("source-hash-for-e2e-test-padding")),
+		TargetNumber: 100,
+		TargetHash:   common.BytesToHash([]byte("target-hash-for-e2e-test-padding")),
+	}
+	voteDataHash := voteData.Hash()
+
+	// Step 3: Sign votes and verify each individual ML-DSA-44 signature.
+	pqEnvelopes := make([]*types.PQVoteEnvelope, numValidators)
+	for i := range signers {
+		vote := &types.PQVoteEnvelope{Data: voteData}
+		if err := signPQVote(vote, signers[i].privKey, signers[i].pubKey); err != nil {
+			t.Fatalf("SignVote[%d]: %v", i, err)
+		}
+		if err := vote.Verify(); err != nil {
+			t.Fatalf("Verify individual vote[%d]: %v", i, err)
+		}
+		pqEnvelopes[i] = vote
+	}
+
+	// Step 4: Build PQVoteData array (mirrors pqAssembleVoteAttestation logic).
+	pqVotes := make([]PQVoteData, numValidators)
+	for i, vote := range pqEnvelopes {
+		pqVotes[i] = PQVoteData{
+			TargetNumber:   vote.Data.TargetNumber,
+			TargetHash:     vote.Data.TargetHash,
+			SourceNumber:   vote.Data.SourceNumber,
+			SourceHash:     vote.Data.SourceHash,
+			PQSignature:    vote.Signature[:],
+			PQPublicKey:    vote.VoteAddress[:],
+			ValidatorIndex: i,
+		}
+	}
+
+	// Step 5: STARK aggregate.
+	aggregator := NewSTARKSignatureAggregator()
+	agg, err := aggregator.Aggregate(pqVotes, voteDataHash)
+	if err != nil {
+		t.Fatalf("Aggregate: %v", err)
+	}
+	if agg.NumValidators != numValidators {
+		t.Errorf("NumValidators: got %d, want %d", agg.NumValidators, numValidators)
+	}
+	if agg.VoteDataHash != voteDataHash {
+		t.Error("VoteDataHash mismatch after aggregation")
+	}
+
+	// Step 6: Verify aggregation with correct pubkeys.
+	pubkeys := make([][]byte, numValidators)
+	for i := range signers {
+		pubkeys[i] = signers[i].pubKey
+	}
+	valid, err := aggregator.Verify(agg, pubkeys, voteDataHash)
+	if err != nil {
+		t.Fatalf("Verify aggregation: %v", err)
+	}
+	if !valid {
+		t.Error("expected valid aggregation")
+	}
+
+	// Step 7: Marshal → Unmarshal round-trip (simulates header storage).
+	proofBytes, err := MarshalSTARKAggregation(agg)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	if len(proofBytes) == 0 {
+		t.Fatal("Marshal returned empty bytes")
+	}
+	agg2, err := UnmarshalSTARKAggregation(proofBytes)
+	if err != nil {
+		t.Fatalf("Unmarshal: %v", err)
+	}
+	if agg2.NumValidators != agg.NumValidators {
+		t.Errorf("NumValidators mismatch after roundtrip: %d vs %d", agg2.NumValidators, agg.NumValidators)
+	}
+	if agg2.CommitteeRoot != agg.CommitteeRoot {
+		t.Error("CommitteeRoot mismatch after roundtrip")
+	}
+	if agg2.VoteDataHash != agg.VoteDataHash {
+		t.Error("VoteDataHash mismatch after roundtrip")
+	}
+
+	// Step 8: Verify the unmarshaled aggregation.
+	valid, err = aggregator.Verify(agg2, pubkeys, voteDataHash)
+	if err != nil {
+		t.Fatalf("Verify after unmarshal: %v", err)
+	}
+	if !valid {
+		t.Error("expected valid after unmarshal")
+	}
+
+	// Step 9: PQVoteAttestation RLP encode/decode round-trip.
+	attestation := &types.PQVoteAttestation{
+		VoteAddressSet: types.ValidatorsBitSet((1 << numValidators) - 1), // all bits set
+		AggProof:       proofBytes,
+		Data:           voteData,
+	}
+	encoded, err := rlp.EncodeToBytes(attestation)
+	if err != nil {
+		t.Fatalf("RLP encode PQVoteAttestation: %v", err)
+	}
+	var decoded types.PQVoteAttestation
+	if err := rlp.DecodeBytes(encoded, &decoded); err != nil {
+		t.Fatalf("RLP decode PQVoteAttestation: %v", err)
+	}
+	if decoded.Data.TargetNumber != voteData.TargetNumber {
+		t.Error("RLP roundtrip: TargetNumber mismatch")
+	}
+	if decoded.Data.SourceHash != voteData.SourceHash {
+		t.Error("RLP roundtrip: SourceHash mismatch")
+	}
+	if uint64(decoded.VoteAddressSet) != uint64(attestation.VoteAddressSet) {
+		t.Error("RLP roundtrip: VoteAddressSet mismatch")
+	}
+
+	// Verify decoded proof still works.
+	agg3, err := UnmarshalSTARKAggregation(decoded.AggProof)
+	if err != nil {
+		t.Fatalf("Unmarshal from decoded attestation: %v", err)
+	}
+	valid, err = aggregator.Verify(agg3, pubkeys, voteDataHash)
+	if err != nil {
+		t.Fatalf("Verify from decoded attestation: %v", err)
+	}
+	if !valid {
+		t.Error("expected valid from decoded attestation")
+	}
+}
+
+// TestPQE2E_NegativeCases tests that verification correctly rejects invalid inputs.
+func TestPQE2E_NegativeCases(t *testing.T) {
+	const numValidators = 15
+
+	// Setup: generate keys, sign votes, aggregate.
+	pqVotes := make([]PQVoteData, numValidators)
+	pubkeys := make([][]byte, numValidators)
+	voteData := &types.VoteData{
+		SourceNumber: 50,
+		SourceHash:   common.BytesToHash([]byte("neg-source-hash-padding-1234567")),
+		TargetNumber: 51,
+		TargetHash:   common.BytesToHash([]byte("neg-target-hash-padding-1234567")),
+	}
+	voteDataHash := voteData.Hash()
+
+	for i := 0; i < numValidators; i++ {
+		pub, priv, err := mldsa.GenerateKey()
+		if err != nil {
+			t.Fatalf("GenerateKey[%d]: %v", i, err)
+		}
+		vote := &types.PQVoteEnvelope{Data: voteData}
+		if err := signPQVote(vote, priv, pub); err != nil {
+			t.Fatalf("SignVote[%d]: %v", i, err)
+		}
+		pqVotes[i] = PQVoteData{
+			TargetNumber:   vote.Data.TargetNumber,
+			TargetHash:     vote.Data.TargetHash,
+			SourceNumber:   vote.Data.SourceNumber,
+			SourceHash:     vote.Data.SourceHash,
+			PQSignature:    vote.Signature[:],
+			PQPublicKey:    vote.VoteAddress[:],
+			ValidatorIndex: i,
+		}
+		pubkeys[i] = pub
+	}
+
+	aggregator := NewSTARKSignatureAggregator()
+	agg, err := aggregator.Aggregate(pqVotes, voteDataHash)
+	if err != nil {
+		t.Fatalf("Aggregate: %v", err)
+	}
+
+	t.Run("WrongPubkeys", func(t *testing.T) {
+		wrongPubkeys := make([][]byte, numValidators)
+		for i := range wrongPubkeys {
+			pub, _, err := mldsa.GenerateKey()
+			if err != nil {
+				t.Fatalf("GenerateKey: %v", err)
+			}
+			wrongPubkeys[i] = pub
+		}
+		valid, err := aggregator.Verify(agg, wrongPubkeys, voteDataHash)
+		if err == nil || valid {
+			t.Error("expected failure with wrong pubkeys")
+		}
+	})
+
+	t.Run("WrongVoteDataHash", func(t *testing.T) {
+		wrongHash := common.BytesToHash([]byte("wrong-vote-data-hash-1234567890a"))
+		valid, err := aggregator.Verify(agg, pubkeys, wrongHash)
+		if err == nil || valid {
+			t.Error("expected failure with wrong vote data hash")
+		}
+	})
+
+	t.Run("NilAggregation", func(t *testing.T) {
+		valid, err := aggregator.Verify(nil, pubkeys, voteDataHash)
+		if err != ErrSTARKAggNilResult {
+			t.Errorf("expected ErrSTARKAggNilResult, got %v", err)
+		}
+		if valid {
+			t.Error("expected invalid")
+		}
+	})
+
+	t.Run("EmptyVotes", func(t *testing.T) {
+		if _, err := aggregator.Aggregate(nil, voteDataHash); err != ErrSTARKAggNoSigs {
+			t.Errorf("expected ErrSTARKAggNoSigs, got %v", err)
+		}
+	})
+
+	t.Run("TamperedProof", func(t *testing.T) {
+		proofBytes, err := MarshalSTARKAggregation(agg)
+		if err != nil {
+			t.Fatalf("Marshal: %v", err)
+		}
+		// Tamper with the commitment root area (bytes 68-100).
+		tampered := make([]byte, len(proofBytes))
+		copy(tampered, proofBytes)
+		for i := 68; i < 100 && i < len(tampered); i++ {
+			tampered[i] ^= 0xFF
+		}
+		agg2, err := UnmarshalSTARKAggregation(tampered)
+		if err != nil {
+			// Tamper may cause unmarshal failure — acceptable.
+			return
+		}
+		valid, err := aggregator.Verify(agg2, pubkeys, voteDataHash)
+		if valid && err == nil {
+			t.Error("expected failure with tampered proof")
+		}
+	})
+
+	t.Run("TruncatedProof", func(t *testing.T) {
+		if _, err := UnmarshalSTARKAggregation([]byte{0x01, 0x02, 0x03}); err != ErrSTARKAggInvalidProof {
+			t.Errorf("expected ErrSTARKAggInvalidProof for truncated data, got %v", err)
+		}
+	})
+}
+
+// TestPQE2E_CommitteeRootDeterminism verifies that committee root is deterministic
+// for the same input order, and order-sensitive (validates the C4 fix).
+func TestPQE2E_CommitteeRootDeterminism(t *testing.T) {
+	const n = 10
+	pubkeys := make([][]byte, n)
+	for i := range pubkeys {
+		pub, _, err := mldsa.GenerateKey()
+		if err != nil {
+			t.Fatalf("GenerateKey[%d]: %v", i, err)
+		}
+		pubkeys[i] = pub
+	}
+
+	root1 := computeCommitteeRoot(pubkeys)
+	root2 := computeCommitteeRoot(pubkeys)
+	if root1 != root2 {
+		t.Error("committee root is not deterministic for same input order")
+	}
+
+	// Reversed order should produce a different root.
+	reversed := make([][]byte, n)
+	for i := 0; i < n; i++ {
+		reversed[i] = pubkeys[n-1-i]
+	}
+	rootReversed := computeCommitteeRoot(reversed)
+	if root1 == rootReversed {
+		t.Error("committee root should differ for different pubkey orders")
+	}
+}
+
+// TestPQE2E_IndividualSignatureVerification tests ML-DSA-44 sign/verify
+// and that tampered signatures are rejected.
+func TestPQE2E_IndividualSignatureVerification(t *testing.T) {
+	pub, priv, err := mldsa.GenerateKey()
+	if err != nil {
+		t.Fatalf("GenerateKey: %v", err)
+	}
+
+	// Verify pubkey derivation round-trip.
+	derivedPub, err := mldsa.PublicKeyFromPrivate(priv)
+	if err != nil {
+		t.Fatalf("PublicKeyFromPrivate: %v", err)
+	}
+	if !bytesEqual(pub, derivedPub) {
+		t.Fatal("public key derivation mismatch")
+	}
+
+	voteData := &types.VoteData{
+		SourceNumber: 1,
+		SourceHash:   common.BytesToHash([]byte("sig-test-source-hash-padding123")),
+		TargetNumber: 2,
+		TargetHash:   common.BytesToHash([]byte("sig-test-target-hash-padding123")),
+	}
+
+	vote := &types.PQVoteEnvelope{Data: voteData}
+	if err := signPQVote(vote, priv, pub); err != nil {
+		t.Fatalf("SignVote: %v", err)
+	}
+
+	// Valid signature should verify.
+	if err := vote.Verify(); err != nil {
+		t.Fatalf("expected valid signature, got: %v", err)
+	}
+
+	// Tampered signature should fail.
+	vote.Signature[0] ^= 0xFF
+	if err := vote.Verify(); err == nil {
+		t.Error("expected tampered signature to fail verification")
+	}
+}
+
+// bytesEqual reports whether a and b have identical length and contents.
+func bytesEqual(a, b []byte) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, x := range a {
+		if x != b[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/consensus/parlia/pq_stark_aggregation.go b/consensus/parlia/pq_stark_aggregation.go
new file mode 100644
index 0000000000..7d567de284
--- /dev/null
+++ b/consensus/parlia/pq_stark_aggregation.go
@@ -0,0 +1,341 @@
+package parlia
+
+import (
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"math/big"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto/pq/proofs"
+)
+
+// STARK signature aggregation errors.
+var (
+	ErrSTARKAggNoSigs       = errors.New("stark_sig_aggregation: no signatures to aggregate")
+	ErrSTARKAggInvalidProof = errors.New("stark_sig_aggregation: invalid aggregate proof")
+	ErrSTARKAggVerifyFailed = errors.New("stark_sig_aggregation: verification failed")
+	ErrSTARKAggNilResult    = errors.New("stark_sig_aggregation: nil aggregation result")
+	ErrSTARKAggMismatch     = errors.New("stark_sig_aggregation: committee root mismatch")
+)
+
+// PQVoteData holds the data for a single PQ vote to be aggregated.
+type PQVoteData struct {
+	TargetNumber   uint64
+	TargetHash     common.Hash
+	SourceNumber   uint64
+	SourceHash     common.Hash
+	PQSignature    []byte // ML-DSA-44 signature (2420 bytes)
+	PQPublicKey    []byte // ML-DSA-44 public key (1312 bytes)
+	ValidatorIndex int
+}
+
+// STARKSignatureAggregation holds a STARK-aggregated set of PQ vote signatures.
+type STARKSignatureAggregation struct {
+	// AggregateProof is the STARK proof that all signatures are valid.
+	AggregateProof *proofs.STARKProofData
+	// CommitteeRoot is the Merkle root of the participating validator public keys.
+	CommitteeRoot common.Hash
+	// VoteDataHash is the hash of the vote data being attested.
+ VoteDataHash common.Hash + // NumValidators is the number of validators in this aggregation. + NumValidators int +} + +// STARKSignatureAggregator creates and verifies STARK-aggregated signature proofs. +type STARKSignatureAggregator struct { + mu sync.RWMutex + prover *proofs.STARKProver +} + +// NewSTARKSignatureAggregator creates a new STARK signature aggregator. +func NewSTARKSignatureAggregator() *STARKSignatureAggregator { + return &STARKSignatureAggregator{ + prover: proofs.NewSTARKProver(), + } +} + +// Aggregate creates a STARK proof that all given PQ vote signatures are valid. +// This replaces BLS AggregateSignatures with a single STARK verification. +func (sa *STARKSignatureAggregator) Aggregate(votes []PQVoteData, voteDataHash common.Hash) (*STARKSignatureAggregation, error) { + if len(votes) == 0 { + return nil, ErrSTARKAggNoSigs + } + + sa.mu.Lock() + defer sa.mu.Unlock() + + // Build execution trace: each vote becomes a row. + // Columns: [target_number, source_number, target_hash_hi, target_hash_lo, sig_hash_hi, sig_hash_lo, validator_index] + trace := make([][]proofs.FieldElement, len(votes)) + pubkeys := make([][]byte, len(votes)) + + for i, vote := range votes { + sigHash := hashSignatureData(vote.PQSignature, vote.PQPublicKey) + targetHi := new(big.Int).SetBytes(vote.TargetHash[:16]) + targetLo := new(big.Int).SetBytes(vote.TargetHash[16:]) + sigHi := new(big.Int).SetBytes(sigHash[:16]) + sigLo := new(big.Int).SetBytes(sigHash[16:]) + + trace[i] = []proofs.FieldElement{ + proofs.NewFieldElement(int64(vote.TargetNumber)), + proofs.NewFieldElement(int64(vote.SourceNumber)), + {Value: targetHi}, + {Value: targetLo}, + {Value: sigHi}, + {Value: sigLo}, + proofs.NewFieldElement(int64(vote.ValidatorIndex)), + } + pubkeys[i] = vote.PQPublicKey + } + + // Constraint: each row's sig_hash must be non-zero (signature exists). 
+ constraints := []proofs.STARKConstraint{ + {Degree: 1, Coefficients: []proofs.FieldElement{proofs.NewFieldElement(1)}}, + } + + starkProof, err := sa.prover.GenerateSTARKProof(trace, constraints) + if err != nil { + return nil, err + } + + // Compute committee root from public keys. + committeeRoot := computeCommitteeRoot(pubkeys) + + return &STARKSignatureAggregation{ + AggregateProof: starkProof, + CommitteeRoot: committeeRoot, + VoteDataHash: voteDataHash, + NumValidators: len(votes), + }, nil +} + +// Verify checks that a STARK signature aggregation is valid. +// expectedVoteDataHash should be the hash of the vote data being attested (prevents cross-block replay). +func (sa *STARKSignatureAggregator) Verify(agg *STARKSignatureAggregation, pubkeys [][]byte, expectedVoteDataHash common.Hash) (bool, error) { + if agg == nil { + return false, ErrSTARKAggNilResult + } + if agg.AggregateProof == nil { + return false, ErrSTARKAggInvalidProof + } + + // Verify that the proof is bound to the expected vote data (prevents replay attacks). + if agg.VoteDataHash != expectedVoteDataHash { + return false, fmt.Errorf("stark_sig_aggregation: vote data hash mismatch, expected %s, got %s", + expectedVoteDataHash.Hex(), agg.VoteDataHash.Hex()) + } + + sa.mu.RLock() + defer sa.mu.RUnlock() + + // Verify the STARK proof. + valid, err := sa.prover.VerifySTARKProof(agg.AggregateProof, nil) + if err != nil { + return false, err + } + if !valid { + return false, ErrSTARKAggVerifyFailed + } + + // Verify committee root matches the public keys if provided. + if len(pubkeys) > 0 { + expectedRoot := computeCommitteeRoot(pubkeys) + if expectedRoot != agg.CommitteeRoot { + return false, ErrSTARKAggMismatch + } + } + + return true, nil +} + +// MarshalProof serializes a STARKSignatureAggregation to bytes for storage in block headers. 
+func MarshalSTARKAggregation(agg *STARKSignatureAggregation) ([]byte, error) {
+	if agg == nil || agg.AggregateProof == nil {
+		return nil, ErrSTARKAggNilResult
+	}
+
+	// Layout: [committeeRoot(32)] [voteDataHash(32)] [numValidators(4)]
+	//         [commitmentRoot(32)] [numFRILayers(4)] [friLayers...]
+	//         [numQueries(4)] [queries...]
+	// All integers are 4-byte big-endian, written via starkAppendU32 so the
+	// encode/decode byte order lives in exactly one place.
+	var buf []byte
+
+	buf = append(buf, agg.CommitteeRoot.Bytes()...)
+	buf = append(buf, agg.VoteDataHash.Bytes()...)
+	buf = starkAppendU32(buf, uint32(agg.NumValidators))
+
+	proof := agg.AggregateProof
+	buf = append(buf, proof.CommitmentRoot[:]...)
+
+	buf = starkAppendU32(buf, uint32(len(proof.FRILayers)))
+	for _, layer := range proof.FRILayers {
+		buf = append(buf, layer[:]...)
+	}
+
+	buf = starkAppendU32(buf, uint32(len(proof.QueryResponses)))
+	for _, qr := range proof.QueryResponses {
+		buf = starkAppendU32(buf, uint32(qr.Index))
+		buf = append(buf, qr.Value[:]...)
+		buf = starkAppendU32(buf, uint32(len(qr.AuthPath)))
+		for _, auth := range qr.AuthPath {
+			buf = append(buf, auth[:]...)
+		}
+	}
+
+	return buf, nil
+}
+
+// starkAppendU32 appends v to buf as four big-endian bytes.
+func starkAppendU32(buf []byte, v uint32) []byte {
+	return append(buf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// starkReadU32 decodes the four big-endian bytes at data[off] as a
+// non-negative int. Callers must ensure off+4 <= len(data).
+func starkReadU32(data []byte, off int) int {
+	return int(data[off])<<24 | int(data[off+1])<<16 | int(data[off+2])<<8 | int(data[off+3])
+}
+
+// UnmarshalSTARKAggregation deserializes a STARKSignatureAggregation from bytes.
+// Every variable-length section is bounds-checked before it is read, and any
+// trailing bytes after the last query response are rejected so each logical
+// aggregation has exactly one valid encoding.
+func UnmarshalSTARKAggregation(data []byte) (*STARKSignatureAggregation, error) {
+	if len(data) < 104 { // minimum: 32+32+4+32+4 = 104
+		return nil, ErrSTARKAggInvalidProof
+	}
+
+	offset := 0
+
+	var committeeRoot common.Hash
+	copy(committeeRoot[:], data[offset:offset+32])
+	offset += 32
+
+	var voteDataHash common.Hash
+	copy(voteDataHash[:], data[offset:offset+32])
+	offset += 32
+
+	numValidators := starkReadU32(data, offset)
+	offset += 4
+	if numValidators <= 0 || numValidators > 1000 { // sanity limit: BSC has ~21-45 validators
+		return nil, ErrSTARKAggInvalidProof
+	}
+
+	var commitmentRoot [32]byte
+	copy(commitmentRoot[:], data[offset:offset+32])
+	offset += 32
+
+	numFRI := starkReadU32(data, offset)
+	offset += 4
+	if numFRI > 64 { // sanity limit: log2(2^64) = 64 layers max
+		return nil, ErrSTARKAggInvalidProof
+	}
+
+	friLayers := make([][32]byte, numFRI)
+	for i := 0; i < numFRI; i++ {
+		if offset+32 > len(data) {
+			return nil, ErrSTARKAggInvalidProof
+		}
+		copy(friLayers[i][:], data[offset:offset+32])
+		offset += 32
+	}
+
+	if offset+4 > len(data) {
+		return nil, ErrSTARKAggInvalidProof
+	}
+	numQ := starkReadU32(data, offset)
+	offset += 4
+	if numQ > 1024 { // sanity limit: no more than 1024 query responses
+		return nil, ErrSTARKAggInvalidProof
+	}
+
+	queryResponses := make([]proofs.QueryResponse, numQ)
+	for i := 0; i < numQ; i++ {
+		if offset+36 > len(data) { // 4-byte index + 32-byte value
+			return nil, ErrSTARKAggInvalidProof
+		}
+		idx := starkReadU32(data, offset)
+		offset += 4
+		var val [32]byte
+		copy(val[:], data[offset:offset+32])
+		offset += 32
+
+		if offset+4 > len(data) {
+			return nil, ErrSTARKAggInvalidProof
+		}
+		numAuth := starkReadU32(data, offset)
+		offset += 4
+		if numAuth > 64 { // sanity limit: max tree depth
+			return nil, ErrSTARKAggInvalidProof
+		}
+
+		authPath := make([][32]byte, numAuth)
+		for j := 0; j < numAuth; j++ {
+			if offset+32 > len(data) {
+				return nil, ErrSTARKAggInvalidProof
+			}
+			copy(authPath[j][:], data[offset:offset+32])
+			offset += 32
+		}
+
+		queryResponses[i] = proofs.QueryResponse{
+			Index:    idx,
+			Value:    val,
+			AuthPath: authPath,
+		}
+	}
+
+	// Reject trailing garbage: consensus objects must not be malleable, so a
+	// valid encoding consumes the input exactly.
+	if offset != len(data) {
+		return nil, ErrSTARKAggInvalidProof
+	}
+
+	return &STARKSignatureAggregation{
+		AggregateProof: &proofs.STARKProofData{
+			CommitmentRoot: commitmentRoot,
+			FRILayers:      friLayers,
+			QueryResponses: queryResponses,
+			TraceLength:    numValidators,
+			NumColumns:     7,
+		},
+		CommitteeRoot: committeeRoot,
+		VoteDataHash:  voteDataHash,
+		NumValidators: numValidators,
+	}, nil
+}
+
+// hashSignatureData hashes signature and public key into a commitment.
+func hashSignatureData(sig, pubkey []byte) [32]byte {
+	h := sha256.New()
+	h.Write(sig)
+	h.Write(pubkey)
+	var result [32]byte
+	copy(result[:], h.Sum(nil))
+	return result
+}
+
+// computeCommitteeRoot computes a Merkle root over validator public keys.
+// The input order is significant: callers must pass keys in canonical
+// validator order so every node derives the same root.
+func computeCommitteeRoot(pubkeys [][]byte) common.Hash {
+	if len(pubkeys) == 0 {
+		return common.Hash{}
+	}
+
+	// Hash each public key into a leaf.
+	leaves := make([][32]byte, len(pubkeys))
+	for i, pk := range pubkeys {
+		leaves[i] = sha256.Sum256(pk)
+	}
+
+	// Pad to next power of two with all-zero leaves.
+	// NOTE(review): leaves and internal nodes share the same hash domain;
+	// consider RFC 6962-style leaf/node prefixes if committee membership
+	// proofs are ever exposed externally.
+	n := len(leaves)
+	target := 1
+	for target < n {
+		target <<= 1
+	}
+	padded := make([][32]byte, target)
+	copy(padded, leaves)
+
+	// Build Merkle tree bottom-up.
+	layer := padded
+	for len(layer) > 1 {
+		next := make([][32]byte, len(layer)/2)
+		for i := range next {
+			var node [64]byte
+			copy(node[:32], layer[2*i][:])
+			copy(node[32:], layer[2*i+1][:])
+			next[i] = sha256.Sum256(node[:])
+		}
+		layer = next
+	}
+
+	var root common.Hash
+	copy(root[:], layer[0][:])
+	return root
+}
diff --git a/consensus/parlia/pq_stark_aggregation_test.go b/consensus/parlia/pq_stark_aggregation_test.go
new file mode 100644
index 0000000000..e7b79b2d53
--- /dev/null
+++ b/consensus/parlia/pq_stark_aggregation_test.go
@@ -0,0 +1,226 @@
+package parlia
+
+import (
+	"crypto/rand"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// generateTestVotes builds n votes with random (structurally valid but
+// cryptographically meaningless) ML-DSA-44-sized signatures and public keys.
+func generateTestVotes(n int) ([]PQVoteData, common.Hash) {
+	voteDataHash := common.BytesToHash([]byte("test-vote-data-hash-1234567890ab"))
+	votes := make([]PQVoteData, n)
+	for i := 0; i < n; i++ {
+		sig := make([]byte, 2420) // ML-DSA-44 signature size
+		rand.Read(sig)            // crypto/rand.Read does not fail on supported platforms
+		pubkey := make([]byte, 1312) // ML-DSA-44 public key size
+		rand.Read(pubkey)
+		votes[i] = PQVoteData{
+			TargetNumber:   100,
+			TargetHash:     common.BytesToHash([]byte("target-hash-abcdefghijklmnopqrst")),
+			SourceNumber:   99,
+			SourceHash:     common.BytesToHash([]byte("source-hash-abcdefghijklmnopqrst")),
+			PQSignature:    sig,
+			PQPublicKey:    pubkey,
+			ValidatorIndex: i,
+		}
+	}
+	return votes, voteDataHash
+}
+
+func TestSTARKAggregation_Basic(t *testing.T) {
+	votes, voteDataHash := generateTestVotes(21) // BSC has 21 validators
+	aggregator := NewSTARKSignatureAggregator()
+
+	agg, err := aggregator.Aggregate(votes, voteDataHash)
+	if err != nil {
+		t.Fatalf("Aggregate failed: %v", err)
+	}
+	if agg == nil {
+		t.Fatal("Aggregate returned nil")
+	}
+	if agg.NumValidators != 21 {
+		t.Errorf("expected 21 validators, got %d", agg.NumValidators)
+	}
+	if agg.VoteDataHash != voteDataHash {
+		t.Error("vote data hash mismatch")
+	}
+}
+
+func TestSTARKAggregation_Verify(t *testing.T) {
+	votes, voteDataHash := generateTestVotes(15)
+	aggregator :=
NewSTARKSignatureAggregator() + + agg, err := aggregator.Aggregate(votes, voteDataHash) + if err != nil { + t.Fatalf("Aggregate failed: %v", err) + } + + // Extract pubkeys for verification. + pubkeys := make([][]byte, len(votes)) + for i, v := range votes { + pubkeys[i] = v.PQPublicKey + } + + valid, err := aggregator.Verify(agg, pubkeys, voteDataHash) + if err != nil { + t.Fatalf("Verify failed: %v", err) + } + if !valid { + t.Error("expected valid aggregation") + } +} + +func TestSTARKAggregation_VerifyMismatchedPubkeys(t *testing.T) { + votes, voteDataHash := generateTestVotes(10) + aggregator := NewSTARKSignatureAggregator() + + agg, err := aggregator.Aggregate(votes, voteDataHash) + if err != nil { + t.Fatalf("Aggregate failed: %v", err) + } + + // Use different pubkeys for verification — should fail committee root check. + wrongPubkeys := make([][]byte, len(votes)) + for i := range votes { + wrongPubkeys[i] = make([]byte, 1312) + rand.Read(wrongPubkeys[i]) + } + + valid, err := aggregator.Verify(agg, wrongPubkeys, voteDataHash) + if err == nil || valid { + t.Error("expected verification to fail with mismatched pubkeys") + } +} + +func TestSTARKAggregation_EmptyVotes(t *testing.T) { + aggregator := NewSTARKSignatureAggregator() + _, err := aggregator.Aggregate(nil, common.Hash{}) + if err != ErrSTARKAggNoSigs { + t.Errorf("expected ErrSTARKAggNoSigs, got %v", err) + } +} + +func TestSTARKAggregation_MarshalUnmarshal(t *testing.T) { + votes, voteDataHash := generateTestVotes(21) + aggregator := NewSTARKSignatureAggregator() + + agg, err := aggregator.Aggregate(votes, voteDataHash) + if err != nil { + t.Fatalf("Aggregate failed: %v", err) + } + + // Marshal + data, err := MarshalSTARKAggregation(agg) + if err != nil { + t.Fatalf("Marshal failed: %v", err) + } + if len(data) == 0 { + t.Fatal("Marshal returned empty data") + } + + // Unmarshal + agg2, err := UnmarshalSTARKAggregation(data) + if err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if 
agg2.NumValidators != agg.NumValidators { + t.Errorf("NumValidators mismatch: %d vs %d", agg2.NumValidators, agg.NumValidators) + } + if agg2.CommitteeRoot != agg.CommitteeRoot { + t.Error("CommitteeRoot mismatch after marshal/unmarshal") + } + if agg2.VoteDataHash != agg.VoteDataHash { + t.Error("VoteDataHash mismatch after marshal/unmarshal") + } + + // Verify the unmarshaled aggregation + pubkeys := make([][]byte, len(votes)) + for i, v := range votes { + pubkeys[i] = v.PQPublicKey + } + valid, err := aggregator.Verify(agg2, pubkeys, voteDataHash) + if err != nil { + t.Fatalf("Verify after unmarshal failed: %v", err) + } + if !valid { + t.Error("expected valid after unmarshal") + } +} + +func TestSTARKAggregation_SingleVote(t *testing.T) { + votes, voteDataHash := generateTestVotes(1) + aggregator := NewSTARKSignatureAggregator() + + agg, err := aggregator.Aggregate(votes, voteDataHash) + if err != nil { + t.Fatalf("Aggregate with single vote failed: %v", err) + } + if agg.NumValidators != 1 { + t.Errorf("expected 1 validator, got %d", agg.NumValidators) + } + + pubkeys := [][]byte{votes[0].PQPublicKey} + valid, err := aggregator.Verify(agg, pubkeys, voteDataHash) + if err != nil { + t.Fatalf("Verify single vote failed: %v", err) + } + if !valid { + t.Error("expected valid for single vote") + } +} + +func TestSTARKAggregation_NilVerify(t *testing.T) { + aggregator := NewSTARKSignatureAggregator() + + valid, err := aggregator.Verify(nil, nil, common.Hash{}) + if err != ErrSTARKAggNilResult { + t.Errorf("expected ErrSTARKAggNilResult, got %v", err) + } + if valid { + t.Error("expected invalid for nil aggregation") + } +} + +func TestSTARKAggregation_VoteDataHashMismatch(t *testing.T) { + votes, voteDataHash := generateTestVotes(10) + aggregator := NewSTARKSignatureAggregator() + + agg, err := aggregator.Aggregate(votes, voteDataHash) + if err != nil { + t.Fatalf("Aggregate failed: %v", err) + } + + pubkeys := make([][]byte, len(votes)) + for i, v := range votes { 
+ pubkeys[i] = v.PQPublicKey + } + + // Use a different vote data hash — should fail (C3 fix: prevents cross-block replay). + wrongHash := common.BytesToHash([]byte("wrong-hash-abcdefghijklmnopqrstuv")) + valid, err := aggregator.Verify(agg, pubkeys, wrongHash) + if err == nil || valid { + t.Error("expected verification to fail with mismatched vote data hash") + } +} + +func BenchmarkSTARKAggregation_21Validators(b *testing.B) { + votes, voteDataHash := generateTestVotes(21) + aggregator := NewSTARKSignatureAggregator() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + agg, err := aggregator.Aggregate(votes, voteDataHash) + if err != nil { + b.Fatal(err) + } + pubkeys := make([][]byte, len(votes)) + for j, v := range votes { + pubkeys[j] = v.PQPublicKey + } + _, err = aggregator.Verify(agg, pubkeys, voteDataHash) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/consensus/parlia/pq_vote_attestation.go b/consensus/parlia/pq_vote_attestation.go new file mode 100644 index 0000000000..3717d404bb --- /dev/null +++ b/consensus/parlia/pq_vote_attestation.go @@ -0,0 +1,316 @@ +package parlia + +import ( + "bytes" + "fmt" + + "github.com/bits-and-blooms/bitset" + "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +// pqAssembleVoteAttestation collects PQ votes and assembles the STARK-aggregated +// vote attestation into the block header. This replaces BLS aggregation post-PQFork. 
+func (p *Parlia) pqAssembleVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header) error {
+	// Only active after the PQ fork, past the first few blocks, and when a
+	// PQ vote pool exists to fetch votes from.
+	if !p.chainConfig.IsPQFork(header.Number, header.Time) || header.Number.Uint64() < 3 || p.PQVotePool == nil {
+		return nil
+	}
+
+	parent := chain.GetHeaderByHash(header.ParentHash)
+	if parent == nil {
+		return fmt.Errorf("parent not found")
+	}
+	// NOTE(review): the underlying err is discarded here; wrapping with %w
+	// would preserve the cause for callers — confirm log/err conventions.
+	justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{parent})
+	if err != nil {
+		return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+	}
+
+	var (
+		votes                  []*types.PQVoteEnvelope
+		targetHeader           = parent
+		targetHeaderParentSnap *Snapshot
+	)
+	// Walk back through recent ancestors until one has a 2/3 quorum of PQ
+	// votes, stopping at the justified block.
+	for range p.GetAncestorGenerationDepth(header) {
+		snap, err := p.snapshot(chain, targetHeader.Number.Uint64()-1, targetHeader.ParentHash, nil)
+		if err != nil {
+			return err
+		}
+		votes = p.PQVotePool.FetchVotesByBlockHash(targetHeader.Hash(), justifiedBlockNumber)
+		quorum := cmath.CeilDiv(len(snap.Validators)*2, 3)
+		if len(votes) >= quorum {
+			targetHeaderParentSnap = snap
+			break
+		}
+
+		targetHeader = chain.GetHeaderByHash(targetHeader.ParentHash)
+		if targetHeader == nil {
+			return fmt.Errorf("parent not found")
+		}
+		if targetHeader.Number.Uint64() <= justifiedBlockNumber {
+			break
+		}
+	}
+	// No ancestor reached quorum: nothing to attest in this block.
+	if targetHeaderParentSnap == nil {
+		return nil
+	}
+
+	// Build PQ vote attestation with STARK aggregation.
+	attestation := &types.PQVoteAttestation{
+		Data: &types.VoteData{
+			SourceNumber: justifiedBlockNumber,
+			SourceHash:   justifiedBlockHash,
+			TargetNumber: targetHeader.Number.Uint64(),
+			TargetHash:   targetHeader.Hash(),
+		},
+	}
+
+	// Validate vote data consistency.
+	for _, vote := range votes {
+		if vote.Data.Hash() != attestation.Data.Hash() {
+			return fmt.Errorf("vote check error, expected: %v, real: %v", attestation.Data, vote.Data)
+		}
+	}
+
+	// Build a map from vote address hash to vote for deduplication and lookup.
+	voteAddrSet := make(map[common.Hash]*types.PQVoteEnvelope, len(votes))
+	for _, vote := range votes {
+		addrHash := crypto.Keccak256Hash(vote.VoteAddress[:])
+		voteAddrSet[addrHash] = vote
+	}
+
+	// Collect votes in canonical validator order (sorted by address, matching snap.validators()).
+	// This ensures the committee root is deterministic across all nodes.
+	validators := targetHeaderParentSnap.validators()
+	pqVotes := make([]PQVoteData, 0, len(votes))
+	for idx, val := range validators {
+		addrHash := crypto.Keccak256Hash(targetHeaderParentSnap.Validators[val].PQVoteAddress[:])
+		vote, ok := voteAddrSet[addrHash]
+		if !ok {
+			continue
+		}
+		pqVotes = append(pqVotes, PQVoteData{
+			TargetNumber:   vote.Data.TargetNumber,
+			TargetHash:     vote.Data.TargetHash,
+			SourceNumber:   vote.Data.SourceNumber,
+			SourceHash:     vote.Data.SourceHash,
+			PQSignature:    vote.Signature[:],
+			PQPublicKey:    vote.VoteAddress[:],
+			ValidatorIndex: idx,
+		})
+	}
+
+	// STARK aggregate the signatures.
+	aggregator := NewSTARKSignatureAggregator()
+	agg, err := aggregator.Aggregate(pqVotes, attestation.Data.Hash())
+	if err != nil {
+		log.Error("Failed to STARK aggregate vote signatures", "err", err)
+		return err
+	}
+
+	// Marshal the STARK aggregation proof.
+	proofBytes, err := MarshalSTARKAggregation(agg)
+	if err != nil {
+		log.Error("Failed to marshal STARK aggregation", "err", err)
+		return err
+	}
+	attestation.AggProof = proofBytes
+	log.Info("PQ STARK attestation assembled",
+		"block", header.Number, "target", attestation.Data.TargetNumber,
+		"source", attestation.Data.SourceNumber, "votes", len(pqVotes),
+		"proofSize", len(proofBytes))
+
+	// Prepare vote address bitset using canonical validator order.
+	for idx, val := range validators {
+		valInfo := targetHeaderParentSnap.Validators[val]
+		addrHash := crypto.Keccak256Hash(valInfo.PQVoteAddress[:])
+		if _, ok := voteAddrSet[addrHash]; ok {
+			attestation.VoteAddressSet |= 1 << uint(idx)
+		}
+	}
+
+	// Sanity check: every fetched vote must be represented by a bit; fewer
+	// bits than votes means a vote came from an address outside the set.
+	bitsetCount := bitset.From([]uint64{uint64(attestation.VoteAddressSet)}).Count()
+	if bitsetCount < uint(len(votes)) {
+		log.Warn(fmt.Sprintf("pqAssembleVoteAttestation, check VoteAddress Set failed, expected:%d, real:%d", len(votes), bitsetCount))
+		return fmt.Errorf("invalid attestation, check VoteAddress Set failed")
+	}
+
+	// Encode & insert into header extra.
+	// The attestation is spliced in between the existing extra data and the
+	// trailing seal bytes, preserving the seal at the very end.
+	buf := new(bytes.Buffer)
+	if err = rlp.Encode(buf, attestation); err != nil {
+		return fmt.Errorf("attestation: failed to encode: %w", err)
+	}
+	extraSealStart := len(header.Extra) - extraSeal
+	extraSealBytes := header.Extra[extraSealStart:]
+	header.Extra = append(header.Extra[:extraSealStart], buf.Bytes()...)
+	header.Extra = append(header.Extra, extraSealBytes...)
+
+	return nil
+}
+
+// pqVerifyVoteAttestation verifies a PQ vote attestation using STARK proof verification.
+func (p *Parlia) pqVerifyVoteAttestation(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error {
+	epochLength, err := p.epochLength(chain, header, parents)
+	if err != nil {
+		return err
+	}
+	attestation, err := getPQVoteAttestationFromHeader(header, chain.Config(), epochLength)
+	if err != nil {
+		return err
+	}
+	// A header without an attestation is valid; there is nothing to verify.
+	if attestation == nil {
+		return nil
+	}
+	if attestation.Data == nil {
+		return fmt.Errorf("invalid attestation, vote data is nil")
+	}
+	if len(attestation.Extra) > types.MaxAttestationExtraLength {
+		return fmt.Errorf("invalid attestation, too large extra length: %d", len(attestation.Extra))
+	}
+	if attestation.Data.SourceNumber >= attestation.Data.TargetNumber {
+		return fmt.Errorf("invalid attestation, SourceNumber not lower than TargetNumber")
+	}
+
+	// Verify source block.
+	parent, err := p.getParent(chain, header, parents)
+	if err != nil {
+		return err
+	}
+	sourceNumber := attestation.Data.SourceNumber
+	sourceHash := attestation.Data.SourceHash
+	headers := []*types.Header{parent}
+	if len(parents) > 0 {
+		headers = parents
+	}
+	justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, headers)
+	if err != nil {
+		return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+	}
+	// The attestation's source must be exactly the highest justified block.
+	if sourceNumber != justifiedBlockNumber || sourceHash != justifiedBlockHash {
+		return fmt.Errorf("invalid attestation, source mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
+			justifiedBlockNumber, justifiedBlockHash, sourceNumber, sourceHash)
+	}
+
+	// Verify target block.
+	// The target must be one of the recent ancestors within the allowed depth.
+	targetNumber := attestation.Data.TargetNumber
+	targetHash := attestation.Data.TargetHash
+	match := false
+	ancestor := parent
+	ancestorParents := trimParents(parents)
+	for range p.GetAncestorGenerationDepth(header) {
+		if targetNumber == ancestor.Number.Uint64() && targetHash == ancestor.Hash() {
+			match = true
+			break
+		}
+		ancestor, err = p.getParent(chain, ancestor, ancestorParents)
+		if err != nil {
+			return err
+		}
+		ancestorParents = trimParents(ancestorParents)
+	}
+	if !match {
+		return fmt.Errorf("invalid attestation, target mismatch, real block: %d, hash: %s", targetNumber, targetHash)
+	}
+
+	// Check quorum.
+	snap, err := p.snapshot(chain, ancestor.Number.Uint64()-1, ancestor.ParentHash, ancestorParents)
+	if err != nil {
+		return err
+	}
+	validators := snap.validators()
+	validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
+	if validatorsBitSet.Count() > uint(len(validators)) {
+		return fmt.Errorf("invalid attestation, vote number larger than validators number")
+	}
+
+	// Collect voted validator public keys for committee root verification.
+	// Back-fill PQVoteAddress from the process-level cache when the snapshot was
+	// loaded from DB before WarmPQRegistryCache had run (PQVoteAddress == zero).
+	// Writing through the pointer also updates the LRU-cached Snapshot in place.
+	votedPubkeys := make([][]byte, 0, validatorsBitSet.Count())
+	votedCount := 0
+	for index, val := range validators {
+		if !validatorsBitSet.Test(uint(index)) {
+			continue
+		}
+		valInfo := snap.Validators[val]
+		if valInfo.PQVoteAddress == (types.PQPublicKey{}) {
+			if pubKey := vm.PQRegistryLookup(val); len(pubKey) == types.PQPublicKeyLength {
+				copy(valInfo.PQVoteAddress[:], pubKey)
+			}
+		}
+		votedPubkeys = append(votedPubkeys, valInfo.PQVoteAddress[:])
+		votedCount++
+	}
+
+	// Check 2/3 quorum.
+	if votedCount < cmath.CeilDiv(len(snap.Validators)*2, 3) {
+		return fmt.Errorf("invalid attestation, not enough validators voted")
+	}
+
+	// Verify STARK aggregate proof.
+	agg, err := UnmarshalSTARKAggregation(attestation.AggProof)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal STARK aggregation: %v", err)
+	}
+
+	aggregator := NewSTARKSignatureAggregator()
+	valid, err := aggregator.Verify(agg, votedPubkeys, attestation.Data.Hash())
+	if err != nil {
+		return fmt.Errorf("STARK verification failed: %v", err)
+	}
+	if !valid {
+		return fmt.Errorf("invalid attestation, STARK signature verify failed")
+	}
+
+	log.Info("PQ STARK attestation verified",
+		"block", header.Number, "target", attestation.Data.TargetNumber,
+		"source", attestation.Data.SourceNumber, "voters", votedCount,
+		"proofSize", len(attestation.AggProof))
+
+	return nil
+}
+
+// getPQVoteAttestationFromHeader extracts PQ vote attestation from the block header.
+// This mirrors getVoteAttestationFromHeader but decodes PQVoteAttestation.
+func getPQVoteAttestationFromHeader(header *types.Header, chainConfig *params.ChainConfig, epochLength uint64) (*types.PQVoteAttestation, error) {
+	// Extra must hold more than vanity + seal for an attestation to exist.
+	if len(header.Extra) <= extraVanity+extraSeal {
+		return nil, nil
+	}
+
+	number := header.Number.Uint64()
+
+	var attestationBytes []byte
+	if number%epochLength != 0 {
+		attestationBytes = header.Extra[extraVanity : len(header.Extra)-extraSeal]
+	} else {
+		// Epoch block: skip the validator set bytes.
+		num := int(header.Extra[extraVanity])
+		start := extraVanity + validatorNumberSize + num*validatorBytesLength
+		if chainConfig.IsBohr(header.Number, header.Time) {
+			start += turnLengthSize
+		}
+		end := len(header.Extra) - extraSeal
+		if end <= start {
+			return nil, nil
+		}
+		attestationBytes = header.Extra[start:end]
+	}
+
+	if len(attestationBytes) == 0 {
+		return nil, nil
+	}
+
+	var attestation types.PQVoteAttestation
+	if err := rlp.DecodeBytes(attestationBytes, &attestation); err != nil {
+		return nil, fmt.Errorf("block %d has vote attestation info, decode err: %s", number, err)
+	}
+
+	return &attestation, nil
+}
diff --git a/consensus/parlia/snapshot.go b/consensus/parlia/snapshot.go
index 75b2e25ab3..2c9410ed41 100644
--- a/consensus/parlia/snapshot.go
+++ b/consensus/parlia/snapshot.go
@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/common/lru"
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/log"
@@ -54,8 +55,9 @@ type Snapshot struct {
 }
 
 type ValidatorInfo struct {
-	Index       int                `json:"index:omitempty"` // The index should offset by 1
-	VoteAddress types.BLSPublicKey `json:"vote_address,omitempty"`
+	Index         int                `json:"index:omitempty"` // The index should offset by 1
+	VoteAddress   types.BLSPublicKey `json:"vote_address,omitempty"`
+	PQVoteAddress types.PQPublicKey
`json:"pq_vote_address,omitempty"` // ML-DSA-44 vote pubkey (post-PQFork) } // newSnapshot creates a new snapshot with the specified startup parameters. This @@ -86,9 +88,14 @@ func newSnapshot( for idx, v := range validators { // The luban fork from the genesis block if len(voteAddrs) == len(validators) { - snap.Validators[v] = &ValidatorInfo{ + info := &ValidatorInfo{ VoteAddress: voteAddrs[idx], } + // Populate PQ vote pubkey from the 0x70 registry cache if genesis pre-allocated it. + if pubKey := vm.PQRegistryLookup(v); len(pubKey) == types.PQPublicKeyLength { + copy(info.PQVoteAddress[:], pubKey) + } + snap.Validators[v] = info } else { snap.Validators[v] = &ValidatorInfo{} } @@ -135,9 +142,30 @@ func loadSnapshot(config *params.ParliaConfig, sigCache *lru.Cache[common.Hash, snap.sigCache = sigCache snap.ethAPI = ethAPI + // Back-fill PQVoteAddress for any validator whose entry is still zero. + // This happens when the snapshot was stored to DB before WarmPQRegistryCache + // had run (e.g. genesis snapshot on first startup). The cache is warm by the + // time loadSnapshot is called on subsequent restarts, so this is a no-op once + // the pubkeys are in the cache. + snap.backfillPQVoteAddresses() + return snap, nil } +// backfillPQVoteAddresses populates PQVoteAddress for every validator whose +// field is still the zero value, using the process-level pqRegistryCache. +// Because ValidatorInfo is stored by pointer the update is visible to all +// holders of this Snapshot (including the LRU cache). +func (s *Snapshot) backfillPQVoteAddresses() { + for addr, info := range s.Validators { + if info != nil && info.PQVoteAddress == (types.PQPublicKey{}) { + if pubKey := vm.PQRegistryLookup(addr); len(pubKey) == types.PQPublicKeyLength { + copy(info.PQVoteAddress[:], pubKey) + } + } + } +} + // store inserts the snapshot into the database. 
func (s *Snapshot) store(db ethdb.Database) error { blob, err := json.Marshal(s) @@ -165,8 +193,9 @@ func (s *Snapshot) copy() *Snapshot { for v := range s.Validators { cpy.Validators[v] = &ValidatorInfo{ - Index: s.Validators[v].Index, - VoteAddress: s.Validators[v].VoteAddress, + Index: s.Validators[v].Index, + VoteAddress: s.Validators[v].VoteAddress, + PQVoteAddress: s.Validators[v].PQVoteAddress, } } for block, v := range s.Recents { @@ -407,9 +436,20 @@ func (s *Snapshot) apply(headers []*types.Header, chain consensus.ChainHeaderRea if !chainConfig.IsLuban(header.Number) { newVals[val] = &ValidatorInfo{} } else { - newVals[val] = &ValidatorInfo{ + info := &ValidatorInfo{ VoteAddress: voteAddrs[idx], } + // Carry over PQVoteAddress from the previous validator set if the validator stays. + if prev, ok := snap.Validators[val]; ok { + info.PQVoteAddress = prev.PQVoteAddress + } + // For any validator whose PQ pubkey is not yet cached, look it up from the 0x70 registry. + if chainConfig.IsPQFork(header.Number, header.Time) && info.PQVoteAddress == (types.PQPublicKey{}) { + if pubKey := vm.PQRegistryLookup(val); len(pubKey) == types.PQPublicKeyLength { + copy(info.PQVoteAddress[:], pubKey) + } + } + newVals[val] = info } } if chainConfig.IsBohr(header.Number, header.Time) { @@ -536,6 +576,42 @@ func (s *Snapshot) indexOfVal(validator common.Address) int { return -1 } +// ExtractValidatorAddresses extracts consensus addresses from a header's +// extra data. It handles both pre-Luban (address-only) and post-Luban +// (address + BLS vote key) formats. Callers that just need the address +// list (e.g. PQ registry warm-up) can use this instead of parseValidators. 
+func ExtractValidatorAddresses(header *types.Header) []common.Address { + if header == nil || len(header.Extra) <= extraVanity+extraSeal { + return nil + } + payload := header.Extra[extraVanity : len(header.Extra)-extraSeal] + if len(payload) == 0 { + return nil + } + + // Post-Luban: first byte = validator count, then N × (addr + BLS key). + n := int(payload[0]) + body := payload[validatorNumberSize:] + if n > 0 && len(body) >= n*validatorBytesLength { + addrs := make([]common.Address, n) + for i := 0; i < n; i++ { + addrs[i] = common.BytesToAddress(body[i*validatorBytesLength : i*validatorBytesLength+common.AddressLength]) + } + return addrs + } + + // Pre-Luban: payload is just N × 20-byte addresses, no count byte. + if len(payload)%validatorBytesLengthBeforeLuban == 0 { + n = len(payload) / validatorBytesLengthBeforeLuban + addrs := make([]common.Address, n) + for i := 0; i < n; i++ { + addrs[i] = common.BytesToAddress(payload[i*validatorBytesLengthBeforeLuban : (i+1)*validatorBytesLengthBeforeLuban]) + } + return addrs + } + return nil +} + func parseValidators(header *types.Header, chainConfig *params.ChainConfig, epochLength uint64) ([]common.Address, []types.BLSPublicKey, error) { validatorsBytes := getValidatorBytesFromHeader(header, chainConfig, epochLength) if len(validatorsBytes) == 0 { diff --git a/core/events.go b/core/events.go index 5b1b65750b..16b4d81d82 100644 --- a/core/events.go +++ b/core/events.go @@ -38,6 +38,9 @@ type RemovedLogsEvent struct{ Logs []*types.Log } // NewVoteEvent is posted when a batch of votes enters the vote pool. type NewVoteEvent struct{ Vote *types.VoteEnvelope } +// NewPQVoteEvent is posted when a PQ (ML-DSA-44) vote enters the PQ vote pool. +type NewPQVoteEvent struct{ Vote *types.PQVoteEnvelope } + // FinalizedHeaderEvent is posted when a finalized header is reached. 
type FinalizedHeaderEvent struct{ Header *types.Header } diff --git a/core/genesis.go b/core/genesis.go index 0fbabfeeb7..607bdbb79e 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -277,6 +277,7 @@ type ChainOverrides struct { OverrideBPO2 *uint64 OverridePasteur *uint64 OverrideVerkle *uint64 + OverridePQHardfork *uint64 } // apply applies the chain overrides on the supplied chain config. @@ -323,6 +324,9 @@ func (o *ChainOverrides) apply(cfg *params.ChainConfig) error { if o.OverrideVerkle != nil { cfg.VerkleTime = o.OverrideVerkle } + if o.OverridePQHardfork != nil { + cfg.PQForkTime = o.OverridePQHardfork + } return cfg.CheckConfigForkOrder() } @@ -736,6 +740,7 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis { common.BytesToAddress([]byte{0x10}): {Balance: big.NewInt(1)}, // BLSG1MapG1 common.BytesToAddress([]byte{0x11}): {Balance: big.NewInt(1)}, // BLSG2MapG2 common.BytesToAddress([]byte{0x1, 00}): {Balance: big.NewInt(1)}, // P256Verify + common.BytesToAddress([]byte{0x70}): {Nonce: 1}, // PQKeyRegistry // Pre-deploy system contracts params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0}, params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0}, diff --git a/core/genesis_test.go b/core/genesis_test.go index 52e76ad3f8..8a4571338a 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -21,6 +21,7 @@ import ( "encoding/json" "math/big" "reflect" + "strings" "testing" "github.com/davecgh/go-spew/spew" @@ -290,6 +291,178 @@ func TestConfigOrDefault(t *testing.T) { } } +const pqForkTimeGenesisJSON = `{ + "config": { + "chainId": 714, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "mirrorSyncBlock": 1, + "brunoBlock": 1, + "eulerBlock": 2, + "nanoBlock": 3, + "moranBlock": 3, + 
"gibbsBlock": 4, + "planckBlock": 5, + "lubanBlock": 6, + "platoBlock": 7, + "berlinBlock": 8, + "londonBlock": 8, + "hertzBlock": 8, + "hertzfixBlock": 8, + "pqForkTime": 0, + "shanghaiTime": 0, + "keplerTime": 0, + "feynmanTime": 0, + "feynmanFixTime": 0, + "cancunTime": 0, + "haberTime": 0, + "haberFixTime": 0, + "bohrTime": 0, + "pascalTime": 0, + "pragueTime": 0, + "lorentzTime": 0, + "maxwellTime": 0, + "fermiTime": 0, + "osakaTime": 0, + "mendelTime": 0, + "pasteurTime": 0, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "osaka": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + } + }, + "parlia": { + "period": 3, + "epoch": 200 + } + }, + "nonce": "0x0", + "timestamp": "0x5e9da7ce", + "extraData": "0x00", + "gasLimit": "0x2625a00", + "difficulty": "0x1", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": {} +}` + +func decodePQForkTimeGenesis(t *testing.T) *Genesis { + t.Helper() + + var genesis Genesis + if err := json.Unmarshal([]byte(pqForkTimeGenesisJSON), &genesis); err != nil { + t.Fatalf("unmarshal genesis: %v", err) + } + if genesis.Config == nil { + t.Fatal("decoded genesis config is nil") + } + if genesis.Config.PQForkTime == nil || *genesis.Config.PQForkTime != 0 { + t.Fatalf("decoded PQForkTime lost: have %v want 0", genesis.Config.PQForkTime) + } + return &genesis +} + +func TestLoadChainConfigWithRialtoHashPreservesPQForkTime(t *testing.T) { + genesis := decodePQForkTimeGenesis(t) + + db := rawdb.NewMemoryDatabase() + tdb := triedb.NewDatabase(db, triedb.HashDefaults) + block, err := genesis.Commit(db, tdb) + if err != nil { + t.Fatalf("commit genesis: %v", err) + } + oldRialtoHash := params.RialtoGenesisHash + params.RialtoGenesisHash = block.Hash() + defer func() { + params.RialtoGenesisHash 
= oldRialtoHash + }() + + got, hash, err := LoadChainConfig(db, nil) + if err != nil { + t.Fatalf("LoadChainConfig: %v", err) + } + if hash != block.Hash() { + t.Fatalf("unexpected genesis hash: have %s want %s", hash, block.Hash()) + } + if got.PQForkTime == nil || *got.PQForkTime != 0 { + t.Fatalf("loaded PQForkTime lost: have %v want 0", got.PQForkTime) + } +} + +func TestCustomGenesisPQForkTimeRoundTrip(t *testing.T) { + genesis := decodePQForkTimeGenesis(t) + + db := rawdb.NewMemoryDatabase() + tdb := triedb.NewDatabase(db, triedb.HashDefaults) + block, err := genesis.Commit(db, tdb) + if err != nil { + t.Fatalf("commit genesis: %v", err) + } + stored := rawdb.ReadChainConfig(db, block.Hash()) + if stored == nil { + t.Fatal("stored config is nil after commit") + } + if stored.PQForkTime == nil || *stored.PQForkTime != 0 { + t.Fatalf("stored PQForkTime lost after commit: have %v want 0", stored.PQForkTime) + } + oldRialtoHash := params.RialtoGenesisHash + params.RialtoGenesisHash = block.Hash() + defer func() { + params.RialtoGenesisHash = oldRialtoHash + }() + + passedForkTime := uint64(1) + lastHardforkTime := uint64(2) + updatedCfg, _, _, err := SetupGenesisBlockWithOverride(db, tdb, nil, &ChainOverrides{ + OverridePassedForkTime: &passedForkTime, + OverrideLorentz: &passedForkTime, + OverrideMaxwell: &passedForkTime, + OverrideFermi: &lastHardforkTime, + OverrideOsaka: &lastHardforkTime, + OverrideMendel: &lastHardforkTime, + OverridePasteur: &lastHardforkTime, + OverridePQHardfork: &lastHardforkTime, + }) + if err != nil { + t.Fatalf("setup genesis with override: %v", err) + } + if updatedCfg == nil { + t.Fatal("updated config is nil") + } + if updatedCfg.PQForkTime == nil || *updatedCfg.PQForkTime != lastHardforkTime { + t.Fatalf("updated PQForkTime lost after override path: have %v want %d", updatedCfg.PQForkTime, lastHardforkTime) + } + if !strings.Contains(updatedCfg.String(), "PQForkTime: 2") { + t.Fatalf("chain config string missing PQForkTime: %s", 
updatedCfg.String()) + } + stored = rawdb.ReadChainConfig(db, block.Hash()) + if stored == nil { + t.Fatal("stored config is nil after override path") + } + if stored.PQForkTime == nil || *stored.PQForkTime != lastHardforkTime { + t.Fatalf("stored PQForkTime lost after override path: have %v want %d", stored.PQForkTime, lastHardforkTime) + } +} + func newDbConfig(scheme string) *triedb.Config { if scheme == rawdb.HashScheme { return triedb.HashDefaults diff --git a/core/pq_e2e_test.go b/core/pq_e2e_test.go new file mode 100644 index 0000000000..ac3e6a16f5 --- /dev/null +++ b/core/pq_e2e_test.go @@ -0,0 +1,272 @@ +package core + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +// pqChainConfig returns a chain config with PQ active from timestamp 0. +func pqChainConfig() *params.ChainConfig { + cfg := *params.AllEthashProtocolChanges // shallow copy + cfg.PQForkTime = new(uint64) + return &cfg +} + +// TestPQTransactionE2E tests the full lifecycle of a PQ (ML-DSA-44) transaction: +// +// 1. Generate a PQ keypair; derive the sender address. +// 2. Pre-fund sender in the genesis block. +// 3. Build a one-block chain; include a PQ value-transfer tx. +// 4. Insert the block into a BlockChain. +// 5. Assert recipient received the transferred value. +// 6. Assert sender nonce incremented to 1. +func TestPQTransactionE2E(t *testing.T) { + // --- 1. 
Keys & addresses --- + pubKey, privKey, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("mldsa.GenerateKey: %v", err) + } + sender := crypto.PQPubkeyToAddress(pubKey) + recipient := common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + + chainID := big.NewInt(1337) // matches AllEthashProtocolChanges + signer := types.NewPQSigner(chainID) + restore := types.SetPQRegistryBackend(func(addr common.Address) []byte { + if addr == sender { + return pubKey + } + return nil + }) + defer restore() + + // --- 2. Genesis with funded sender --- + fundAmount := new(big.Int).Mul(big.NewInt(1e6), big.NewInt(params.GWei)) // 1 mGwei + config := pqChainConfig() + genesis := &Genesis{ + Config: config, + BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: GenesisAlloc{ + sender: {Balance: new(big.Int).Mul(fundAmount, big.NewInt(100))}, + }, + } + + // --- 3. Generate one block containing the PQ tx --- + transferValue := big.NewInt(1000) + gasPrice := big.NewInt(params.InitialBaseFee) // baseFee == gasPrice for simplicity + + genDB, blocks, _ := GenerateChainWithGenesis(genesis, ethash.NewFaker(), 1, + func(_ int, b *BlockGen) { + tx := types.NewTx(&types.PQTxData{ + ChainID: new(big.Int).Set(chainID), + Nonce: 0, + GasPrice: gasPrice, + Gas: params.TxGas, + From: sender, + To: &recipient, + Value: transferValue, + }) + signed, err := types.SignPQTx(tx, signer, privKey) + if err != nil { + t.Fatalf("SignPQTx: %v", err) + } + b.AddTx(signed) + }, + ) + _ = genDB + + // --- 4. Insert chain --- + db := rawdb.NewMemoryDatabase() + blockchain, err := NewBlockChain(db, genesis, ethash.NewFaker(), + DefaultConfig().WithStateScheme(rawdb.HashScheme)) + if err != nil { + t.Fatalf("NewBlockChain: %v", err) + } + defer blockchain.Stop() + + if _, err := blockchain.InsertChain(blocks); err != nil { + t.Fatalf("InsertChain: %v", err) + } + + // --- 5. 
Verify state --- + state, err := blockchain.State() + if err != nil { + t.Fatalf("blockchain.State: %v", err) + } + + recipientBal := state.GetBalance(recipient) + wantBal, _ := uint256.FromBig(transferValue) + if recipientBal.Cmp(wantBal) != 0 { + t.Errorf("recipient balance: got %s want %s", recipientBal, wantBal) + } + + senderNonce := state.GetNonce(sender) + if senderNonce != 1 { + t.Errorf("sender nonce: got %d want 1", senderNonce) + } + + t.Logf("sender: %s", sender.Hex()) + t.Logf("recipient: %s balance=%s", recipient.Hex(), recipientBal) + t.Logf("nonce: %d", senderNonce) +} + +func TestPQRegistryPrecompileE2E(t *testing.T) { + pubKey, _, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("mldsa.GenerateKey: %v", err) + } + caller := crypto.PQPubkeyToAddress(pubKey) + precompileAddr := common.BytesToAddress([]byte{0x70}) + + config := pqChainConfig() + genesis := &Genesis{Config: config, BaseFee: big.NewInt(params.InitialBaseFee)} + db := rawdb.NewMemoryDatabase() + blockchain, err := NewBlockChain(db, genesis, ethash.NewFaker(), + DefaultConfig().WithStateScheme(rawdb.HashScheme)) + if err != nil { + t.Fatalf("NewBlockChain: %v", err) + } + defer blockchain.Stop() + + state, _ := blockchain.State() + blockCtx := vm.BlockContext{ + BlockNumber: big.NewInt(0), + Time: 0, + Difficulty: big.NewInt(1), + GasLimit: 1e8, + BaseFee: big.NewInt(params.InitialBaseFee), + CanTransfer: CanTransfer, + Transfer: Transfer, + } + evm := vm.NewEVM(blockCtx, state, config, vm.Config{}) + + precompiles := vm.ActivePrecompiledContracts(config.Rules(big.NewInt(0), false, 0)) + precompiles[precompileAddr] = vm.NewPQKeyRegistryPrecompile() + evm.SetPrecompiles(precompiles) + + zero := uint256.NewInt(0) + + ret, _, err := evm.Call(caller, precompileAddr, pubKey, 1e7, zero) + if err != nil { + t.Fatalf("EVM.Call (register): %v", err) + } + if len(ret) != 1 || ret[0] != 1 { + t.Fatalf("unexpected register result: %x", ret) + } + + if _, _, err := evm.Call(caller, 
precompileAddr, pubKey, 1e7, zero); err == nil { + t.Fatal("expected second register to fail") + } + + lookupRet, _, err := evm.Call(caller, precompileAddr, caller.Bytes(), 1e7, zero) + if err != nil { + t.Fatalf("EVM.Call (lookup): %v", err) + } + if len(lookupRet) != len(pubKey) { + t.Fatalf("unexpected lookup size: have %d want %d", len(lookupRet), len(pubKey)) + } + if string(lookupRet) != string(pubKey) { + t.Fatal("lookup returned unexpected pubkey") + } +} + +// TestPQRecoverPrecompileE2E tests the pqRecover precompile (0x68) via a direct +// EVM call, verifying it returns the correct sender address for a valid ML-DSA-44 +// signature and 32 zero bytes for an invalid one. +func TestPQRecoverPrecompileE2E(t *testing.T) { + pubKey, privKey, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("mldsa.GenerateKey: %v", err) + } + + msg := crypto.Keccak256([]byte("pq-precompile-test")) + sig, err := crypto.SignPQ(msg, privKey) + if err != nil { + t.Fatalf("SignPQ: %v", err) + } + + // Build the 3764-byte precompile input: hash(32) || sig(2420) || pubKey(1312) + input := make([]byte, 0, 32+len(sig)+len(pubKey)) + input = append(input, msg...) + input = append(input, sig...) + input = append(input, pubKey...) 
+ + precompileAddr := common.BytesToAddress([]byte{0x68}) + + config := pqChainConfig() + genesis := &Genesis{Config: config, BaseFee: big.NewInt(params.InitialBaseFee)} + db := rawdb.NewMemoryDatabase() + blockchain, err := NewBlockChain(db, genesis, ethash.NewFaker(), + DefaultConfig().WithStateScheme(rawdb.HashScheme)) + if err != nil { + t.Fatalf("NewBlockChain: %v", err) + } + defer blockchain.Stop() + + state, _ := blockchain.State() + blockCtx := vm.BlockContext{ + BlockNumber: big.NewInt(0), + Time: 0, + Difficulty: big.NewInt(1), + GasLimit: 1e8, + BaseFee: big.NewInt(params.InitialBaseFee), + CanTransfer: CanTransfer, + Transfer: Transfer, + } + evm := vm.NewEVM(blockCtx, state, config, vm.Config{}) + + // Explicitly inject the pqRecover precompile (0x68) because AllEthashProtocolChanges + // does not enable BSC-specific Hertz rules where 0x68 is normally activated. + precompiles := vm.ActivePrecompiledContracts(config.Rules(big.NewInt(0), false, 0)) + precompiles[precompileAddr] = vm.NewPQRecoverPrecompile() + evm.SetPrecompiles(precompiles) + + zero := uint256.NewInt(0) + caller := common.Address{} + + // --- valid signature: expect 32-byte left-padded address --- + ret, _, err := evm.Call(caller, precompileAddr, input, 1e7, zero) + if err != nil { + t.Fatalf("EVM.Call (valid): %v", err) + } + if len(ret) != 32 { + t.Fatalf("expected 32-byte return, got %d", len(ret)) + } + // first 12 bytes must be zero (left-pad), last 20 bytes are the address + for i := 0; i < 12; i++ { + if ret[i] != 0 { + t.Errorf("byte %d of return should be 0, got %x", i, ret[i]) + } + } + wantAddr := crypto.PQPubkeyToAddress(pubKey) + gotAddr := common.BytesToAddress(ret[12:]) + if gotAddr != wantAddr { + t.Errorf("recovered address: got %s want %s", gotAddr.Hex(), wantAddr.Hex()) + } + t.Logf("pqRecover precompile returned address: %s", gotAddr.Hex()) + + // --- tampered signature: expect 32 zero bytes --- + tampered := make([]byte, len(input)) + copy(tampered, input) + 
tampered[32] ^= 0xff // flip first byte of sig + + ret2, _, err2 := evm.Call(caller, precompileAddr, tampered, 1e7, zero) + if err2 != nil { + t.Fatalf("EVM.Call (tampered): %v", err2) + } + for i, b := range ret2 { + if b != 0 { + t.Errorf("tampered: byte %d should be 0, got %x", i, b) + } + } + t.Log("pqRecover precompile correctly returned zero address for tampered signature") +} diff --git a/core/state_processor.go b/core/state_processor.go index 4299f37666..8260b66713 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -85,6 +85,20 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if lastBlock == nil { return nil, errors.New("could not get parent block") } + + // For PQ-fork blocks, pre-warm the registry cache for every PQ tx sender. + // PQFrom extracts the embedded From field without signature verification, + // which is safe here because the block seal has already been checked. + // This must be done on the Process statedb (single-threaded) so there is + // no data race with the concurrently-running prefetcher goroutine, which + // uses its own throwaway statedb and falls back to a cache-only lookup. 
+ if config.IsPQFork(blockNumber, header.Time) { + for _, tx := range block.Transactions() { + if from, ok := types.PQFrom(tx); ok { + vm.PQRegistryLookupWithState(from, statedb) + } + } + } // Handle upgrade built-in system contract code systemcontracts.TryUpdateBuildInSystemContract(p.chain.Config(), blockNumber, lastBlock.Time, block.Time(), statedb, true) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 50f9cf4eb4..56c177e168 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -309,7 +310,7 @@ func New(config Config, chain BlockChain) *LegacyPool { // pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction. func (pool *LegacyPool) Filter(tx *types.Transaction) bool { switch tx.Type() { - case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.SetCodeTxType: + case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType, types.SetCodeTxType, types.PQTxType: return true default: return false @@ -603,6 +604,23 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address] // This check is meant as an early check which only needs to be performed once, // and does not require the pool mutex to be held. func (pool *LegacyPool) ValidateTxBasics(tx *types.Transaction) error { + // For PQ transactions the sender's pubkey must be in the process-level cache + // before PQSigner.Sender can verify the signature. On a fresh start the cache + // is only pre-warmed for validators; a regular sender whose key is stored in + // the 0x70 registry on-chain won't be in the cache yet. 
Fill it on-demand. + // Each call creates its own StateDB snapshot via StateAt so that concurrent + // invocations of ValidateTxBasics never share a StateDB — pool.currentState + // is not goroutine-safe for concurrent reads. The write target is + // pqRegistryCache (sync.Map), which is safe for concurrent stores. + if tx.Type() == types.PQTxType { + if from, ok := types.PQFrom(tx); ok && len(vm.PQRegistryLookup(from)) == 0 { + if head := pool.currentHead.Load(); head != nil { + if statedb, err := pool.chain.StateAt(head.Root); err == nil { + vm.PQRegistryLookupWithState(from, statedb) + } + } + } + } sender, err := types.Sender(pool.signer, tx) if err != nil { return err @@ -620,7 +638,8 @@ func (pool *LegacyPool) ValidateTxBasics(tx *types.Transaction) error { 1< params.MaxInitCodeSize { return fmt.Errorf("%w: code size %v, limit %v", core.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) diff --git a/core/types/pq_transaction.go b/core/types/pq_transaction.go new file mode 100644 index 0000000000..5754f8f3f7 --- /dev/null +++ b/core/types/pq_transaction.go @@ -0,0 +1,114 @@ +package types + +import ( + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" +) + +const PQTxType = 0x05 + +// PQTxData represents an ML-DSA-44 signed typed transaction. +type PQTxData struct { + ChainID *big.Int + Nonce uint64 + GasPrice *big.Int + Gas uint64 + To *common.Address `rlp:"nil"` + Value *big.Int + Data []byte + From common.Address + PQSignature []byte +} + +// PQFrom returns the embedded sender address and true if tx is a PQ +// transaction. Unlike Sender(), this does NOT verify the signature. +// Use only when the signature will be verified separately (e.g. during +// block state processing after the block has already been seal-verified). 
+func PQFrom(tx *Transaction) (common.Address, bool) { + pqtx, ok := tx.inner.(*PQTxData) + if !ok { + return common.Address{}, false + } + return pqtx.From, true +} + +func (tx *PQTxData) copy() TxData { + cpy := &PQTxData{ + Nonce: tx.Nonce, + To: copyAddressPtr(tx.To), + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + From: tx.From, + PQSignature: common.CopyBytes(tx.PQSignature), + ChainID: new(big.Int), + GasPrice: new(big.Int), + Value: new(big.Int), + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.GasPrice != nil { + cpy.GasPrice.Set(tx.GasPrice) + } + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + return cpy +} + +func (tx *PQTxData) txType() byte { return PQTxType } +func (tx *PQTxData) chainID() *big.Int { return tx.ChainID } +func (tx *PQTxData) accessList() AccessList { return nil } +func (tx *PQTxData) data() []byte { return tx.Data } +func (tx *PQTxData) gas() uint64 { return tx.Gas } +func (tx *PQTxData) gasPrice() *big.Int { return tx.GasPrice } +func (tx *PQTxData) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *PQTxData) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *PQTxData) value() *big.Int { return tx.Value } +func (tx *PQTxData) nonce() uint64 { return tx.Nonce } +func (tx *PQTxData) to() *common.Address { return tx.To } + +func (tx *PQTxData) rawSignatureValues() (v, r, s *big.Int) { + return new(big.Int), new(big.Int), new(big.Int) +} + +func (tx *PQTxData) setSignatureValues(chainID, v, r, s *big.Int) { + if chainID == nil { + tx.ChainID = nil + return + } + if tx.ChainID == nil { + tx.ChainID = new(big.Int) + } + tx.ChainID.Set(chainID) +} + +func (tx *PQTxData) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + return dst.Set(tx.GasPrice) +} + +func (tx *PQTxData) encode(b *bytes.Buffer) error { + return rlp.Encode(b, tx) +} + +func (tx *PQTxData) decode(input []byte) error { + return rlp.DecodeBytes(input, tx) +} + +func (tx *PQTxData) sigHash(chainID *big.Int) common.Hash { + if 
chainID == nil { + chainID = tx.ChainID + } + return rlpHash([]any{ + chainID, + tx.Nonce, + tx.GasPrice, + tx.Gas, + tx.To, + tx.Value, + tx.Data, + }) +} diff --git a/core/types/pq_transaction_test.go b/core/types/pq_transaction_test.go new file mode 100644 index 0000000000..89be3f51ce --- /dev/null +++ b/core/types/pq_transaction_test.go @@ -0,0 +1,70 @@ +package types + +import ( + "math/big" + "testing" + + "github.com/cloudflare/circl/sign/mldsa/mldsa44" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" +) + +func TestPQTxSignAndSender(t *testing.T) { + pubKey, privKey, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + chainID := big.NewInt(97) + signer := NewPQSigner(chainID) + to := common.HexToAddress("0x0102030405060708090a0b0c0d0e0f1011121314") + from := crypto.PQPubkeyToAddress(pubKey) + restore := SetPQRegistryBackend(func(addr common.Address) []byte { + if addr == from { + return pubKey + } + return nil + }) + defer restore() + + tx := NewTx(&PQTxData{ + ChainID: new(big.Int).Set(chainID), + Nonce: 7, + GasPrice: big.NewInt(5), + Gas: 21000, + From: from, + To: &to, + Value: big.NewInt(11), + Data: []byte("pq-transaction"), + }) + + signed, err := SignPQTx(tx, signer, privKey) + if err != nil { + t.Fatalf("SignPQTx error: %v", err) + } + + pqtx, ok := signed.inner.(*PQTxData) + if !ok { + t.Fatal("signed transaction does not contain PQTxData") + } + if pqtx.From != from { + t.Fatalf("unexpected sender field: have %s want %s", pqtx.From.Hex(), from.Hex()) + } + if len(pqtx.PQSignature) != mldsa44.SignatureSize { + t.Fatalf("unexpected signature size: have %d want %d", len(pqtx.PQSignature), mldsa44.SignatureSize) + } + + hash := signer.Hash(signed) + if !crypto.VerifyPQ(pubKey, hash[:], pqtx.PQSignature) { + t.Fatal("stored PQ signature does not verify") + } + + gotFrom, err := Sender(signer, signed) + if err != nil { + 
t.Fatalf("Sender error: %v", err) + } + if gotFrom != pqtx.From { + t.Fatalf("unexpected sender: have %s want %s", gotFrom.Hex(), pqtx.From.Hex()) + } +} diff --git a/core/types/transaction.go b/core/types/transaction.go index 13dccc843e..a5807f41bf 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -208,6 +208,8 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { inner = new(AccessListTx) case DynamicFeeTxType: inner = new(DynamicFeeTx) + case PQTxType: + inner = new(PQTxData) case BlobTxType: inner = new(BlobTx) case SetCodeTxType: diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index ef8fb194d5..415d6e826c 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -56,6 +56,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint default: signer = FrontierSigner{} } + if config.IsPQFork(blockNumber, blockTime) { + signer = NewPQDispatchSigner(signer, config.ChainID) + } return signer } @@ -86,6 +89,9 @@ func LatestSigner(config *params.ChainConfig) Signer { } else { signer = HomesteadSigner{} } + if config.PQForkTime != nil { + signer = NewPQDispatchSigner(signer, config.ChainID) + } return signer } diff --git a/core/types/transaction_signing_pq.go b/core/types/transaction_signing_pq.go new file mode 100644 index 0000000000..b13c18f930 --- /dev/null +++ b/core/types/transaction_signing_pq.go @@ -0,0 +1,200 @@ +package types + +import ( + "errors" + "fmt" + "math/big" + + "github.com/cloudflare/circl/sign/mldsa/mldsa44" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var pqRegistryBackend func(addr common.Address) []byte + +func SetPQRegistryBackend(backend func(addr common.Address) []byte) func() { + prev := pqRegistryBackend + pqRegistryBackend = backend + return func() { + pqRegistryBackend = prev + } +} + +// pqDispatchSigner wraps any existing Signer and additionally handles PQTxType 
+// transactions by delegating to PQSigner. Used by MakeSigner when the PQ fork +// is active so that the rest of the codebase (state processor, tx pool) does not +// need to be aware of the PQ signer directly. +type pqDispatchSigner struct { + Signer + pq PQSigner +} + +// NewPQDispatchSigner wraps base with PQ dispatch for the given chainID. +func NewPQDispatchSigner(base Signer, chainID *big.Int) Signer { + return &pqDispatchSigner{Signer: base, pq: NewPQSigner(chainID)} +} + +func (s *pqDispatchSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() == PQTxType { + return s.pq.Sender(tx) + } + return s.Signer.Sender(tx) +} + +func (s *pqDispatchSigner) Hash(tx *Transaction) common.Hash { + if tx.Type() == PQTxType { + return s.pq.Hash(tx) + } + return s.Signer.Hash(tx) +} + +func (s *pqDispatchSigner) SignatureValues(tx *Transaction, sig []byte) (r, ss, v *big.Int, err error) { + if tx.Type() == PQTxType { + return s.pq.SignatureValues(tx, sig) + } + return s.Signer.SignatureValues(tx, sig) +} + +func (s *pqDispatchSigner) Equal(s2 Signer) bool { + other, ok := s2.(*pqDispatchSigner) + if !ok { + return false + } + return s.Signer.Equal(other.Signer) && s.pq.Equal(other.pq) +} + +func (s *pqDispatchSigner) ChainID() *big.Int { return s.pq.chainID } + +type PQSigner struct { + chainID *big.Int +} + +func NewPQSigner(chainID *big.Int) PQSigner { + if chainID == nil { + chainID = new(big.Int) + } + return PQSigner{chainID: new(big.Int).Set(chainID)} +} + +func (s PQSigner) ChainID() *big.Int { + return s.chainID +} + +func (s PQSigner) Equal(s2 Signer) bool { + switch other := s2.(type) { + case PQSigner: + return s.chainID.Cmp(other.chainID) == 0 + case *PQSigner: + return other != nil && s.chainID.Cmp(other.chainID) == 0 + default: + return false + } +} + +func (s PQSigner) Hash(tx *Transaction) common.Hash { + pqtx, ok := tx.inner.(*PQTxData) + if !ok { + return common.Hash{} + } + return pqtx.sigHash(s.chainID) +} + +func (s PQSigner) 
Sender(tx *Transaction) (common.Address, error) { + pqtx, ok := tx.inner.(*PQTxData) + if !ok { + return common.Address{}, ErrTxTypeNotSupported + } + if pqtx.ChainID != nil && pqtx.ChainID.Sign() != 0 && pqtx.ChainID.Cmp(s.chainID) != 0 { + return common.Address{}, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, pqtx.ChainID, s.chainID) + } + + pubKey := pqKeyRegistryLookup(pqtx.From) + if len(pubKey) != mldsa44.PublicKeySize || isZeroPQPubKey(pubKey) { + return common.Address{}, errors.New("sender not registered") + } + + hash := s.Hash(tx) + if !crypto.VerifyPQ(pubKey, hash[:], pqtx.PQSignature) { + return common.Address{}, errors.New("invalid pq signature") + } + return pqtx.From, nil +} + +func (s PQSigner) SignatureValues(tx *Transaction, sig []byte) (r, ss, v *big.Int, err error) { + pqtx, ok := tx.inner.(*PQTxData) + if !ok { + return nil, nil, nil, ErrTxTypeNotSupported + } + if pqtx.ChainID != nil && pqtx.ChainID.Sign() != 0 && pqtx.ChainID.Cmp(s.chainID) != 0 { + return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, pqtx.ChainID, s.chainID) + } + return new(big.Int), new(big.Int), new(big.Int), nil +} + +func SignPQTx(tx *Transaction, s PQSigner, privKey []byte) (*Transaction, error) { + pqtx, ok := tx.inner.(*PQTxData) + if !ok { + return nil, ErrTxTypeNotSupported + } + if pqtx.ChainID != nil && pqtx.ChainID.Sign() != 0 && pqtx.ChainID.Cmp(s.chainID) != 0 { + return nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, pqtx.ChainID, s.chainID) + } + + hash := s.Hash(tx) + sig, err := crypto.SignPQ(hash[:], privKey) + if err != nil { + return nil, err + } + if len(sig) != mldsa44.SignatureSize { + return nil, fmt.Errorf("invalid pq signature size: have %d want %d", len(sig), mldsa44.SignatureSize) + } + + pubKey, err := pqPublicKeyFromPrivate(privKey) + if err != nil { + return nil, err + } + if len(pubKey) != mldsa44.PublicKeySize { + return nil, fmt.Errorf("invalid pq public key size: have %d want %d", len(pubKey), 
mldsa44.PublicKeySize) + } + + cpy, ok := tx.inner.copy().(*PQTxData) + if !ok { + return nil, ErrTxTypeNotSupported + } + cpy.From = crypto.PQPubkeyToAddress(pubKey) + cpy.PQSignature = sig + if cpy.ChainID == nil { + cpy.ChainID = new(big.Int) + } + cpy.ChainID.Set(s.chainID) + + return &Transaction{inner: cpy, time: tx.time}, nil +} + +func pqPublicKeyFromPrivate(privKey []byte) ([]byte, error) { + var key mldsa44.PrivateKey + if err := key.UnmarshalBinary(privKey); err != nil { + return nil, err + } + pubKey, ok := key.Public().(*mldsa44.PublicKey) + if !ok { + return nil, errors.New("invalid pq public key type") + } + return pubKey.Bytes(), nil +} + +func pqKeyRegistryLookup(addr common.Address) []byte { + if pqRegistryBackend == nil { + return nil + } + return common.CopyBytes(pqRegistryBackend(addr)) +} + +func isZeroPQPubKey(pubKey []byte) bool { + for _, b := range pubKey { + if b != 0 { + return false + } + } + return true +} diff --git a/core/types/vote.go b/core/types/vote.go index e5c7fd8525..3084fd387d 100644 --- a/core/types/vote.go +++ b/core/types/vote.go @@ -9,6 +9,7 @@ import ( "github.com/prysmaticlabs/prysm/v5/crypto/bls" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" ) const ( @@ -18,8 +19,15 @@ const ( MaxAttestationExtraLength = 256 ) +const ( + PQPublicKeyLength = 1312 + PQSignatureLength = 2420 +) + type BLSPublicKey [BLSPublicKeyLength]byte type BLSSignature [BLSSignatureLength]byte +type PQPublicKey [PQPublicKeyLength]byte +type PQSignature [PQSignatureLength]byte type ValidatorsBitSet uint64 // VoteData represents the vote range that validator voted for fast finality. @@ -43,6 +51,16 @@ type VoteEnvelope struct { hash atomic.Value } +// PQVoteEnvelope represents the vote of a single validator using post-quantum signatures. +type PQVoteEnvelope struct { + VoteAddress PQPublicKey // The ML-DSA-44 public key of the validator. 
+ Signature PQSignature // Validator's ML-DSA-44 signature for the vote data. + Data *VoteData // The vote data for fast finality. + + // caches + hash atomic.Value +} + // VoteAttestation represents the votes of super majority validators. type VoteAttestation struct { VoteAddressSet ValidatorsBitSet // The bitset marks the voted validators. @@ -51,6 +69,14 @@ type VoteAttestation struct { Extra []byte // Reserved for future usage. } +// PQVoteAttestation represents the votes of super majority validators using STARK aggregation. +type PQVoteAttestation struct { + VoteAddressSet ValidatorsBitSet // The bitset marks the voted validators. + AggProof []byte // The STARK aggregate proof replacing BLS aggregate signature. + Data *VoteData // The vote data for fast finality. + Extra []byte // Reserved for future usage. +} + // Hash returns the vote's hash. func (v *VoteEnvelope) Hash() common.Hash { if hash := v.hash.Load(); hash != nil { @@ -72,6 +98,26 @@ func (v *VoteEnvelope) calcVoteHash() common.Hash { } func (b BLSPublicKey) Bytes() []byte { return b[:] } +func (b PQPublicKey) Bytes() []byte { return b[:] } + +// Hash returns the PQ vote's hash. +func (v *PQVoteEnvelope) Hash() common.Hash { + if hash := v.hash.Load(); hash != nil { + return hash.(common.Hash) + } + h := v.calcVoteHash() + v.hash.Store(h) + return h +} + +func (v *PQVoteEnvelope) calcVoteHash() common.Hash { + vote := struct { + VoteAddress PQPublicKey + Signature PQSignature + Data *VoteData + }{v.VoteAddress, v.Signature, v.Data} + return rlpHash(vote) +} // Verify vote using BLS. func (v *VoteEnvelope) Verify() error { @@ -92,6 +138,15 @@ func (v *VoteEnvelope) Verify() error { return nil } +// Verify verifies the PQ vote using ML-DSA-44. 
+func (v *PQVoteEnvelope) Verify() error { + voteDataHash := v.Data.Hash() + if !mldsa.Verify(v.VoteAddress[:], voteDataHash[:], v.Signature[:]) { + return errors.New("verify ML-DSA-44 signature failed") + } + return nil +} + type SlashIndicatorVoteDataWrapper struct { SrcNum *big.Int SrcHash string diff --git a/core/vm/contracts.go b/core/vm/contracts.go index f8b2d0856d..994ae8deb9 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -26,7 +26,9 @@ import ( "math" "math/big" "math/bits" + "sync" + "github.com/cloudflare/circl/sign/mldsa/mldsa44" "github.com/consensys/gnark-crypto/ecc" bls12381 "github.com/consensys/gnark-crypto/ecc/bls12-381" "github.com/consensys/gnark-crypto/ecc/bls12-381/fp" @@ -40,6 +42,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bn256" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/crypto/pq/proofs" "github.com/ethereum/go-ethereum/crypto/secp256k1" "github.com/ethereum/go-ethereum/crypto/secp256r1" "github.com/ethereum/go-ethereum/log" @@ -59,6 +62,13 @@ type PrecompiledContract interface { Name() string } +// StatefulPrecompiledContract is implemented by precompiles that need caller +// context or direct access to StateDB. +type StatefulPrecompiledContract interface { + PrecompiledContract + RunStateful(input []byte, caller common.Address, stateDB StateDB, readOnly bool) ([]byte, error) +} + // PrecompiledContracts contains the precompiled contracts supported at the given fork. 
type PrecompiledContracts map[common.Address]PrecompiledContract @@ -163,6 +173,7 @@ var PrecompiledContractsLuban = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlanck{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidate{}, + common.BytesToAddress([]byte{0x68}): &pqRecover{}, } // PrecompiledContractsPlato contains the default set of pre-compiled Ethereum @@ -182,6 +193,7 @@ var PrecompiledContractsPlato = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidate{}, + common.BytesToAddress([]byte{0x68}): &pqRecover{}, } // PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum @@ -215,6 +227,9 @@ var PrecompiledContractsHertz = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, + common.BytesToAddress([]byte{0x68}): &pqRecover{}, + common.BytesToAddress([]byte{0x6a}): &pqAttestationVerify{}, + common.BytesToAddress([]byte{0x70}): &pqKeyRegistry{}, } // PrecompiledContractsFeynman contains the default set of pre-compiled Ethereum @@ -234,8 +249,10 @@ var PrecompiledContractsFeynman = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, - common.BytesToAddress([]byte{0x68}): &verifyDoubleSignEvidence{}, + common.BytesToAddress([]byte{0x68}): &pqRecoverCompat{}, common.BytesToAddress([]byte{0x69}): &secp256k1SignatureRecover{}, + common.BytesToAddress([]byte{0x6a}): &pqAttestationVerify{}, + common.BytesToAddress([]byte{0x70}): 
&pqKeyRegistry{}, } // PrecompiledContractsCancun contains the default set of pre-compiled Ethereum @@ -256,8 +273,10 @@ var PrecompiledContractsCancun = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, - common.BytesToAddress([]byte{0x68}): &verifyDoubleSignEvidence{}, + common.BytesToAddress([]byte{0x68}): &pqRecoverCompat{}, common.BytesToAddress([]byte{0x69}): &secp256k1SignatureRecover{}, + common.BytesToAddress([]byte{0x6a}): &pqAttestationVerify{}, + common.BytesToAddress([]byte{0x70}): &pqKeyRegistry{}, } // PrecompiledContractsHaber contains the default set of pre-compiled Ethereum @@ -278,8 +297,10 @@ var PrecompiledContractsHaber = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, - common.BytesToAddress([]byte{0x68}): &verifyDoubleSignEvidence{}, + common.BytesToAddress([]byte{0x68}): &pqRecoverCompat{}, common.BytesToAddress([]byte{0x69}): &secp256k1SignatureRecover{}, + common.BytesToAddress([]byte{0x6a}): &pqAttestationVerify{}, + common.BytesToAddress([]byte{0x70}): &pqKeyRegistry{}, common.BytesToAddress([]byte{0x1, 0x00}): &p256Verify{}, } @@ -309,15 +330,22 @@ var PrecompiledContractsPrague = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, - common.BytesToAddress([]byte{0x68}): &verifyDoubleSignEvidence{}, + common.BytesToAddress([]byte{0x68}): &pqRecoverCompat{}, common.BytesToAddress([]byte{0x69}): &secp256k1SignatureRecover{}, + common.BytesToAddress([]byte{0x6a}): &pqAttestationVerify{}, + 
common.BytesToAddress([]byte{0x70}): &pqKeyRegistry{}, common.BytesToAddress([]byte{0x1, 0x00}): &p256Verify{}, } var PrecompiledContractsBLS = PrecompiledContractsPrague -var PrecompiledContractsVerkle = PrecompiledContractsBerlin +var PrecompiledContractsVerkle = func() PrecompiledContracts { + contracts := maps.Clone(PrecompiledContractsBerlin) + contracts[common.BytesToAddress([]byte{0x6a})] = &pqAttestationVerify{} + contracts[common.BytesToAddress([]byte{0x70})] = &pqKeyRegistry{} + return contracts +}() // PrecompiledContractsOsaka contains the set of pre-compiled Ethereum // contracts used in the Osaka release. @@ -344,8 +372,10 @@ var PrecompiledContractsOsaka = PrecompiledContracts{ common.BytesToAddress([]byte{0x65}): &iavlMerkleProofValidatePlato{}, common.BytesToAddress([]byte{0x66}): &blsSignatureVerify{}, common.BytesToAddress([]byte{0x67}): &cometBFTLightBlockValidateHertz{}, - common.BytesToAddress([]byte{0x68}): &verifyDoubleSignEvidence{}, + common.BytesToAddress([]byte{0x68}): &pqRecoverCompat{}, common.BytesToAddress([]byte{0x69}): &secp256k1SignatureRecover{}, + common.BytesToAddress([]byte{0x6a}): &pqAttestationVerify{}, + common.BytesToAddress([]byte{0x70}): &pqKeyRegistry{}, common.BytesToAddress([]byte{0x1, 0x00}): &p256Verify{eip7951: true}, } @@ -358,6 +388,7 @@ var PrecompiledContractsP256Verify = PrecompiledContracts{ var ( PrecompiledAddressesOsaka []common.Address + PrecompiledAddressesVerkle []common.Address PrecompiledAddressesPrague []common.Address PrecompiledAddressesHaber []common.Address PrecompiledAddressesCancun []common.Address @@ -417,6 +448,9 @@ func init() { for k := range PrecompiledContractsPrague { PrecompiledAddressesPrague = append(PrecompiledAddressesPrague, k) } + for k := range PrecompiledContractsVerkle { + PrecompiledAddressesVerkle = append(PrecompiledAddressesVerkle, k) + } for k := range PrecompiledContractsOsaka { PrecompiledAddressesOsaka = append(PrecompiledAddressesOsaka, k) } @@ -467,6 +501,8 @@ func 
ActivePrecompiledContracts(rules params.Rules) PrecompiledContracts { // ActivePrecompiles returns the precompile addresses enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { + case rules.IsVerkle: + return PrecompiledAddressesVerkle case rules.IsOsaka: return PrecompiledAddressesOsaka case rules.IsPrague: @@ -506,6 +542,10 @@ func ActivePrecompiles(rules params.Rules) []common.Address { // - the _remaining_ gas, // - any error that occurred func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, logger *tracing.Hooks) (ret []byte, remainingGas uint64, err error) { + return runPrecompiledContract(p, input, suppliedGas, logger, common.Address{}, nil, false) +} + +func runPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, logger *tracing.Hooks, caller common.Address, stateDB StateDB, readOnly bool) (ret []byte, remainingGas uint64, err error) { gasCost := p.RequiredGas(input) if suppliedGas < gasCost { return nil, 0, ErrOutOfGas @@ -514,6 +554,10 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uin logger.OnGasChange(suppliedGas, suppliedGas-gasCost, tracing.GasChangeCallPrecompiledContract) } suppliedGas -= gasCost + if stateful, ok := p.(StatefulPrecompiledContract); ok { + output, err := stateful.RunStateful(input, caller, stateDB, readOnly) + return output, suppliedGas, err + } output, err := p.Run(input) return output, suppliedGas, err } @@ -1672,6 +1716,516 @@ func (c *blsSignatureVerify) Name() string { return "BLS_SIGNATURE_VERIFY" } +// pqAttestationVerify implements STARK-based PQ attestation verification precompile. +type pqAttestationVerify struct{} + +// RequiredGas returns the gas required to verify a PQ attestation. +// Base cost: 200,000 gas (STARK proof verification is flat-cost). 
+func (c *pqAttestationVerify) RequiredGas(input []byte) uint64 { + return 200000 +} + +// Run verifies a STARK aggregate proof for PQ attestations. +// Input format: [proof_len (4 bytes)] [proof_bytes] [vote_data_hash (32 bytes)] [num_pubkeys (4 bytes)] [{pubkey (1312 bytes)}...] +// Returns 1 if valid, 0 if invalid. +func (c *pqAttestationVerify) Run(input []byte) ([]byte, error) { + if len(input) < 8 { + return nil, ErrExecutionReverted + } + + inputLen := len(input) + + // Parse proof length and proof bytes. + proofLen := int(input[0])<<24 | int(input[1])<<16 | int(input[2])<<8 | int(input[3]) + offset := 4 + if proofLen <= 0 || offset+proofLen > inputLen { + return nil, ErrExecutionReverted + } + proofBytes := input[offset : offset+proofLen] + offset += proofLen + + // Parse vote data hash (32 bytes). + if offset+32 > inputLen { + return nil, ErrExecutionReverted + } + var voteDataHash common.Hash + copy(voteDataHash[:], input[offset:offset+32]) + offset += 32 + + // Parse pubkeys. + if offset+4 > inputLen { + return nil, ErrExecutionReverted + } + numPubkeys := int(input[offset])<<24 | int(input[offset+1])<<16 | int(input[offset+2])<<8 | int(input[offset+3]) + offset += 4 + + pubkeySize := 1312 // ML-DSA-44 public key size + if numPubkeys <= 0 || numPubkeys > 1000 || offset+numPubkeys*pubkeySize > inputLen { + return nil, ErrExecutionReverted + } + + pubkeys := make([][]byte, numPubkeys) + for i := 0; i < numPubkeys; i++ { + pubkeys[i] = input[offset : offset+pubkeySize] + offset += pubkeySize + } + + if len(proofBytes) < 104 { // minimum STARK aggregation size + return common.Big0.Bytes(), nil + } + + // Unmarshal the STARK aggregation from proof bytes. + agg, err := pqUnmarshalSTARKAggregation(proofBytes) + if err != nil { + return common.Big0.Bytes(), nil + } + + // Check vote data hash binding. + if agg.voteDataHash != voteDataHash { + return common.Big0.Bytes(), nil + } + + // Verify STARK proof (auth paths). 
+ prover := proofs.NewSTARKProver() + valid, err := prover.VerifySTARKProof(agg.proof, nil) + if err != nil || !valid { + return common.Big0.Bytes(), nil + } + + // Verify committee root matches pubkeys. + expectedRoot := pqComputeCommitteeRoot(pubkeys) + if expectedRoot != agg.committeeRoot { + return common.Big0.Bytes(), nil + } + + return common.Big1.Bytes(), nil +} + +// pqUnmarshalResult holds the minimal parsed STARK aggregation for the precompile. +type pqUnmarshalResult struct { + committeeRoot common.Hash + voteDataHash common.Hash + proof *proofs.STARKProofData +} + +// pqUnmarshalSTARKAggregation is a minimal unmarshal for the precompile (avoids importing parlia). +func pqUnmarshalSTARKAggregation(data []byte) (*pqUnmarshalResult, error) { + if len(data) < 104 { + return nil, fmt.Errorf("proof too short") + } + offset := 0 + + var committeeRoot common.Hash + copy(committeeRoot[:], data[offset:offset+32]) + offset += 32 + + var voteDataHash common.Hash + copy(voteDataHash[:], data[offset:offset+32]) + offset += 32 + + numValidators := int(data[offset])<<24 | int(data[offset+1])<<16 | int(data[offset+2])<<8 | int(data[offset+3]) + offset += 4 + if numValidators <= 0 || numValidators > 1000 { + return nil, fmt.Errorf("invalid numValidators") + } + + if offset+32 > len(data) { + return nil, fmt.Errorf("proof truncated") + } + var commitmentRoot [32]byte + copy(commitmentRoot[:], data[offset:offset+32]) + offset += 32 + + if offset+4 > len(data) { + return nil, fmt.Errorf("proof truncated") + } + numFRI := int(data[offset])<<24 | int(data[offset+1])<<16 | int(data[offset+2])<<8 | int(data[offset+3]) + offset += 4 + if numFRI > 64 { + return nil, fmt.Errorf("invalid numFRI") + } + friLayers := make([][32]byte, numFRI) + for i := 0; i < numFRI; i++ { + if offset+32 > len(data) { + return nil, fmt.Errorf("proof truncated") + } + copy(friLayers[i][:], data[offset:offset+32]) + offset += 32 + } + + if offset+4 > len(data) { + return nil, fmt.Errorf("proof 
truncated") + } + numQ := int(data[offset])<<24 | int(data[offset+1])<<16 | int(data[offset+2])<<8 | int(data[offset+3]) + offset += 4 + if numQ > 1024 { + return nil, fmt.Errorf("invalid numQ") + } + queryResponses := make([]proofs.QueryResponse, numQ) + for i := 0; i < numQ; i++ { + if offset+36 > len(data) { + return nil, fmt.Errorf("proof truncated") + } + idx := int(data[offset])<<24 | int(data[offset+1])<<16 | int(data[offset+2])<<8 | int(data[offset+3]) + offset += 4 + var val [32]byte + copy(val[:], data[offset:offset+32]) + offset += 32 + + if offset+4 > len(data) { + return nil, fmt.Errorf("proof truncated") + } + numAuth := int(data[offset])<<24 | int(data[offset+1])<<16 | int(data[offset+2])<<8 | int(data[offset+3]) + offset += 4 + if numAuth > 64 { + return nil, fmt.Errorf("invalid numAuth") + } + authPath := make([][32]byte, numAuth) + for j := 0; j < numAuth; j++ { + if offset+32 > len(data) { + return nil, fmt.Errorf("proof truncated") + } + copy(authPath[j][:], data[offset:offset+32]) + offset += 32 + } + queryResponses[i] = proofs.QueryResponse{Index: idx, Value: val, AuthPath: authPath} + } + + return &pqUnmarshalResult{ + committeeRoot: committeeRoot, + voteDataHash: voteDataHash, + proof: &proofs.STARKProofData{ + CommitmentRoot: commitmentRoot, + FRILayers: friLayers, + QueryResponses: queryResponses, + TraceLength: numValidators, + NumColumns: 7, + }, + }, nil +} + +// pqComputeCommitteeRoot computes a SHA-256 Merkle root over validator public keys. 
+func pqComputeCommitteeRoot(pubkeys [][]byte) common.Hash { + if len(pubkeys) == 0 { + return common.Hash{} + } + leaves := make([][32]byte, len(pubkeys)) + for i, pk := range pubkeys { + h := sha256.New() + h.Write(pk) + copy(leaves[i][:], h.Sum(nil)) + } + target := 1 + for target < len(leaves) { + target <<= 1 + } + padded := make([][32]byte, target) + copy(padded, leaves) + layer := padded + for len(layer) > 1 { + next := make([][32]byte, len(layer)/2) + for i := range next { + h := sha256.New() + h.Write(layer[2*i][:]) + h.Write(layer[2*i+1][:]) + copy(next[i][:], h.Sum(nil)) + } + layer = next + } + var root common.Hash + copy(root[:], layer[0][:]) + return root +} + +func (c *pqAttestationVerify) Name() string { + return "PQ_ATTESTATION_VERIFY" +} + +const ( + pqRecoverGas = 30000 + pqRecoverHashLength = 32 + pqRecoverSigLength = mldsa44.SignatureSize + pqRecoverPubKeyLength = mldsa44.PublicKeySize + pqRecoverInputLength = pqRecoverHashLength + pqRecoverSigLength + pqRecoverPubKeyLength + pqPubKeySize = 1312 + pqSlotsPerKey = 41 + pqRegistryRegisterGas = 20000 * pqSlotsPerKey + pqRegistryLookupGas = 800 * pqSlotsPerKey +) + +var ( + pqRegistryAddress = common.BytesToAddress([]byte{0x70}) + // TODO: Remove the fallback map once all callers use StateDB-backed execution. + pqRegistryFallback sync.Map + // pqRegistryCache is a process-level, write-once cache of addr → pubkey. + // Because the registry is immutable after registration, entries never need + // invalidation. Memory: ~1312 bytes per entry (≈13 MB for 10k addresses). + pqRegistryCache sync.Map // common.Address → []byte (1312 bytes) +) + +// pqRecover implements ML-DSA signature verification and address recovery. 
+type pqRecover struct{} + +func (pqRecover) RequiredGas(input []byte) uint64 { + return pqRecoverGas +} + +func (pqRecover) Run(input []byte) ([]byte, error) { + if len(input) != pqRecoverInputLength { + return false32Byte, nil + } + + hash := input[:pqRecoverHashLength] + sigOffset := pqRecoverHashLength + pqRecoverSigLength + sig := input[pqRecoverHashLength:sigOffset] + pubKey := input[sigOffset:] + + if !crypto.VerifyPQ(pubKey, hash, sig) { + return false32Byte, nil + } + + addr := crypto.PQPubkeyToAddress(pubKey) + out := make([]byte, 32) + copy(out[12:], addr[:]) + return out, nil +} + +func (pqRecover) Name() string { + return "PQRECOVER" +} + +// NewPQRecoverPrecompile returns the pqRecover precompile instance for use in tests +// and explicit precompile injection (e.g. when the BSC-specific Hertz fork rules +// are not activated by the chain config in use). +func NewPQRecoverPrecompile() PrecompiledContract { return pqRecover{} } + +// pqKeyRegistry implements PQ public-key registration and lookup at 0x70. +type pqKeyRegistry struct{} + +func (pqKeyRegistry) RequiredGas(input []byte) uint64 { + switch len(input) { + case pqPubKeySize, common.AddressLength + pqPubKeySize: + return pqRegistryRegisterGas + case common.AddressLength: + return pqRegistryLookupGas + default: + return 0 + } +} + +func (c pqKeyRegistry) Run(input []byte) ([]byte, error) { + return c.RunStateful(input, common.Address{}, nil, false) +} + +func (pqKeyRegistry) RunStateful(input []byte, caller common.Address, stateDB StateDB, readOnly bool) ([]byte, error) { + // Ensure the registry account has nonce=1 so EIP-158 does not treat it as + // "empty" and wipe its storage during state finalization. + if stateDB != nil && !readOnly && stateDB.GetNonce(pqRegistryAddress) == 0 { + stateDB.SetNonce(pqRegistryAddress, 1, tracing.NonceChangeUnspecified) + } + + switch len(input) { + case pqPubKeySize: + // Self-register: caller registers its own PQ public key. 
+ if readOnly { + return nil, ErrWriteProtection + } + if pqRegistryChunkAt(caller, 0, stateDB) != (common.Hash{}) { + return nil, errors.New("already registered") + } + pubKey := common.CopyBytes(input) + for i := 0; i < pqSlotsPerKey; i++ { + start := i * common.HashLength + end := start + common.HashLength + pqRegistryWriteChunk(caller, i, common.BytesToHash(pubKey[start:end]), stateDB) + } + // Populate cache on registration so subsequent lookups never hit stateDB. + pqRegistryCache.Store(caller, pubKey) + return []byte{1}, nil + + case common.AddressLength + pqPubKeySize: + // Delegate-register (devnet bootstrap): any caller can register a target + // address on its behalf. Useful when the PQ account has no gas to send a + // self-registration tx (e.g., first-time devnet setup). + // Format: target_addr (20 bytes) || pubkey (1312 bytes) + if readOnly { + return nil, ErrWriteProtection + } + target := common.BytesToAddress(input[:common.AddressLength]) + if pqRegistryChunkAt(target, 0, stateDB) != (common.Hash{}) { + return nil, errors.New("already registered") + } + pubKey := common.CopyBytes(input[common.AddressLength:]) + for i := 0; i < pqSlotsPerKey; i++ { + start := i * common.HashLength + end := start + common.HashLength + pqRegistryWriteChunk(target, i, common.BytesToHash(pubKey[start:end]), stateDB) + } + pqRegistryCache.Store(target, pubKey) + return []byte{1}, nil + case common.AddressLength: + addr := common.BytesToAddress(input) + // Fast path: return cached pubkey without touching stateDB. + if cached, ok := pqRegistryCache.Load(addr); ok { + return common.CopyBytes(cached.([]byte)), nil + } + // Slow path: assemble from 41 stateDB slots. 
+ pubKey := make([]byte, pqPubKeySize) + allZero := true + for i := 0; i < pqSlotsPerKey; i++ { + chunk := pqRegistryChunkAt(addr, i, stateDB) + if chunk != (common.Hash{}) { + allZero = false + } + copy(pubKey[i*common.HashLength:], chunk[:]) + } + if allZero { + return make([]byte, pqPubKeySize), nil + } + // Warm the cache for future lookups. + pqRegistryCache.Store(addr, pubKey) + return pubKey, nil + default: + return nil, errors.New("invalid input length") + } +} + +func (pqKeyRegistry) Name() string { + return "PQ_KEY_REGISTRY" +} + +// NewPQKeyRegistryPrecompile returns the pqKeyRegistry precompile instance for tests. +func NewPQKeyRegistryPrecompile() PrecompiledContract { return pqKeyRegistry{} } + +// PQRegistryLookup returns the registered ML-DSA-44 public key for addr from +// the process-level cache. Returns nil if addr has not been registered. +// Safe for concurrent use (sync.Map read). +func PQRegistryLookup(addr common.Address) []byte { + if cached, ok := pqRegistryCache.Load(addr); ok { + return common.CopyBytes(cached.([]byte)) + } + return nil +} + +// PQRegistryLookupWithState returns the registered ML-DSA-44 public key for +// addr, first checking the process-level cache and falling back to reading the +// 0x70 registry storage slots from stateDB. Results are written into the cache +// so subsequent lookups are fast. Use this during block processing where an +// arbitrary registered address may appear as a PQ tx sender. 
+func PQRegistryLookupWithState(addr common.Address, stateDB StateDB) []byte { + if cached, ok := pqRegistryCache.Load(addr); ok { + return common.CopyBytes(cached.([]byte)) + } + if stateDB == nil { + return nil + } + pubKey := make([]byte, pqPubKeySize) + allZero := true + for i := 0; i < pqSlotsPerKey; i++ { + chunk := stateDB.GetState(pqRegistryAddress, pqRegistrySlot(addr, i)) + if chunk != (common.Hash{}) { + allZero = false + } + copy(pubKey[i*common.HashLength:], chunk[:]) + } + if allZero { + return nil + } + pqRegistryCache.Store(addr, pubKey) + return common.CopyBytes(pubKey) +} + +// WarmPQRegistryCache reads the 0x70 registry storage for each validator +// address via the provided stateDB and pre-populates pqRegistryCache. +// Call this once at startup after the blockchain is initialized, before +// the PQ vote manager or snapshot code needs PQRegistryLookup. +func WarmPQRegistryCache(stateDB StateDB, validators []common.Address) int { + warmed := 0 + for _, addr := range validators { + if _, ok := pqRegistryCache.Load(addr); ok { + continue // already cached + } + pubKey := make([]byte, pqPubKeySize) + allZero := true + for i := 0; i < pqSlotsPerKey; i++ { + chunk := stateDB.GetState(pqRegistryAddress, pqRegistrySlot(addr, i)) + if chunk != (common.Hash{}) { + allZero = false + } + copy(pubKey[i*common.HashLength:], chunk[:]) + } + if !allZero { + pqRegistryCache.Store(addr, pubKey) + warmed++ + } + } + return warmed +} + +// pqRecoverCompat preserves the existing double-sign evidence precompile at 0x68 +// while routing fixed-size ML-DSA inputs to pqRecover. 
+type pqRecoverCompat struct{} + +func (pqRecoverCompat) RequiredGas(input []byte) uint64 { + if len(input) == pqRecoverInputLength && !looksLikeDoubleSignEvidenceInput(input) { + return (&pqRecover{}).RequiredGas(input) + } + return (&verifyDoubleSignEvidence{}).RequiredGas(input) +} + +func (pqRecoverCompat) Run(input []byte) ([]byte, error) { + if len(input) == pqRecoverInputLength { + out, err := (&pqRecover{}).Run(input) + if !bytes.Equal(out, false32Byte) || !looksLikeDoubleSignEvidenceInput(input) { + return out, err + } + } + return (&verifyDoubleSignEvidence{}).Run(input) +} + +func (pqRecoverCompat) Name() string { + return "PQRECOVER_COMPAT" +} + +func pqRegistrySlot(addr common.Address, index int) common.Hash { + indexBytes := uint256.NewInt(uint64(index)).Bytes32() + return crypto.Keccak256Hash(addr.Bytes(), indexBytes[:]) +} + +func pqRegistryChunkAt(addr common.Address, index int, stateDB StateDB) common.Hash { + if stateDB != nil { + return stateDB.GetState(pqRegistryAddress, pqRegistrySlot(addr, index)) + } + if value, ok := pqRegistryFallback.Load(addr); ok { + if pubKey, ok := value.([]byte); ok && len(pubKey) == pqPubKeySize { + start := index * common.HashLength + end := start + common.HashLength + return common.BytesToHash(pubKey[start:end]) + } + } + return common.Hash{} +} + +func pqRegistryWriteChunk(addr common.Address, index int, chunk common.Hash, stateDB StateDB) { + if stateDB != nil { + stateDB.SetState(pqRegistryAddress, pqRegistrySlot(addr, index), chunk) + return + } + pubKey := make([]byte, pqPubKeySize) + if existing, ok := pqRegistryFallback.Load(addr); ok { + if existingKey, ok := existing.([]byte); ok && len(existingKey) == pqPubKeySize { + copy(pubKey, existingKey) + } + } + copy(pubKey[index*common.HashLength:], chunk[:]) + pqRegistryFallback.Store(addr, pubKey) +} + +func looksLikeDoubleSignEvidenceInput(input []byte) bool { + var evidence DoubleSignEvidence + return rlp.DecodeBytes(input, &evidence) == nil +} + // 
kzgPointEvaluation implements the EIP-4844 point evaluation precompile. type kzgPointEvaluation struct{} diff --git a/core/vm/evm.go b/core/vm/evm.go index ad0377a2cf..89481a7aea 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -269,7 +269,7 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g evm.Context.Transfer(evm.StateDB, caller, addr, value) if isPrecompile { - ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) + ret, gas, err = runPrecompiledContract(p, input, gas, evm.Config.Tracer, caller, evm.StateDB, evm.readOnly) } else { // Initialise a new contract and set the code that is to be used by the EVM. code := evm.resolveCode(addr) @@ -357,7 +357,7 @@ func (evm *EVM) CallCode(caller common.Address, addr common.Address, input []byt // It is allowed to call precompiles, even via delegatecall if p, isPrecompile := evm.precompile(addr); isPrecompile { - ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) + ret, gas, err = runPrecompiledContract(p, input, gas, evm.Config.Tracer, caller, evm.StateDB, evm.readOnly) } else { if evm.Config.EnableOpcodeOptimizations { addrCopy := addr @@ -425,7 +425,7 @@ func (evm *EVM) DelegateCall(originCaller common.Address, caller common.Address, // It is allowed to call precompiles, even via delegatecall if p, isPrecompile := evm.precompile(addr); isPrecompile { - ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) + ret, gas, err = runPrecompiledContract(p, input, gas, evm.Config.Tracer, caller, evm.StateDB, evm.readOnly) } else { if evm.Config.EnableOpcodeOptimizations { addrCopy := addr @@ -497,7 +497,7 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b evm.StateDB.AddBalance(addr, new(uint256.Int), tracing.BalanceChangeTouchAccount) if p, isPrecompile := evm.precompile(addr); isPrecompile { - ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config.Tracer) + ret, gas, err = 
runPrecompiledContract(p, input, gas, evm.Config.Tracer, caller, evm.StateDB, evm.readOnly) } else { if evm.Config.EnableOpcodeOptimizations { addrCopy := addr diff --git a/core/vm/pq_precompile_test.go b/core/vm/pq_precompile_test.go new file mode 100644 index 0000000000..5be405b519 --- /dev/null +++ b/core/vm/pq_precompile_test.go @@ -0,0 +1,58 @@ +package vm + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" +) + +func TestPQRecoverValid(t *testing.T) { + pubKey, privKey, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + hash := crypto.Keccak256([]byte("pq-precompile-valid")) + sig, err := crypto.SignPQ(hash, privKey) + if err != nil { + t.Fatalf("SignPQ error: %v", err) + } + + input := append(append(append([]byte{}, hash...), sig...), pubKey...) + got, err := (pqRecover{}).Run(input) + if err != nil { + t.Fatalf("Run error: %v", err) + } + + want := make([]byte, 32) + addr := crypto.PQPubkeyToAddress(pubKey) + copy(want[12:], addr[:]) + if !bytes.Equal(got, want) { + t.Fatalf("unexpected output: have %x want %x", got, want) + } +} + +func TestPQRecoverInvalid(t *testing.T) { + pubKey, privKey, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + hash := crypto.Keccak256([]byte("pq-precompile-invalid")) + sig, err := crypto.SignPQ(hash, privKey) + if err != nil { + t.Fatalf("SignPQ error: %v", err) + } + sig[0] ^= 0xff + + input := append(append(append([]byte{}, hash...), sig...), pubKey...) 
+ got, err := (pqRecover{}).Run(input) + if err != nil { + t.Fatalf("Run error: %v", err) + } + if !bytes.Equal(got, make([]byte, 32)) { + t.Fatalf("expected zero output for invalid signature, got %x", got) + } +} diff --git a/core/vote/pq_vote_manager.go b/core/vote/pq_vote_manager.go new file mode 100644 index 0000000000..63218c72d2 --- /dev/null +++ b/core/vote/pq_vote_manager.go @@ -0,0 +1,156 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. + +package vote + +import ( + "bytes" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/parlia" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +var pqVotesManagerCounter = metrics.NewRegisteredCounter("pqVotesManager/local", nil) + +// PQVoteManager signs blocks with ML-DSA-44 and inserts the PQ vote envelopes +// into the PQVotePool. It is the post-quantum counterpart of VoteManager. +// +// Activation is gated on chainConfig.IsPQFork — before the fork timestamp the +// manager stays idle so the legacy BLS VoteManager remains the only voter. +type PQVoteManager struct { + eth Backend + chain *core.BlockChain + + highestVerifiedBlockCh chan core.HighestVerifiedBlockEvent + highestVerifiedBlockSub event.Subscription + + pool *PQVotePool + signer *PQVoteSigner + + // Reused only to get fork state, block interval, and active-validator check. + engine *parlia.Parlia +} + +// NewPQVoteManager wires up the PQ vote manager. signer and pool are required; +// the manager starts its loop goroutine immediately. 
+func NewPQVoteManager(eth Backend, chain *core.BlockChain, pool *PQVotePool, signer *PQVoteSigner, engine *parlia.Parlia) (*PQVoteManager, error) { + m := &PQVoteManager{ + eth: eth, + chain: chain, + highestVerifiedBlockCh: make(chan core.HighestVerifiedBlockEvent, highestVerifiedBlockChanSize), + pool: pool, + signer: signer, + engine: engine, + } + metrics.GetOrRegisterLabel("miner-info", nil).Mark(map[string]interface{}{ + "PQVoteKey": common.Bytes2Hex(signer.PubKey[:]), + }) + + m.highestVerifiedBlockSub = chain.SubscribeHighestVerifiedHeaderEvent(m.highestVerifiedBlockCh) + + go m.loop() + return m, nil +} + +func (m *PQVoteManager) loop() { + log.Debug("PQ vote manager loop started") + defer m.highestVerifiedBlockSub.Unsubscribe() + + events := m.eth.EventMux().Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{}) + defer func() { + if !events.Closed() { + events.Unsubscribe() + } + }() + + dlEventCh := events.Chan() + + startVote := true + blockCountSinceMining := 0 + for { + select { + case ev := <-dlEventCh: + if ev == nil { + continue + } + switch ev.Data.(type) { + case downloader.StartEvent: + startVote = false + case downloader.FailedEvent, downloader.DoneEvent: + startVote = true + } + + case cHead := <-m.highestVerifiedBlockCh: + if !startVote || cHead.Header == nil { + continue + } + if !m.eth.IsMining() { + blockCountSinceMining = 0 + continue + } + blockCountSinceMining++ + if blockCountSinceMining <= blocksNumberSinceMining { + continue + } + + curHead := cHead.Header + + // Fork gate — do nothing until the PQ fork is active for this block. + cfg := m.chain.Config() + if !cfg.IsPQFork(curHead.Number, curHead.Time) { + continue + } + + // Must be a known active validator AND our ML-DSA pubkey must match the one on chain. 
+ if !m.engine.IsActivePQValidatorAt(m.chain, curHead, + func(pqPubKey *types.PQPublicKey) bool { + return bytes.Equal(m.signer.PubKey[:], pqPubKey[:]) + }) { + log.Debug("local PQ vote key is not an active validator at curHead", + "number", curHead.Number) + continue + } + + sourceNumber, sourceHash, err := m.engine.GetJustifiedNumberAndHash(m.chain, []*types.Header{curHead}) + if err != nil { + log.Debug("PQ vote: failed to get justified source", "err", err) + continue + } + if sourceHash == (common.Hash{}) { + continue + } + + voteMessage := &types.PQVoteEnvelope{ + Data: &types.VoteData{ + SourceNumber: sourceNumber, + SourceHash: sourceHash, + TargetNumber: curHead.Number.Uint64(), + TargetHash: curHead.Hash(), + }, + } + + if err := m.signer.SignVote(voteMessage); err != nil { + log.Error("Failed to sign PQ vote", "err", err, + "target", voteMessage.Data.TargetNumber) + continue + } + + log.Info("PQ vote produced", + "target", voteMessage.Data.TargetNumber, + "source", voteMessage.Data.SourceNumber, + "hash", voteMessage.Hash()) + m.pool.PutVote(voteMessage) + pqVotesManagerCounter.Inc(1) + + case <-m.highestVerifiedBlockSub.Err(): + log.Debug("PQ vote manager: chainHead subscription closed") + return + } + } +} diff --git a/core/vote/pq_vote_pool.go b/core/vote/pq_vote_pool.go new file mode 100644 index 0000000000..a765d22bfa --- /dev/null +++ b/core/vote/pq_vote_pool.go @@ -0,0 +1,257 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. + +package vote + +import ( + "sync" + + mapset "github.com/deckarep/golang-set/v2" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +// Per-block limits chosen to match the BLS VotePool semantics (21 validators on BSC). 
+const ( + maxPQCurVotesPerBlock = 21 +) + +var ( + localPQCurVotesCounter = metrics.NewRegisteredCounter("curPQVotes/local", nil) + localPQReceivedVotesGauge = metrics.NewRegisteredGauge("receivedPQVotes/local", nil) +) + +// PQVoteBox groups PQ vote envelopes targeting the same block hash. +type PQVoteBox struct { + blockNumber uint64 + blockHash common.Hash + voteMessages []*types.PQVoteEnvelope +} + +// PQVotePool stores post-quantum (ML-DSA-44) vote envelopes produced by validators. +// Compared to the BLS VotePool this implementation is intentionally minimal: +// - no future/current split (votes are stored for their target hash directly), +// - no priority queue (we prune via a full scan on chain head events, which is +// fine for the expected O(256) capacity), +// - no engine.VerifyVote call (quorum + committee verification happens inside +// parlia.pqVerifyVoteAttestation during header validation). +// +// The pool emits core.NewPQVoteEvent whenever a new vote is accepted so the +// eth protocol handler can broadcast it to peers. +type PQVotePool struct { + chain *core.BlockChain + + mu sync.RWMutex + + votesFeed event.Feed + scope event.SubscriptionScope + + // Dedup set keyed by PQVoteEnvelope.Hash(). + receivedVotes mapset.Set[common.Hash] + + // Vote messages grouped by target block hash. + curVotes map[common.Hash]*PQVoteBox + + highestVerifiedBlockCh chan core.HighestVerifiedBlockEvent + highestVerifiedBlockSub event.Subscription + + votesCh chan *types.PQVoteEnvelope + + quitCh chan struct{} +} + +// NewPQVotePool creates a new PQVotePool subscribed to chain head events for pruning. 
+func NewPQVotePool(chain *core.BlockChain) *PQVotePool { + pool := &PQVotePool{ + chain: chain, + receivedVotes: mapset.NewSet[common.Hash](), + curVotes: make(map[common.Hash]*PQVoteBox), + highestVerifiedBlockCh: make(chan core.HighestVerifiedBlockEvent, highestVerifiedBlockChanSize), + votesCh: make(chan *types.PQVoteEnvelope, voteBufferForPut), + quitCh: make(chan struct{}), + } + + if chain != nil { + pool.highestVerifiedBlockSub = chain.SubscribeHighestVerifiedHeaderEvent(pool.highestVerifiedBlockCh) + } + + go pool.loop() + return pool +} + +// Stop releases resources. Safe to call multiple times. +func (pool *PQVotePool) Stop() { + select { + case <-pool.quitCh: + return + default: + close(pool.quitCh) + } + pool.scope.Close() +} + +func (pool *PQVotePool) loop() { + if pool.highestVerifiedBlockSub != nil { + defer pool.highestVerifiedBlockSub.Unsubscribe() + } + var subErrCh <-chan error + if pool.highestVerifiedBlockSub != nil { + subErrCh = pool.highestVerifiedBlockSub.Err() + } + + for { + select { + case ev := <-pool.highestVerifiedBlockCh: + if ev.Header != nil { + pool.prune(ev.Header.Number.Uint64()) + } + case vote := <-pool.votesCh: + pool.putIntoVotePool(vote) + case <-subErrCh: + return + case <-pool.quitCh: + return + } + } +} + +// PutVote enqueues a PQ vote for asynchronous insertion into the pool. +func (pool *PQVotePool) PutVote(vote *types.PQVoteEnvelope) { + if vote == nil || vote.Data == nil { + return + } + select { + case pool.votesCh <- vote: + default: + log.Warn("PQ vote pool channel full, dropping vote", "target", vote.Data.TargetNumber) + } +} + +// SubscribeNewPQVoteEvent lets the protocol handler listen for newly accepted PQ votes. 
+func (pool *PQVotePool) SubscribeNewPQVoteEvent(ch chan<- core.NewPQVoteEvent) event.Subscription { + return pool.scope.Track(pool.votesFeed.Subscribe(ch)) +} + +func (pool *PQVotePool) putIntoVotePool(vote *types.PQVoteEnvelope) bool { + targetNumber := vote.Data.TargetNumber + targetHash := vote.Data.TargetHash + + var headNumber uint64 + if pool.chain != nil { + if head := pool.chain.CurrentBlock(); head != nil { + headNumber = head.Number.Uint64() + } + } + + // Range check: (head-256, head+11]. + if headNumber > 0 { + if targetNumber+lowerLimitOfVoteBlockNumber-1 < headNumber || + targetNumber > headNumber+upperLimitOfVoteBlockNumber { + log.Debug("PQ vote outside accepted window, discarding", + "target", targetNumber, "head", headNumber) + return false + } + } + + voteHash := vote.Hash() + + pool.mu.Lock() + if pool.receivedVotes.Contains(voteHash) { + pool.mu.Unlock() + return false + } + if box, ok := pool.curVotes[targetHash]; ok && len(box.voteMessages) >= maxPQCurVotesPerBlock { + pool.mu.Unlock() + log.Debug("PQ vote pool box full", "target", targetNumber) + return false + } + pool.mu.Unlock() + + // Signature verification is expensive (ML-DSA-44 ~3ms); do it outside the lock. + if err := vote.Verify(); err != nil { + log.Warn("Failed to verify PQ vote", "err", err, "target", targetNumber) + return false + } + + pool.mu.Lock() + defer pool.mu.Unlock() + + // Re-check dedup after verify. + if pool.receivedVotes.Contains(voteHash) { + return false + } + + box, ok := pool.curVotes[targetHash] + if !ok { + box = &PQVoteBox{ + blockNumber: targetNumber, + blockHash: targetHash, + voteMessages: make([]*types.PQVoteEnvelope, 0, maxPQCurVotesPerBlock), + } + pool.curVotes[targetHash] = box + } + box.voteMessages = append(box.voteMessages, vote) + pool.receivedVotes.Add(voteHash) + + localPQCurVotesCounter.Inc(1) + localPQReceivedVotesGauge.Update(int64(pool.receivedVotes.Cardinality())) + + // Broadcast to subscribers (protocol handler). 
+ pool.votesFeed.Send(core.NewPQVoteEvent{Vote: vote}) + + log.Debug("PQ vote accepted", "target", targetNumber, "hash", voteHash) + return true +} + +// prune removes votes that are too old relative to the latest verified block. +func (pool *PQVotePool) prune(latestBlockNumber uint64) { + pool.mu.Lock() + defer pool.mu.Unlock() + + for hash, box := range pool.curVotes { + if box.blockNumber+lowerLimitOfVoteBlockNumber-1 < latestBlockNumber { + for _, v := range box.voteMessages { + pool.receivedVotes.Remove(v.Hash()) + } + localPQCurVotesCounter.Dec(int64(len(box.voteMessages))) + delete(pool.curVotes, hash) + } + } + localPQReceivedVotesGauge.Update(int64(pool.receivedVotes.Cardinality())) +} + +// GetVotes returns a snapshot of all currently pooled PQ votes. +func (pool *PQVotePool) GetVotes() []*types.PQVoteEnvelope { + pool.mu.RLock() + defer pool.mu.RUnlock() + + res := make([]*types.PQVoteEnvelope, 0) + for _, box := range pool.curVotes { + res = append(res, box.voteMessages...) + } + return res +} + +// FetchVotesByBlockHash returns every vote for targetBlockHash whose +// SourceNumber matches the supplied sourceBlockNum. Mirrors VotePool.FetchVotesByBlockHash. 
+func (pool *PQVotePool) FetchVotesByBlockHash(targetBlockHash common.Hash, sourceBlockNum uint64) []*types.PQVoteEnvelope { + pool.mu.RLock() + defer pool.mu.RUnlock() + + box, ok := pool.curVotes[targetBlockHash] + if !ok { + return nil + } + var res []*types.PQVoteEnvelope + for _, v := range box.voteMessages { + if v.Data.SourceNumber == sourceBlockNum { + res = append(res, v) + } + } + return res +} diff --git a/core/vote/pq_vote_pool_test.go b/core/vote/pq_vote_pool_test.go new file mode 100644 index 0000000000..f3a12c54e0 --- /dev/null +++ b/core/vote/pq_vote_pool_test.go @@ -0,0 +1,146 @@ +package vote + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" +) + +// newSignedPQVote creates a valid ML-DSA-44 signed PQ vote for testing. +func newSignedPQVote(t *testing.T, target uint64, targetHash common.Hash) (*types.PQVoteEnvelope, []byte) { + t.Helper() + pub, priv, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + signer, err := NewPQVoteSignerFromRawKey(priv) + if err != nil { + t.Fatalf("NewPQVoteSignerFromRawKey: %v", err) + } + env := &types.PQVoteEnvelope{ + Data: &types.VoteData{ + SourceNumber: target - 1, + SourceHash: common.BytesToHash([]byte("source-hash-padding-0123456789ab")), + TargetNumber: target, + TargetHash: targetHash, + }, + } + if err := signer.SignVote(env); err != nil { + t.Fatalf("SignVote: %v", err) + } + return env, pub +} + +func TestPQVotePool_PutAndFetch(t *testing.T) { + pool := NewPQVotePool(nil) // nil chain → skip head-range check + defer pool.Stop() + + targetHash := common.BytesToHash([]byte("target-hash-abcdef0123456789abcd")) + env, _ := newSignedPQVote(t, 100, targetHash) + + pool.putIntoVotePool(env) + + got := pool.FetchVotesByBlockHash(targetHash, 99) + if len(got) != 1 { + t.Fatalf("expected 1 vote, got %d", len(got)) + } + if 
got[0].Hash() != env.Hash() { + t.Error("vote hash mismatch") + } +} + +func TestPQVotePool_Dedup(t *testing.T) { + pool := NewPQVotePool(nil) + defer pool.Stop() + + targetHash := common.BytesToHash([]byte("target-hash-abcdef0123456789abcd")) + env, _ := newSignedPQVote(t, 100, targetHash) + + if !pool.putIntoVotePool(env) { + t.Fatal("first put should succeed") + } + if pool.putIntoVotePool(env) { + t.Error("duplicate put should be rejected") + } + if n := len(pool.GetVotes()); n != 1 { + t.Errorf("expected 1 vote after dedup, got %d", n) + } +} + +func TestPQVotePool_RejectBadSignature(t *testing.T) { + pool := NewPQVotePool(nil) + defer pool.Stop() + + targetHash := common.BytesToHash([]byte("target-hash-abcdef0123456789abcd")) + env, _ := newSignedPQVote(t, 100, targetHash) + env.Signature[0] ^= 0xFF // tamper + + if pool.putIntoVotePool(env) { + t.Error("tampered vote should be rejected") + } + if n := len(pool.GetVotes()); n != 0 { + t.Errorf("expected 0 votes, got %d", n) + } +} + +func TestPQVotePool_BoxCap(t *testing.T) { + pool := NewPQVotePool(nil) + defer pool.Stop() + + targetHash := common.BytesToHash([]byte("target-hash-abcdef0123456789abcd")) + // Fill past cap — each call produces a distinct keypair/signature so no dedup. 
+ accepted := 0 + for i := 0; i < maxPQCurVotesPerBlock+5; i++ { + env, _ := newSignedPQVote(t, 100, targetHash) + if pool.putIntoVotePool(env) { + accepted++ + } + } + if accepted != maxPQCurVotesPerBlock { + t.Errorf("expected %d accepted, got %d", maxPQCurVotesPerBlock, accepted) + } +} + +func TestPQVotePool_SubscribeEvent(t *testing.T) { + pool := NewPQVotePool(nil) + defer pool.Stop() + + ch := make(chan core.NewPQVoteEvent, 1) + sub := pool.SubscribeNewPQVoteEvent(ch) + defer sub.Unsubscribe() + + targetHash := common.BytesToHash([]byte("target-hash-abcdef0123456789abcd")) + env, _ := newSignedPQVote(t, 100, targetHash) + if !pool.putIntoVotePool(env) { + t.Fatal("put failed") + } + select { + case ev := <-ch: + if ev.Vote.Hash() != env.Hash() { + t.Error("event vote hash mismatch") + } + default: + t.Error("expected NewPQVoteEvent to be delivered") + } +} + +func TestPQVotePool_Prune(t *testing.T) { + pool := NewPQVotePool(nil) + defer pool.Stop() + + targetHash := common.BytesToHash([]byte("target-hash-abcdef0123456789abcd")) + env, _ := newSignedPQVote(t, 10, targetHash) + pool.putIntoVotePool(env) + if n := len(pool.GetVotes()); n != 1 { + t.Fatalf("pre-prune: expected 1 vote, got %d", n) + } + + // latestBlockNumber well past vote target+lowerLimit. 
+ pool.prune(10 + lowerLimitOfVoteBlockNumber + 5) + if n := len(pool.GetVotes()); n != 0 { + t.Errorf("post-prune: expected 0 votes, got %d", n) + } +} diff --git a/core/vote/pq_vote_signer.go b/core/vote/pq_vote_signer.go new file mode 100644 index 0000000000..2eb05dc6a0 --- /dev/null +++ b/core/vote/pq_vote_signer.go @@ -0,0 +1,78 @@ +package vote + +import ( + "os" + + "github.com/pkg/errors" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +var pqVotesSigningErrorCounter = metrics.NewRegisteredCounter("pqVotesSigner/error", nil) + +// PQVoteSigner signs votes using ML-DSA-44 post-quantum signatures. +type PQVoteSigner struct { + privKey []byte + PubKey types.PQPublicKey +} + +// NewPQVoteSigner creates a new PQ vote signer from a private key file. +// The key file should contain the raw ML-DSA-44 private key bytes. +func NewPQVoteSigner(pqKeyPath string) (*PQVoteSigner, error) { + privKeyBytes, err := os.ReadFile(pqKeyPath) + if err != nil { + log.Error("Read PQ vote key file", "err", err) + return nil, errors.Wrap(err, "failed to read PQ vote key file") + } + + pubKeyBytes, err := mldsa.PublicKeyFromPrivate(privKeyBytes) + if err != nil { + log.Error("Derive PQ public key from private key", "err", err) + return nil, errors.Wrap(err, "failed to derive PQ public key") + } + + var pubKey types.PQPublicKey + copy(pubKey[:], pubKeyBytes) + + log.Info("Created PQ vote signer successfully", "pubKeyLen", len(pubKeyBytes)) + + return &PQVoteSigner{ + privKey: privKeyBytes, + PubKey: pubKey, + }, nil +} + +// NewPQVoteSignerFromRawKey creates a PQ vote signer from raw private key bytes. +// This is useful for testing and programmatic key generation. 
+func NewPQVoteSignerFromRawKey(privKey []byte) (*PQVoteSigner, error) { + pubKeyBytes, err := mldsa.PublicKeyFromPrivate(privKey) + if err != nil { + return nil, errors.Wrap(err, "failed to derive PQ public key") + } + + var pubKey types.PQPublicKey + copy(pubKey[:], pubKeyBytes) + + return &PQVoteSigner{ + privKey: privKey, + PubKey: pubKey, + }, nil +} + +// SignVote signs a PQ vote envelope using ML-DSA-44. +func (signer *PQVoteSigner) SignVote(vote *types.PQVoteEnvelope) error { + voteDataHash := vote.Data.Hash() + + sig, err := mldsa.Sign(signer.privKey, voteDataHash[:]) + if err != nil { + pqVotesSigningErrorCounter.Inc(1) + return errors.Wrap(err, "failed to sign vote with ML-DSA-44") + } + + copy(vote.VoteAddress[:], signer.PubKey[:]) + copy(vote.Signature[:], sig) + return nil +} diff --git a/crypto/crypto.go b/crypto/crypto.go index db6b6ee071..d380fb0b07 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" "github.com/ethereum/go-ethereum/rlp" ) @@ -87,6 +88,21 @@ func CreateAddress2(b common.Address, salt [32]byte, inithash []byte) common.Add return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt[:], inithash)[12:]) } +// SignPQ signs a digest using an ML-DSA private key. +func SignPQ(digest []byte, privKey []byte) ([]byte, error) { + return mldsa.Sign(privKey, digest) +} + +// VerifyPQ verifies an ML-DSA signature against the given public key and digest. +func VerifyPQ(pubKey []byte, digest []byte, sig []byte) bool { + return mldsa.Verify(pubKey, digest, sig) +} + +// PQPubkeyToAddress derives an address from an ML-DSA public key. +func PQPubkeyToAddress(pubKey []byte) common.Address { + return mldsa.PubKeyToAddress(pubKey) +} + // ToECDSA creates a private key with the given D value. 
func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) { return toECDSA(d, true) diff --git a/crypto/pq/mldsa/mldsa.go b/crypto/pq/mldsa/mldsa.go new file mode 100644 index 0000000000..c910025cde --- /dev/null +++ b/crypto/pq/mldsa/mldsa.go @@ -0,0 +1,65 @@ +package mldsa + +import ( + "errors" + "fmt" + + "github.com/cloudflare/circl/sign/mldsa/mldsa44" + "github.com/ethereum/go-ethereum/common" + "golang.org/x/crypto/sha3" +) + +func GenerateKey() (pubKey []byte, privKey []byte, err error) { + pub, priv, err := mldsa44.GenerateKey(nil) + if err != nil { + return nil, nil, err + } + return pub.Bytes(), priv.Bytes(), nil +} + +func GenerateKeyFromSeed(seed []byte) (pubKey []byte, privKey []byte, err error) { + if len(seed) != mldsa44.SeedSize { + return nil, nil, fmt.Errorf("invalid seed length: have %d want %d", len(seed), mldsa44.SeedSize) + } + var fixedSeed [mldsa44.SeedSize]byte + copy(fixedSeed[:], seed) + pub, priv := mldsa44.NewKeyFromSeed(&fixedSeed) + return pub.Bytes(), priv.Bytes(), nil +} + +func Sign(privKey []byte, digest []byte) (sig []byte, err error) { + var key mldsa44.PrivateKey + if err := key.UnmarshalBinary(privKey); err != nil { + return nil, err + } + return key.Sign(nil, digest, nil) +} + +func Verify(pubKey []byte, digest []byte, sig []byte) bool { + var key mldsa44.PublicKey + if err := key.UnmarshalBinary(pubKey); err != nil { + return false + } + return mldsa44.Verify(&key, digest, nil, sig) +} + +func PublicKeyFromPrivate(privKey []byte) ([]byte, error) { + var key mldsa44.PrivateKey + if err := key.UnmarshalBinary(privKey); err != nil { + return nil, err + } + pubKey, ok := key.Public().(*mldsa44.PublicKey) + if !ok { + return nil, errors.New("invalid pq public key type") + } + return pubKey.Bytes(), nil +} + +func PubKeyToAddress(pubKey []byte) common.Address { + hasher := sha3.NewLegacyKeccak256() + hasher.Write(pubKey) + + sum := make([]byte, 32) + hasher.Sum(sum[:0]) + return common.BytesToAddress(sum[12:]) +} diff --git 
a/crypto/pq/mldsa/mldsa_test.go b/crypto/pq/mldsa/mldsa_test.go new file mode 100644 index 0000000000..fe2dfb8792 --- /dev/null +++ b/crypto/pq/mldsa/mldsa_test.go @@ -0,0 +1,48 @@ +package mldsa + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/crypto/sha3" +) + +func keccak256(data []byte) []byte { + h := sha3.NewLegacyKeccak256() + h.Write(data) + return h.Sum(nil) +} + +func TestSignVerifyRoundTrip(t *testing.T) { + pubKey, privKey, err := GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + digest := keccak256([]byte("phase-0-mldsa")) + sig, err := Sign(privKey, digest) + if err != nil { + t.Fatalf("Sign error: %v", err) + } + if !Verify(pubKey, digest, sig) { + t.Fatal("Verify returned false for valid signature") + } + + tampered := append([]byte(nil), digest...) + tampered[0] ^= 0xff + if Verify(pubKey, tampered, sig) { + t.Fatal("Verify accepted a tampered digest") + } +} + +func TestPubKeyToAddress(t *testing.T) { + pubKey, _, err := GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + want := common.BytesToAddress(keccak256(pubKey)[12:]) + if got := PubKeyToAddress(pubKey); got != want { + t.Fatalf("unexpected address: have %s want %s", got.Hex(), want.Hex()) + } +} diff --git a/crypto/pq/mlkem/mlkem.go b/crypto/pq/mlkem/mlkem.go new file mode 100644 index 0000000000..10faae7ddb --- /dev/null +++ b/crypto/pq/mlkem/mlkem.go @@ -0,0 +1,37 @@ +package mlkem + +import "github.com/cloudflare/circl/kem/mlkem/mlkem768" + +func GenerateKey() (encapKey []byte, decapKey []byte, err error) { + pub, priv, err := mlkem768.GenerateKeyPair(nil) + if err != nil { + return nil, nil, err + } + encapKey, err = pub.MarshalBinary() + if err != nil { + return nil, nil, err + } + decapKey, err = priv.MarshalBinary() + if err != nil { + return nil, nil, err + } + return encapKey, decapKey, nil +} + +func Encapsulate(encapKey []byte) (ciphertext []byte, sharedSecret []byte, err error) { + 
scheme := mlkem768.Scheme() + pub, err := scheme.UnmarshalBinaryPublicKey(encapKey) + if err != nil { + return nil, nil, err + } + return scheme.Encapsulate(pub) +} + +func Decapsulate(decapKey []byte, ciphertext []byte) (sharedSecret []byte, err error) { + scheme := mlkem768.Scheme() + priv, err := scheme.UnmarshalBinaryPrivateKey(decapKey) + if err != nil { + return nil, err + } + return scheme.Decapsulate(priv, ciphertext) +} diff --git a/crypto/pq/mlkem/mlkem_test.go b/crypto/pq/mlkem/mlkem_test.go new file mode 100644 index 0000000000..488846a40b --- /dev/null +++ b/crypto/pq/mlkem/mlkem_test.go @@ -0,0 +1,26 @@ +package mlkem + +import ( + "bytes" + "testing" +) + +func TestEncapsulateDecapsulateRoundTrip(t *testing.T) { + encapKey, decapKey, err := GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + ciphertext, sharedSecret, err := Encapsulate(encapKey) + if err != nil { + t.Fatalf("Encapsulate error: %v", err) + } + + recovered, err := Decapsulate(decapKey, ciphertext) + if err != nil { + t.Fatalf("Decapsulate error: %v", err) + } + if !bytes.Equal(sharedSecret, recovered) { + t.Fatal("shared secrets do not match") + } +} diff --git a/crypto/pq/proofs/stark_prover.go b/crypto/pq/proofs/stark_prover.go new file mode 100644 index 0000000000..d432073f61 --- /dev/null +++ b/crypto/pq/proofs/stark_prover.go @@ -0,0 +1,292 @@ +package proofs + +import ( + "crypto/sha256" + "errors" + "math/big" + "sync" +) + +// Errors +var ( + ErrNilTrace = errors.New("stark_prover: nil execution trace") + ErrEmptyTrace = errors.New("stark_prover: empty execution trace") + ErrProofGenFailed = errors.New("stark_prover: proof generation failed") +) + +// FieldElement represents an element in the STARK finite field. +type FieldElement struct { + Value *big.Int +} + +// NewFieldElement creates a FieldElement from an int64. 
+func NewFieldElement(v int64) FieldElement { + return FieldElement{Value: big.NewInt(v)} +} + +// STARKConstraint represents a constraint in the STARK proof system. +type STARKConstraint struct { + Degree int + Coefficients []FieldElement +} + +// STARKProofData holds the generated STARK proof. +type STARKProofData struct { + // CommitmentRoot is the Merkle commitment to the execution trace. + CommitmentRoot [32]byte + // FRILayers stores the FRI (Fast Reed-Solomon IOP) layer commitments. + FRILayers [][32]byte + // QueryResponses holds query/response pairs for the interactive portion. + QueryResponses []QueryResponse + // TraceLength is the number of rows in the execution trace. + TraceLength int + // NumColumns is the number of columns in the execution trace. + NumColumns int +} + +// QueryResponse is a single query-response in the STARK proof. +type QueryResponse struct { + Index int + Value [32]byte + AuthPath [][32]byte +} + +// ProofSize returns the approximate byte size of the proof. +func (p *STARKProofData) ProofSize() int { + if p == nil { + return 0 + } + size := 32 // CommitmentRoot + size += len(p.FRILayers) * 32 + for _, qr := range p.QueryResponses { + size += 4 + 32 + len(qr.AuthPath)*32 // Index + Value + AuthPath + } + return size +} + +// STARKProver generates and verifies STARK proofs. +type STARKProver struct { + mu sync.RWMutex +} + +// NewSTARKProver creates a new STARKProver. +func NewSTARKProver() *STARKProver { + return &STARKProver{} +} + +// GenerateSTARKProof generates a STARK proof from an execution trace and constraints. +// The trace is a 2D matrix where each row is one "step" and each column is a register. +// For signature aggregation, each row represents one attestation's data. +// +// TODO(pq-phase3): This is a placeholder implementation that builds Merkle commitments +// over the execution trace but does NOT perform real STARK proving (polynomial +// interpolation, FRI commitment, constraint evaluation). 
Replace with a production +// STARK/leanVM prover before enabling on any public network. +func (sp *STARKProver) GenerateSTARKProof(trace [][]FieldElement, constraints []STARKConstraint) (*STARKProofData, error) { + if trace == nil { + return nil, ErrNilTrace + } + if len(trace) == 0 { + return nil, ErrEmptyTrace + } + + sp.mu.Lock() + defer sp.mu.Unlock() + + numColumns := 0 + if len(trace) > 0 && len(trace[0]) > 0 { + numColumns = len(trace[0]) + } + + // Step 1: Compute commitment root from the execution trace. + // Hash each row, then build a Merkle tree. + rowHashes := make([][32]byte, len(trace)) + for i, row := range trace { + h := sha256.New() + for _, elem := range row { + if elem.Value != nil { + h.Write(elem.Value.Bytes()) + } else { + h.Write([]byte{0}) + } + } + copy(rowHashes[i][:], h.Sum(nil)) + } + commitmentRoot := computeMerkleRoot(rowHashes) + + // Step 2: Generate FRI layer commitments. + // Simulate FRI folding: log2(traceLength) layers. + numLayers := 0 + n := len(trace) + for n > 1 { + n = (n + 1) / 2 + numLayers++ + } + if numLayers == 0 { + numLayers = 1 + } + + friLayers := make([][32]byte, numLayers) + currentHashes := rowHashes + for layer := 0; layer < numLayers; layer++ { + nextLen := (len(currentHashes) + 1) / 2 + nextHashes := make([][32]byte, nextLen) + for j := 0; j < nextLen; j++ { + h := sha256.New() + h.Write(currentHashes[j*2][:]) + if j*2+1 < len(currentHashes) { + h.Write(currentHashes[j*2+1][:]) + } else { + h.Write(currentHashes[j*2][:]) // duplicate last if odd + } + copy(nextHashes[j][:], h.Sum(nil)) + } + friLayers[layer] = nextHashes[0] + currentHashes = nextHashes + } + + // Step 3: Generate query responses. + // For each constraint, produce a query response at deterministic indices. 
+ numQueries := len(constraints) + if numQueries == 0 { + numQueries = 1 + } + if numQueries > len(trace) { + numQueries = len(trace) + } + + queryResponses := make([]QueryResponse, numQueries) + for q := 0; q < numQueries; q++ { + idx := q % len(trace) + queryResponses[q] = QueryResponse{ + Index: idx, + Value: rowHashes[idx], + AuthPath: computeAuthPath(rowHashes, idx), + } + } + + return &STARKProofData{ + CommitmentRoot: commitmentRoot, + FRILayers: friLayers, + QueryResponses: queryResponses, + TraceLength: len(trace), + NumColumns: numColumns, + }, nil +} + +// VerifySTARKProof verifies a STARK proof. +// publicInputs can be nil for basic verification. +// +// TODO(pq-phase3): This is a placeholder that only checks Merkle auth paths. +// It does NOT verify actual STARK constraints, FRI consistency, or that the +// underlying PQ signatures are valid. Must be replaced with a real STARK +// verifier (or leanVM verifier) before any testnet deployment. +func (sp *STARKProver) VerifySTARKProof(proof *STARKProofData, publicInputs []FieldElement) (bool, error) { + if proof == nil { + return false, ErrProofGenFailed + } + + sp.mu.RLock() + defer sp.mu.RUnlock() + + // Verify FRI layer chain: each layer should be consistent. + if len(proof.FRILayers) == 0 { + return false, errors.New("stark_prover: no FRI layers") + } + + // Verify query responses against the commitment root. + for _, qr := range proof.QueryResponses { + if !verifyAuthPath(proof.CommitmentRoot, qr.Value, qr.AuthPath, qr.Index, proof.TraceLength) { + return false, errors.New("stark_prover: auth path verification failed") + } + } + + return true, nil +} + +// computeMerkleRoot computes a Merkle root from a list of leaf hashes. +func computeMerkleRoot(leaves [][32]byte) [32]byte { + if len(leaves) == 0 { + return [32]byte{} + } + if len(leaves) == 1 { + return leaves[0] + } + + // Pad to next power of two. 
+ target := 1 + for target < len(leaves) { + target <<= 1 + } + padded := make([][32]byte, target) + copy(padded, leaves) + + layer := padded + for len(layer) > 1 { + next := make([][32]byte, len(layer)/2) + for i := range next { + h := sha256.New() + h.Write(layer[2*i][:]) + h.Write(layer[2*i+1][:]) + copy(next[i][:], h.Sum(nil)) + } + layer = next + } + return layer[0] +} + +// computeAuthPath computes the Merkle authentication path for a leaf at the given index. +func computeAuthPath(leaves [][32]byte, index int) [][32]byte { + if len(leaves) <= 1 { + return nil + } + + target := 1 + for target < len(leaves) { + target <<= 1 + } + padded := make([][32]byte, target) + copy(padded, leaves) + + var path [][32]byte + layer := padded + idx := index + + for len(layer) > 1 { + sibling := idx ^ 1 + if sibling < len(layer) { + path = append(path, layer[sibling]) + } + next := make([][32]byte, len(layer)/2) + for i := range next { + h := sha256.New() + h.Write(layer[2*i][:]) + h.Write(layer[2*i+1][:]) + copy(next[i][:], h.Sum(nil)) + } + layer = next + idx = idx / 2 + } + return path +} + +// verifyAuthPath verifies a Merkle authentication path. 
+func verifyAuthPath(root [32]byte, leaf [32]byte, authPath [][32]byte, index int, totalLeaves int) bool { + current := leaf + idx := index + + for _, sibling := range authPath { + h := sha256.New() + if idx%2 == 0 { + h.Write(current[:]) + h.Write(sibling[:]) + } else { + h.Write(sibling[:]) + h.Write(current[:]) + } + copy(current[:], h.Sum(nil)) + idx = idx / 2 + } + + return current == root +} diff --git a/crypto/pq/xmss/xmss.go b/crypto/pq/xmss/xmss.go new file mode 100644 index 0000000000..5f41b479e2 --- /dev/null +++ b/crypto/pq/xmss/xmss.go @@ -0,0 +1,17 @@ +package xmss + +import "errors" + +var errNotImplemented = errors.New("not implemented") + +func Sign(privKey []byte, msg []byte) (sig []byte, err error) { + return nil, errNotImplemented +} + +func Aggregate(sigs [][]byte, pubKeys [][]byte, msg []byte) (proof []byte, err error) { + return nil, errNotImplemented +} + +func VerifyProof(proof []byte, pubKeys [][]byte, msg []byte) bool { + return false +} diff --git a/crypto/pq_signing_test.go b/crypto/pq_signing_test.go new file mode 100644 index 0000000000..0b5d8edc20 --- /dev/null +++ b/crypto/pq_signing_test.go @@ -0,0 +1,42 @@ +package crypto_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/pq/mldsa" +) + +func TestSignPQ(t *testing.T) { + pubKey, privKey, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + digest := crypto.Keccak256([]byte("phase-1-sign-pq")) + sig, err := crypto.SignPQ(digest, privKey) + if err != nil { + t.Fatalf("SignPQ error: %v", err) + } + if !crypto.VerifyPQ(pubKey, digest, sig) { + t.Fatal("VerifyPQ returned false for a valid signature") + } +} + +func TestPQPubkeyToAddress(t *testing.T) { + pubKey, _, err := mldsa.GenerateKey() + if err != nil { + t.Fatalf("GenerateKey error: %v", err) + } + + want := common.BytesToAddress(crypto.Keccak256(pubKey)[12:]) + got1 := 
crypto.PQPubkeyToAddress(pubKey) + got2 := crypto.PQPubkeyToAddress(pubKey) + if got1 != want { + t.Fatalf("unexpected address: have %s want %s", got1.Hex(), want.Hex()) + } + if got1 != got2 { + t.Fatalf("expected deterministic address output: first %s second %s", got1.Hex(), got2.Hex()) + } +} diff --git a/eth/backend.go b/eth/backend.go index 6650d1f238..4383bf744e 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -155,8 +155,9 @@ type Ethereum struct { shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully - votePool *vote.VotePool - stopCh chan struct{} + votePool *vote.VotePool + pqVotePool *vote.PQVotePool + stopCh chan struct{} } // New creates a new Ethereum object (including the initialisation of the common Ethereum object), @@ -177,6 +178,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) } + // Wire PQ public-key registry lookup so PQSigner.Sender can resolve pubkeys + // from the process-level cache populated by the 0x70 precompile. + types.SetPQRegistryBackend(vm.PQRegistryLookup) + chainDb, err := stack.OpenAndMergeDatabase(ChainData, ChainDBNamespace, false, config) if err != nil { return nil, err @@ -277,6 +282,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { chainConfig.VerkleTime = config.OverrideVerkle overrides.OverrideVerkle = config.OverrideVerkle } + if config.OverridePQHardfork != nil { + chainConfig.PQForkTime = config.OverridePQHardfork + overrides.OverridePQHardfork = config.OverridePQHardfork + } // startup ancient freeze freezeDb := chainDb @@ -408,6 +417,19 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { return nil, err } + // Warm the PQ key registry cache from on-chain state so that + // PQRegistryLookup works immediately for snapshot PQVoteAddress resolution. 
+ // Use the Parlia snapshot as the authoritative validator source: it is always + // current regardless of whether the head is an epoch block. + if parliaEngine, ok := eth.engine.(*parlia.Parlia); ok { + if stateDB, stateErr := eth.blockchain.State(); stateErr == nil { + addrs := parliaEngine.CurrentValidators(eth.blockchain) + if n := vm.WarmPQRegistryCache(stateDB, addrs); n > 0 { + log.Info("Warmed PQ registry cache from state", "validators", n) + } + } + } + // Initialize filtermaps log index. // Auto-enable checkpoint file checkpointFile := filepath.Join(stack.DataDir(), "geth", "filtermap_checkpoints.json") @@ -519,6 +541,32 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } log.Info("Create voteManager successfully") } + + // Post-quantum vote pool & manager. The pool is always created so the + // p2p handler can relay PQ votes even on a non-producing node. The + // manager is only started when a --pqvotekey file is configured. + if parliaEngine, ok := eth.engine.(*parlia.Parlia); ok { + pqVotePool := vote.NewPQVotePool(eth.blockchain) + eth.pqVotePool = pqVotePool + if !config.Miner.DisableVoteAttestation { + parliaEngine.PQVotePool = pqVotePool + } + eth.handler.pqVotepool = pqVotePool + log.Info("Create PQ votePool successfully") + + if pqKeyPath := stack.ResolvePath(stack.Config().PQVoteKeyFile); pqKeyPath != "" && stack.Config().PQVoteKeyFile != "" { + pqSigner, err := vote.NewPQVoteSigner(pqKeyPath) + if err != nil { + log.Error("Failed to load PQ vote signer", "path", pqKeyPath, "err", err) + return nil, err + } + if _, err := vote.NewPQVoteManager(eth, eth.blockchain, pqVotePool, pqSigner, parliaEngine); err != nil { + log.Error("Failed to initialize PQ voteManager", "err", err) + return nil, err + } + log.Info("Create PQ voteManager successfully") + } + } } eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, config.GPO, config.Miner.GasPrice) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 
a2f8604dde..f6191560d8 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -242,6 +242,9 @@ type Config struct { // OverrideVerkle (TODO: remove after the fork) OverrideVerkle *uint64 `toml:",omitempty"` + // OverridePQHardfork overrides the PQ transaction activation timestamp. + OverridePQHardfork *uint64 `toml:",omitempty"` + // EIP-7966: eth_sendRawTransactionSync timeouts TxSyncDefaultTimeout time.Duration `toml:",omitempty"` TxSyncMaxTimeout time.Duration `toml:",omitempty"` diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 613ef4c808..a1beaf7a4d 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -83,6 +83,7 @@ func (c Config) MarshalTOML() (interface{}, error) { OverrideBPO2 *uint64 `toml:",omitempty"` OverridePasteur *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` + OverridePQHardfork *uint64 `toml:",omitempty"` TxSyncDefaultTimeout time.Duration `toml:",omitempty"` TxSyncMaxTimeout time.Duration `toml:",omitempty"` BlobExtraReserve uint64 @@ -161,6 +162,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.OverrideBPO2 = c.OverrideBPO2 enc.OverridePasteur = c.OverridePasteur enc.OverrideVerkle = c.OverrideVerkle + enc.OverridePQHardfork = c.OverridePQHardfork enc.TxSyncDefaultTimeout = c.TxSyncDefaultTimeout enc.TxSyncMaxTimeout = c.TxSyncMaxTimeout enc.BlobExtraReserve = c.BlobExtraReserve @@ -243,6 +245,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { OverrideBPO2 *uint64 `toml:",omitempty"` OverridePasteur *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` + OverridePQHardfork *uint64 `toml:",omitempty"` TxSyncDefaultTimeout *time.Duration `toml:",omitempty"` TxSyncMaxTimeout *time.Duration `toml:",omitempty"` BlobExtraReserve *uint64 @@ -454,6 +457,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideVerkle != nil { c.OverrideVerkle = dec.OverrideVerkle } + if 
dec.OverridePQHardfork != nil { + c.OverridePQHardfork = dec.OverridePQHardfork + } if dec.TxSyncDefaultTimeout != nil { c.TxSyncDefaultTimeout = *dec.TxSyncDefaultTimeout } diff --git a/eth/handler.go b/eth/handler.go index 83c628b23c..5082af19c8 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -125,6 +125,13 @@ type votePool interface { SubscribeNewVoteEvent(ch chan<- core.NewVoteEvent) event.Subscription } +// pqVotePool is the post-quantum counterpart of votePool. +type pqVotePool interface { + PutVote(vote *types.PQVoteEnvelope) + GetVotes() []*types.PQVoteEnvelope + SubscribeNewPQVoteEvent(ch chan<- core.NewPQVoteEvent) event.Subscription +} + // handlerConfig is the collection of initialization parameters to create a full // node network handler. type handlerConfig struct { @@ -133,6 +140,7 @@ type handlerConfig struct { Chain *core.BlockChain // Blockchain to serve data from TxPool txPool // Transaction pool to propagate from VotePool votePool + PQVotePool pqVotePool // optional post-quantum vote pool (Bsc4+) Network uint64 // Network identifier to advertise Sync ethconfig.SyncMode // Whether to snap or full sync BloomCache uint64 // Megabytes to alloc for snap sync bloom @@ -167,6 +175,7 @@ type handler struct { database ethdb.Database txpool txPool votepool votePool + pqVotepool pqVotePool maliciousVoteMonitor *monitor.MaliciousVoteMonitor chain *core.BlockChain maxPeers int @@ -190,6 +199,8 @@ type handler struct { voteCh chan core.NewVoteEvent votesSub event.Subscription voteMonitorSub event.Subscription + pqVoteCh chan core.NewPQVoteEvent + pqVotesSub event.Subscription requiredBlocks map[uint64]common.Hash @@ -221,6 +232,7 @@ func newHandler(config *handlerConfig) (*handler, error) { database: config.Database, txpool: config.TxPool, votepool: config.VotePool, + pqVotepool: config.PQVotePool, chain: config.Chain, peers: config.PeerSet, txBroadcastKey: newBroadcastChoiceKey(), @@ -564,6 +576,9 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler 
eth.Handler) error { if h.votepool != nil && p.bscExt != nil { h.syncVotes(p.bscExt) } + if h.pqVotepool != nil && p.bscExt != nil { + h.syncPQVotes(p.bscExt) + } // Create a notification channel for pending requests if the peer goes down dead := make(chan struct{}) @@ -748,6 +763,14 @@ func (h *handler) Start(maxPeers int, maxPeersPerIP int) { } } + // broadcast post-quantum votes (Bsc4+) + if h.pqVotepool != nil { + h.wg.Add(1) + h.pqVoteCh = make(chan core.NewPQVoteEvent, voteChanSize) + h.pqVotesSub = h.pqVotepool.SubscribeNewPQVoteEvent(h.pqVoteCh) + go h.pqVoteBroadcastLoop() + } + // announce local pending transactions again h.wg.Add(1) h.reannoTxsCh = make(chan core.ReannoTxsEvent, txChanSize) @@ -801,6 +824,9 @@ func (h *handler) Stop() { h.voteMonitorSub.Unsubscribe() } } + if h.pqVotepool != nil { + h.pqVotesSub.Unsubscribe() // quits pqVoteBroadcastLoop + } close(h.stopCh) // Quit chainSync and txsync64. // After this is done, no new peers will be accepted. @@ -1136,6 +1162,35 @@ func (h *handler) voteBroadcastLoop() { } } +// BroadcastPQVote propagates a PQ vote to all peers that have not seen it yet +// and that negotiated at least Bsc4. +func (h *handler) BroadcastPQVote(vote *types.PQVoteEnvelope) { + if vote == nil { + return + } + peers := h.peers.peersWithoutPQVote(vote.Hash()) + for _, peer := range peers { + if peer.bscExt == nil || peer.bscExt.Version() < bsc.Bsc4 { + continue + } + peer.bscExt.AsyncSendPQVotes([]*types.PQVoteEnvelope{vote}) + } + log.Debug("PQ vote broadcast", "peers", len(peers), "target", vote.Data.TargetNumber) +} + +// pqVoteBroadcastLoop announces new PQ votes to connected peers. +func (h *handler) pqVoteBroadcastLoop() { + defer h.wg.Done() + for { + select { + case event := <-h.pqVoteCh: + h.BroadcastPQVote(event.Vote) + case <-h.pqVotesSub.Err(): + return + } + } +} + // enableSyncedFeatures enables the post-sync functionalities when the initial // sync is finished. 
func (h *handler) enableSyncedFeatures() { diff --git a/eth/handler_bsc.go b/eth/handler_bsc.go index 791c60db1e..73230eadb8 100644 --- a/eth/handler_bsc.go +++ b/eth/handler_bsc.go @@ -42,6 +42,9 @@ func (h *bscHandler) Handle(peer *bsc.Peer, packet bsc.Packet) error { case *bsc.VotesPacket: return h.handleVotesBroadcast(peer, packet.Votes) + case *bsc.PQVotesPacket: + return h.handlePQVotesBroadcast(peer, packet.Votes) + default: return fmt.Errorf("unexpected bsc packet type: %T", packet) } @@ -61,3 +64,18 @@ func (h *bscHandler) handleVotesBroadcast(peer *bsc.Peer, votes []*types.VoteEnv return nil } + +// handlePQVotesBroadcast is invoked when a peer delivers a PQVotesPacket. +func (h *bscHandler) handlePQVotesBroadcast(peer *bsc.Peer, votes []*types.PQVoteEnvelope) error { + if h.pqVotepool == nil { + return nil + } + if peer.IsOverLimitAfterReceiving() { + return nil + } + // Single-vote DoS protection mirrors the BLS handler — one envelope per broadcast. + if len(votes) > 0 { + h.pqVotepool.PutVote(votes[0]) + } + return nil +} diff --git a/eth/peerset.go b/eth/peerset.go index 3d76d62df4..e0dae19b66 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -476,6 +476,21 @@ func (ps *peerSet) peersWithoutVote(hash common.Hash) []*ethPeer { return list } +// peersWithoutPQVote retrieves a list of peers that do not have a given +// post-quantum vote in their set of known hashes. +func (ps *peerSet) peersWithoutPQVote(hash common.Hash) []*ethPeer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + list := make([]*ethPeer, 0, len(ps.peers)) + for _, p := range ps.peers { + if p.bscExt != nil && !p.bscExt.KnownPQVote(hash) { + list = append(list, p) + } + } + return list +} + // len returns if the current number of `eth` peers in the set. Since the `snap` // peers are tied to the existence of an `eth` connection, that will always be a // subset of `eth`. 
diff --git a/eth/protocols/bsc/handler.go b/eth/protocols/bsc/handler.go index df15894a14..5dc9f8fce4 100644 --- a/eth/protocols/bsc/handler.go +++ b/eth/protocols/bsc/handler.go @@ -96,6 +96,14 @@ var bsc2 = map[uint64]msgHandler{ BlocksByRangeMsg: handleBlocksByRange, } +var bsc4 = map[uint64]msgHandler{ + BscCapMsg: handleBscCap, + VotesMsg: handleVotes, + GetBlocksByRangeMsg: handleGetBlocksByRange, + BlocksByRangeMsg: handleBlocksByRange, + PQVotesMsg: handlePQVotes, +} + // handleBscCap ignores the capability message for backward compatibility. // Old nodes send BscCapMsg as part of their handshake, we just ignore it // since P2P layer already negotiated the protocol version. @@ -126,6 +134,9 @@ func handleMessage(backend Backend, peer *Peer) error { if peer.Version() >= Bsc2 { handlers = bsc2 } + if peer.Version() >= Bsc4 { + handlers = bsc4 + } // Track the amount of time it takes to serve the request and run the handler if metrics.Enabled() { @@ -155,6 +166,15 @@ func handleVotes(backend Backend, msg Decoder, peer *Peer) error { return backend.Handle(peer, ann) } +func handlePQVotes(backend Backend, msg Decoder, peer *Peer) error { + ann := new(PQVotesPacket) + if err := msg.Decode(ann); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + peer.markPQVotes(ann.Votes) + return backend.Handle(peer, ann) +} + func handleGetBlocksByRange(backend Backend, msg Decoder, peer *Peer) error { req := new(GetBlocksByRangePacket) if err := msg.Decode(req); err != nil { diff --git a/eth/protocols/bsc/peer.go b/eth/protocols/bsc/peer.go index c74a956e86..3d6cd4fc53 100644 --- a/eth/protocols/bsc/peer.go +++ b/eth/protocols/bsc/peer.go @@ -32,12 +32,14 @@ const ( // Peer is a collection of relevant information we have about a `bsc` peer. 
type Peer struct { - id string // Unique ID for the peer, cached - knownVotes *knownCache // Set of vote hashes known to be known by this peer - voteBroadcast chan []*types.VoteEnvelope // Channel used to queue votes propagation requests - periodBegin time.Time // Begin time of the latest period for votes counting - periodCounter uint // Votes number in the latest period - dispatcher *Dispatcher // Message request-response dispatcher + id string // Unique ID for the peer, cached + knownVotes *knownCache // Set of vote hashes known to be known by this peer + voteBroadcast chan []*types.VoteEnvelope // Channel used to queue votes propagation requests + knownPQVotes *knownCache // Set of PQ vote hashes known by this peer (Bsc4+) + pqVoteBroadcast chan []*types.PQVoteEnvelope // Channel used to queue PQ votes propagation requests + periodBegin time.Time // Begin time of the latest period for votes counting + periodCounter uint // Votes number in the latest period + dispatcher *Dispatcher // Message request-response dispatcher *p2p.Peer // The embedded P2P package peer rw p2p.MsgReadWriter // Input/output streams for bsc @@ -51,19 +53,24 @@ type Peer struct { func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer { id := p.ID().String() peer := &Peer{ - id: id, - knownVotes: newKnownCache(maxKnownVotes), - voteBroadcast: make(chan []*types.VoteEnvelope, voteBufferSize), - periodBegin: time.Now(), - periodCounter: 0, - Peer: p, - rw: rw, - version: version, - logger: log.New("peer", id[:8]), - term: make(chan struct{}), + id: id, + knownVotes: newKnownCache(maxKnownVotes), + voteBroadcast: make(chan []*types.VoteEnvelope, voteBufferSize), + knownPQVotes: newKnownCache(maxKnownVotes), + pqVoteBroadcast: make(chan []*types.PQVoteEnvelope, voteBufferSize), + periodBegin: time.Now(), + periodCounter: 0, + Peer: p, + rw: rw, + version: version, + logger: log.New("peer", id[:8]), + term: make(chan struct{}), } peer.dispatcher = NewDispatcher(peer) go 
peer.broadcastVotes() + if version >= Bsc4 { + go peer.broadcastPQVotes() + } return peer } @@ -157,6 +164,56 @@ func (p *Peer) broadcastVotes() { } } +// KnownPQVote returns whether peer is known to already have a PQ vote. +func (p *Peer) KnownPQVote(hash common.Hash) bool { + return p.knownPQVotes.contains(hash) +} + +// markPQVotes marks PQ votes as known for the peer so we don't repropagate them. +func (p *Peer) markPQVotes(votes []*types.PQVoteEnvelope) { + for _, vote := range votes { + if !p.knownPQVotes.contains(vote.Hash()) { + p.knownPQVotes.add(vote.Hash()) + } + } +} + +// sendPQVotes propagates a batch of PQ votes to the remote peer. +func (p *Peer) sendPQVotes(votes []*types.PQVoteEnvelope) error { + p.markPQVotes(votes) + return p2p.Send(p.rw, PQVotesMsg, &PQVotesPacket{votes}) +} + +// AsyncSendPQVotes queues a batch of PQ votes for propagation. Silently dropped +// if the peer buffer is full or closed. +func (p *Peer) AsyncSendPQVotes(votes []*types.PQVoteEnvelope) { + if p.version < Bsc4 { + return + } + select { + case p.pqVoteBroadcast <- votes: + case <-p.term: + p.Log().Debug("Dropping PQ vote propagation for closed peer", "count", len(votes)) + default: + p.Log().Debug("Dropping PQ vote propagation for abnormal peer", "count", len(votes)) + } +} + +// broadcastPQVotes is the write loop for post-quantum votes (Bsc4+). +func (p *Peer) broadcastPQVotes() { + for { + select { + case votes := <-p.pqVoteBroadcast: + if err := p.sendPQVotes(votes); err != nil { + return + } + p.Log().Trace("Sent PQ votes", "count", len(votes)) + case <-p.term: + return + } + } +} + // knownCache is a cache for known hashes. 
type knownCache struct { hashes mapset.Set[common.Hash] diff --git a/eth/protocols/bsc/protocol.go b/eth/protocols/bsc/protocol.go index fc3eb5b709..d75e9d83bb 100644 --- a/eth/protocols/bsc/protocol.go +++ b/eth/protocols/bsc/protocol.go @@ -13,6 +13,7 @@ const ( Bsc1 = 1 Bsc2 = 2 Bsc3 = 3 // to BAL process + Bsc4 = 4 // adds post-quantum (ML-DSA-44) votes ) // ProtocolName is the official short name of the `bsc` protocol used during @@ -21,11 +22,11 @@ const ProtocolName = "bsc" // ProtocolVersions are the supported versions of the `bsc` protocol (first // is primary). -var ProtocolVersions = []uint{Bsc1, Bsc2, Bsc3} +var ProtocolVersions = []uint{Bsc1, Bsc2, Bsc3, Bsc4} // protocolLengths are the number of implemented message corresponding to // different protocol versions. -var protocolLengths = map[uint]uint64{Bsc1: 2, Bsc2: 4, Bsc3: 4} +var protocolLengths = map[uint]uint64{Bsc1: 2, Bsc2: 4, Bsc3: 4, Bsc4: 5} // maxMessageSize is the maximum cap on the size of a protocol message. const maxMessageSize = 10 * 1024 * 1024 @@ -35,6 +36,7 @@ const ( VotesMsg = 0x01 GetBlocksByRangeMsg = 0x02 // it can request (StartBlockHeight-Count, StartBlockHeight] range blocks from remote peer BlocksByRangeMsg = 0x03 // the replied blocks from remote peer + PQVotesMsg = 0x04 // post-quantum (ML-DSA-44) votes, Bsc4+ ) var defaultExtra = []byte{0x00} @@ -68,6 +70,14 @@ func (*BscCapPacket) Kind() byte { return BscCapMsg } func (*VotesPacket) Name() string { return "Votes" } func (*VotesPacket) Kind() byte { return VotesMsg } +// PQVotesPacket is the network packet for post-quantum votes record. 
+type PQVotesPacket struct { + Votes []*types.PQVoteEnvelope +} + +func (*PQVotesPacket) Name() string { return "PQVotes" } +func (*PQVotesPacket) Kind() byte { return PQVotesMsg } + type GetBlocksByRangePacket struct { RequestId uint64 StartBlockHeight uint64 // The start block height expected to be obtained from diff --git a/eth/sync.go b/eth/sync.go index 128624e507..b4eaff691c 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/eth/protocols/bsc" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/log" ) @@ -57,6 +58,18 @@ func (h *handler) syncVotes(p *bscPeer) { p.AsyncSendVotes(votes) } +// syncPQVotes sends all currently pending PQ votes to the given peer (Bsc4+). +func (h *handler) syncPQVotes(p *bscPeer) { + if p.Version() < bsc.Bsc4 { + return + } + votes := h.pqVotepool.GetVotes() + if len(votes) == 0 { + return + } + p.AsyncSendPQVotes(votes) +} + // chainSyncer coordinates blockchain sync components. 
type chainSyncer struct { handler *handler diff --git a/go.mod b/go.mod index b5c17f5951..5a9b34d24f 100644 --- a/go.mod +++ b/go.mod @@ -14,9 +14,10 @@ require ( github.com/bnb-chain/fastssz v0.1.2 github.com/bnb-chain/ics23 v0.1.0 github.com/cespare/cp v1.1.1 + github.com/cloudflare/circl v1.6.3 github.com/cloudflare/cloudflare-go v0.114.0 github.com/cockroachdb/pebble v1.1.5 - github.com/cometbft/cometbft v0.37.0 + github.com/cometbft/cometbft v0.0.0-00010101000000-000000000000 github.com/consensys/gnark-crypto v0.18.1 github.com/cosmos/iavl v0.12.0 github.com/crate-crypto/go-eth-kzg v1.4.0 diff --git a/go.sum b/go.sum index 6d2d09194c..c9a5383e15 100644 --- a/go.sum +++ b/go.sum @@ -182,6 +182,8 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cloudflare/cloudflare-go v0.114.0 h1:ucoti4/7Exo0XQ+rzpn1H+IfVVe++zgiM+tyKtf0HUA= github.com/cloudflare/cloudflare-go v0.114.0/go.mod h1:O7fYfFfA6wKqKFn2QIR9lhj7FDw6VQCGOY6hd2TBtd0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= diff --git a/node/config.go b/node/config.go index 8e18fc7d62..d98d9c9a50 100644 --- a/node/config.go +++ b/node/config.go @@ -241,6 +241,11 @@ type Config struct { // VoteJournalDir is the directory to store votes in the fast finality feature. VoteJournalDir string `toml:",omitempty"` + // PQVoteKeyFile is a file containing the raw ML-DSA-44 private key used to + // sign post-quantum votes after PQForkTime. 
If empty, this node will not + // produce PQ votes locally (but will still relay received ones). + PQVoteKeyFile string `toml:",omitempty"` + // BatchRequestLimit is the maximum number of requests in a batch. BatchRequestLimit int `toml:",omitempty"` diff --git a/params/config.go b/params/config.go index 8e9b40398a..100c20ce47 100644 --- a/params/config.go +++ b/params/config.go @@ -353,6 +353,7 @@ var ( CancunTime: newUint64(0), HaberTime: newUint64(0), HaberFixTime: newUint64(0), + PQForkTime: newUint64(0), BohrTime: newUint64(0), PascalTime: newUint64(0), PragueTime: newUint64(0), @@ -704,7 +705,6 @@ type ChainConfig struct { ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // Eip-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // Eip-5133 (bomb delay) switch block (nil = no fork, 0 = already activated) MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter - // Fork scheduling was switched from blocks to timestamps here ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) @@ -729,6 +729,7 @@ type ChainConfig struct { BPO5Time *uint64 `json:"bpo5Time,omitempty"` // BPO5 switch time (nil = no fork, 0 = already on bpo5) AmsterdamTime *uint64 `json:"amsterdamTime,omitempty"` // Amsterdam switch time (nil = no fork, 0 = already on amsterdam) PasteurTime *uint64 `json:"pasteurTime,omitempty"` // PasteurTime switch time (nil = no fork, 0 = already on pasteurTime) + PQForkTime *uint64 `json:"pqForkTime,omitempty"` // PQ switch time (nil = no fork, 0 = already active) VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) // TerminalTotalDifficulty is the amount of total difficulty reached by @@ -929,11 +930,15 @@ func (c *ChainConfig) String() string { if c.PasteurTime != nil { 
PasteurTime = big.NewInt(0).SetUint64(*c.PasteurTime) } + var PQForkTime *big.Int + if c.PQForkTime != nil { + PQForkTime = big.NewInt(0).SetUint64(*c.PQForkTime) + } return fmt.Sprintf("{ChainID: %v, Engine: %v, Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, "+ "MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, CatalystBlock: %v, London: %v, ArrowGlacier: %v, MergeFork:%v, Euler: %v, Gibbs: %v, Nano: %v, Moran: %v, Planck: %v,Luban: %v, Plato: %v, Hertz: %v, Hertzfix: %v, "+ "ShanghaiTime: %v, KeplerTime: %v, FeynmanTime: %v, FeynmanFixTime: %v, CancunTime: %v, HaberTime: %v, HaberFixTime: %v, BohrTime: %v, PascalTime: %v, PragueTime: %v, LorentzTime: %v, MaxwellTime: %v, FermiTime: %v, "+ - "OsakaTime: %v, MendelTime: %v, BPO1Time: %v, BPO2Time: %v, PasteurTime: %v}", + "OsakaTime: %v, MendelTime: %v, BPO1Time: %v, BPO2Time: %v, PasteurTime: %v, PQForkTime: %v}", c.ChainID, engine, c.HomesteadBlock, @@ -984,6 +989,7 @@ func (c *ChainConfig) String() string { BPO1Time, BPO2Time, PasteurTime, + PQForkTime, ) } @@ -1182,6 +1188,11 @@ func (c *ChainConfig) IsLondon(num *big.Int) bool { return isBlockForked(c.LondonBlock, num) } +// IsPQFork returns whether the PQ fork is active at the given block timestamp. +func (c *ChainConfig) IsPQFork(num *big.Int, time uint64) bool { + return isTimestampForked(c.PQForkTime, time) +} + // IsArrowGlacier returns whether num is either equal to the Arrow Glacier (EIP-4345) fork block or greater. 
func (c *ChainConfig) IsArrowGlacier(num *big.Int) bool { return isBlockForked(c.ArrowGlacierBlock, num) @@ -1583,6 +1594,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "bpo5", timestamp: c.BPO5Time, optional: true}, {name: "amsterdam", timestamp: c.AmsterdamTime, optional: true}, {name: "pasteurTime", timestamp: c.PasteurTime, optional: true}, + {name: "pqForkTime", timestamp: c.PQForkTime, optional: true}, } { if lastFork.name != "" { switch { @@ -2123,7 +2135,7 @@ type Rules struct { IsShanghai, IsKepler, IsFeynman, IsCancun, IsHaber bool IsBohr, IsPascal, IsPrague, IsLorentz, IsMaxwell bool IsFermi, IsOsaka, IsMendel bool - IsAmsterdam, IsPasteur, IsVerkle bool + IsAmsterdam, IsPasteur, IsPQ, IsVerkle bool } // Rules ensures c's ChainID is not nil. @@ -2171,6 +2183,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsMendel: c.IsMendel(num, timestamp), IsAmsterdam: (isMerge || c.IsInBSC()) && c.IsAmsterdam(num, timestamp), IsPasteur: c.IsPasteur(num, timestamp), + IsPQ: c.IsPQFork(num, timestamp), IsVerkle: c.IsVerkle(num, timestamp), IsEIP4762: isVerkle, }