diff --git a/.env.example b/.env.example index eb8733fb1a..1c6a85c25d 100644 --- a/.env.example +++ b/.env.example @@ -76,3 +76,6 @@ ETHERSCAN_API_KEY= # Local devnet private key LOCAL_DEVNET_PK=0x0000000000000000000000000000000000000000000000000000000000000000 +# in case of deploying on local devnet +#LOCAL_RPC_URL=http://rpc123.testnet.fi +#BLOCK_EXPLORER_BASE_URL=http://blockscout123.testnet.fi/ diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 7ad5a60688..63353dc9c3 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,8 +9,6 @@ jobs: coverage: name: Hardhat / Unit Tests runs-on: ubuntu-latest - env: - NODE_OPTIONS: --max_old_space_size=6400 permissions: contents: write @@ -24,7 +22,8 @@ jobs: uses: ./.github/workflows/setup - name: Collect coverage - run: yarn test:coverage + run: NODE_OPTIONS="--max-old-space-size=10240" yarn test:coverage + timeout-minutes: 30 - name: Produce the coverage report uses: lidofinance/coverage-action@a94351baa279790f736655b1891178b1515594ea diff --git a/.github/workflows/tests-integration-hoodi.yml b/.github/workflows/tests-integration-hoodi.yml index 2615d471f6..c3860852c8 100644 --- a/.github/workflows/tests-integration-hoodi.yml +++ b/.github/workflows/tests-integration-hoodi.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 env: - NODE_OPTIONS: --max_old_space_size=7200 + NODE_OPTIONS: --max_old_space_size=10240 SKIP_GAS_REPORT: true SKIP_CONTRACT_SIZE: true SKIP_INTERFACES_CHECK: true @@ -20,7 +20,9 @@ jobs: uses: ./.github/workflows/setup - name: Run integration tests - run: yarn test:integration + run: yarn test:integration:upgrade env: + NETWORK: "hoodi" RPC_URL: "${{ secrets.HOODI_RPC_URL }}" + FORKING_BLOCK_NUMBER: "2715255" NETWORK_STATE_FILE: deployed-hoodi.json diff --git a/.github/workflows/tests-integration-mainnet.yml b/.github/workflows/tests-integration-mainnet.yml index 836ce0b1a6..c213c8f666 100644 --- 
a/.github/workflows/tests-integration-mainnet.yml +++ b/.github/workflows/tests-integration-mainnet.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 env: - NODE_OPTIONS: --max_old_space_size=7200 + NODE_OPTIONS: --max_old_space_size=10240 SKIP_GAS_REPORT: true SKIP_CONTRACT_SIZE: true SKIP_INTERFACES_CHECK: true @@ -20,7 +20,8 @@ jobs: uses: ./.github/workflows/setup - name: Run integration tests - run: yarn test:integration + run: yarn test:integration:upgrade env: + NETWORK: "mainnet" RPC_URL: "${{ secrets.ETH_RPC_URL }}" NETWORK_STATE_FILE: deployed-mainnet.json diff --git a/.github/workflows/tests-integration-scratch.yml b/.github/workflows/tests-integration-scratch.yml index cce5f5fd9c..e1bdefbbca 100644 --- a/.github/workflows/tests-integration-scratch.yml +++ b/.github/workflows/tests-integration-scratch.yml @@ -24,6 +24,10 @@ jobs: - name: Common setup uses: ./.github/workflows/setup + # https://github.com/foundry-rs/foundry-toolchain + - name: Install foundry + uses: foundry-rs/foundry-toolchain@v1 + - name: Set env run: cp .env.example .env diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 471024d4e5..10bd66dc8e 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -14,7 +14,7 @@ jobs: uses: ./.github/workflows/setup - name: Run unit tests - run: yarn test + run: NODE_OPTIONS="--max-old-space-size=10240" yarn test test_foundry_fuzzing: name: Foundry / Fuzzing & Invariants diff --git a/.gitignore b/.gitignore index 54f7003981..a1917ddeff 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ coverage/ state-mate/ coverage.json +localhost.json typechain/ typechain-types/ @@ -25,12 +26,16 @@ foundry/out/ lib/abi/*.json .env +.env.* +!.env.example +!.env.*.example accounts.json deployed-local.json deployed-local-devnet.json deployed-hardhat.json -deployed-local-devnet.json -deployed-mainnet-upgrade.json +deployed-*-upgrade.json +**/upgrade-params-local.toml 
+**/upgrade-params-*-upgrade.toml # MacOS .DS_Store diff --git a/contracts/0.4.24/Lido.sol b/contracts/0.4.24/Lido.sol index 8fff6b8891..91d1de5eb0 100644 --- a/contracts/0.4.24/Lido.sol +++ b/contracts/0.4.24/Lido.sol @@ -11,37 +11,28 @@ import {ILidoLocator} from "../common/interfaces/ILidoLocator.sol"; import {StETHPermit} from "./StETHPermit.sol"; import {Versioned} from "./utils/Versioned.sol"; - -import {Math256} from "../common/lib/Math256.sol"; import {StakeLimitUtils, StakeLimitUnstructuredStorage, StakeLimitState} from "./lib/StakeLimitUtils.sol"; import {UnstructuredStorageExt} from "./utils/UnstructuredStorageExt.sol"; - -interface IBurnerMigration { - function migrate(address _oldBurner) external; -} +import {Math256} from "../common/lib/Math256.sol"; interface IStakingRouter { - function deposit(uint256 _depositsCount, uint256 _stakingModuleId, bytes _depositCalldata) external payable; - - function getStakingModuleMaxDepositsCount( - uint256 _stakingModuleId, - uint256 _maxDepositsValue - ) external view returns (uint256); - function getTotalFeeE4Precision() external view returns (uint16 totalFee); function TOTAL_BASIS_POINTS() external view returns (uint256); function getWithdrawalCredentials() external view returns (bytes32); - function getStakingFeeAggregateDistributionE4Precision() external view returns (uint16 modulesFee, uint16 treasuryFee); + function getStakingFeeAggregateDistributionE4Precision() + external + view + returns (uint16 modulesFee, uint16 treasuryFee); + + function receiveDepositableEther() external payable; } interface IWithdrawalQueue { function unfinalizedStETH() external view returns (uint256); - function isBunkerModeActive() external view returns (bool); - function finalize(uint256 _lastIdToFinalize, uint256 _maxShareRate) external payable; } @@ -53,6 +44,26 @@ interface IWithdrawalVault { function withdrawWithdrawals(uint256 _amount) external; } +interface IAccountingOracle { + /// @dev returns a tuple instead of a structure 
to avoid allocating memory + function getProcessingState() + external + view + returns ( + uint256 currentFrameRefSlot, + uint256 processingDeadlineTime, + bytes32 mainDataHash, + bool mainDataSubmitted, + bytes32 extraDataHash, + uint256 extraDataFormat, + bool extraDataSubmitted, + uint256 extraDataItemsCount, + uint256 extraDataItemsSubmitted + ); + function getLastProcessingRefSlot() external view returns (uint256); + function getCurrentFrame() external view returns (uint256 refSlot, uint256 refSlotTimestamp); +} + /** * @title Liquid staking pool implementation * @@ -87,8 +98,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { bytes32 public constant RESUME_ROLE = 0x2fc10cc8ae19568712f7a176fb4978616a610650813c9d05326c34abb62749c7; // keccak256("RESUME_ROLE"); bytes32 public constant STAKING_PAUSE_ROLE = 0x84ea57490227bc2be925c684e2a367071d69890b629590198f4125a018eb1de8; // keccak256("STAKING_PAUSE_ROLE") bytes32 public constant STAKING_CONTROL_ROLE = 0xa42eee1333c0758ba72be38e728b6dadb32ea767de5b4ddbaea1dae85b1b051f; // keccak256("STAKING_CONTROL_ROLE") - bytes32 public constant UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE = - 0xe6dc5d79630c61871e99d341ad72c5a052bed2fc8c79e5a4480a7cd31117576c; // keccak256("UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE") + bytes32 public constant BUFFER_RESERVE_MANAGER_ROLE = + 0x33969636f1fbf3d7d062d4de4a08e7bd3c46606ec28b3a4398d2665be559b921; // keccak256("BUFFER_RESERVE_MANAGER_ROLE") uint256 private constant DEPOSIT_SIZE = 32 ether; @@ -99,8 +110,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { /// |----- 128 bit -----|------ 128 bit -------| /// | external shares | total shares | /// keccak256("lido.StETH.totalAndExternalShares") - bytes32 internal constant TOTAL_AND_EXTERNAL_SHARES_POSITION = - TOTAL_SHARES_POSITION_LOW128; + bytes32 internal constant TOTAL_AND_EXTERNAL_SHARES_POSITION = TOTAL_SHARES_POSITION_LOW128; + /// @dev storage slot position for the Lido protocol contracts locator /// Since version 3, high 96 
bits are used for the max external ratio BP /// |----- 96 bit -----|------ 160 bit -------| @@ -108,30 +119,63 @@ contract Lido is Versioned, StETHPermit, AragonApp { /// keccak256("lido.Lido.lidoLocatorAndMaxExternalRatio") bytes32 internal constant LOCATOR_AND_MAX_EXTERNAL_RATIO_POSITION = 0xd92bc31601d11a10411d08f59b7146d8a5915af253cde25f8e66b67beb4be223; + /// @dev amount of ether (on the current Ethereum side) buffered on this smart contract balance - /// Since version 3, high 128 bits are used for the deposited validators count - /// |------ 128 bit -------|------ 128 bit -------| - /// | deposited validators | buffered ether | - /// keccak256("lido.Lido.bufferedEtherAndDepositedValidators"); - bytes32 internal constant BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION = - 0xa84c096ee27e195f25d7b6c7c2a03229e49f1a2a5087e57ce7d7127707942fe3; - /// @dev total amount of ether on Consensus Layer (sum of all the balances of Lido validators) - // "beacon" in the `keccak256()` parameter is staying here for compatibility reason - /// Since version 3, high 128 bits are used for the CL validators count - /// |----- 128 bit -----|------ 128 bit -------| - /// | CL validators | CL balance | - /// keccak256("lido.Lido.clBalanceAndClValidators"); - bytes32 internal constant CL_BALANCE_AND_CL_VALIDATORS_POSITION = - 0xc36804a03ec742b57b141e4e5d8d3bd1ddb08451fd0f9983af8aaab357a78e2f; + /// and amount of ether deposited since last report + /// depositedPostReport lifecycle: + /// 1) increased by `withdrawDepositableEther()` as CL deposits are performed; + /// 2) resets on report processing via `processClStateUpdate()` + /// |------ 128 bit --------|----- 128 bit ------| + /// | deposited post report | buffered ether | + /// keccak256("lido.Lido.bufferedEtherAndDepositedPostReport"); + bytes32 internal constant BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION = + 0x81a11fa1111afa59b50051f60ccf604a39d96acb484dc467ad8eadb4a63f0a5f; + + /// @dev an internal counter accumulates the 
ETH deposited after the reporting period/frame changes + /// and unique identifier for the last deposit's frame (in this case, it's current refSlot) + /// keccak256("lido.Lido.depositedNextReportAndLastDepositNonce") + bytes32 internal constant DEPOSITED_NEXT_REPORT_AND_LAST_DEPOSIT_NONCE_POSITION = + 0x8d3ed945c7718edcdb639b1235f2bbe3fa81f4a6cec7a436d8ea13fbc502d957; + + /// @dev CL validators balance and CL pending deposit balance + /// |----- 128 bit ------------|------ 128 bit -------| + /// | CL validators balance | CL pending balance | + /// keccak256("lido.Lido.clValidatorsBalanceAndClPendingBalance"); + bytes32 internal constant CL_VALIDATORS_BALANCE_AND_CL_PENDING_BALANCE_POSITION = + 0x096e465397f38e659238ccd5d5a2c434ced54a63fd8d694045bfb058ab9d8112; + + /// @dev number of initial seed deposits (incrementing counter), ex. deposited validators + /// keccak256("lido.Lido.seedDepositsCount"); + bytes32 internal constant SEED_DEPOSITS_COUNT_POSITION = + 0x3f0eaa2c0f16ff9775c078f3df30470d8c042317b24ad1defa240b1c3e10b238; + /// @dev storage slot position of the staking rate limit structure /// keccak256("lido.Lido.stakeLimit"); bytes32 internal constant STAKING_STATE_POSITION = 0xa3678de4a579be090bed1177e0a24f77cc29d181ac22fd7688aca344d8938015; + /// @dev storage slot position for the total amount of execution layer rewards received by Lido contract. /// keccak256("lido.Lido.totalELRewardsCollected"); bytes32 internal constant TOTAL_EL_REWARDS_COLLECTED_POSITION = 0xafe016039542d12eec0183bb0b1ffc2ca45b027126a494672fba4154ee77facb; + /// @dev Storage slot for deposit reserve. + /// Holds buffered ether that remains depositable even when withdrawals demand exists. 
+ /// Lifecycle: + /// 1) can be decreased by `setDepositsReserveTarget()` when target is lowered; + /// 2) consumed by `withdrawDepositableEther()` as CL deposits are performed; + /// 3) synced to target on report processing via `_updateBufferedEtherAllocation()` + /// keccak256("lido.Lido.depositsReserve") + bytes32 internal constant DEPOSITS_RESERVE_POSITION = + 0xda4fbe3b9cbd98dfae5dff538bbff4ba61f38979d4d7419bcd006f3e6250ec13; + + /// @dev Storage slot for deposits reserve target. + /// Stores governance-configured value that deposits reserve is restored to on each oracle report. + /// Set via `setDepositsReserveTarget()`, gated by `BUFFER_RESERVE_MANAGER_ROLE` + /// keccak256("lido.Lido.depositsReserveTarget") + bytes32 internal constant DEPOSITS_RESERVE_TARGET_POSITION = + 0x3d3e9bd6e90e5d1f1c6839835bcbe5746a47c9a013d1eae6e80c248264c06a81; + // Staking was paused (don't accept user's ether submits) event StakingPaused(); // Staking was resumed (accept user's ether submits) @@ -141,15 +185,18 @@ contract Lido is Versioned, StETHPermit, AragonApp { // Staking limit was removed event StakingLimitRemoved(); - // Emitted when validators number delivered by the oracle - event CLValidatorsUpdated(uint256 indexed reportTimestamp, uint256 preCLValidators, uint256 postCLValidators); + // Emitted when CL balances are updated by the oracle + event CLBalancesUpdated(uint256 indexed reportTimestamp, uint256 clValidatorsBalance, uint256 clPendingBalance); + // Emitted when CL pending balance is updated during deposits to CL + event DepositedPostReportUpdated(uint256 depositedPostReport); // Emitted when depositedValidators value is changed event DepositedValidatorsChanged(uint256 depositedValidators); // Emitted when oracle accounting report processed - // @dev `preCLBalance` is the balance of the validators on previous report - // plus the amount of ether that was deposited to the deposit contract since then + // @dev `preCLBalance` is actually the principal CL balance: 
the sum of the previous report's + // CL validators balance, CL pending balance, and deposited balance since the last report. + // The parameter name is kept for ABI backward compatibility. event ETHDistributed( uint256 indexed reportTimestamp, uint256 preCLBalance, // actually its preCLBalance + deposits due to compatibility reasons @@ -208,6 +255,15 @@ contract Lido is Versioned, StETHPermit, AragonApp { // Bad debt internalized event ExternalBadDebtInternalized(uint256 amountOfShares); + // Emitted when current deposits reserve is updated. + // Can be emitted from `withdrawDepositableEther()`, `collectRewardsAndProcessWithdrawals()`, + // and `setDepositsReserveTarget()` when target is lowered below current reserve. + event DepositsReserveSet(uint256 depositsReserve); + + // Emitted when deposits reserve target is set via `setDepositsReserveTarget()`. + // Emitted even if the new value equals the previous one + event DepositsReserveTargetSet(uint256 depositsReserveTarget); + /** * @notice Initializer function for scratch deploy of Lido contract * @@ -224,7 +280,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { emit LidoLocatorSet(_lidoLocator); _initializeEIP712StETH(_eip712StETH); - _setContractVersion(3); + _setContractVersion(4); ILidoLocator locator = ILidoLocator(_lidoLocator); @@ -233,86 +289,53 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /** - * @notice A function to finalize upgrade to v3 (from v2). Can be called only once - * - * For more details see https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-10.md - * @param _oldBurner The address of the old Burner contract to migrate from - * @param _contractsWithBurnerAllowances Contracts that have allowances for the old burner to be migrated - * @param _initialMaxExternalRatioBP Initial maximum external ratio in basis points + * @notice A function to finalize upgrade to v4 (from v3). 
Can be called only once */ - function finalizeUpgrade_v3( - address _oldBurner, - address[] _contractsWithBurnerAllowances, - uint256 _initialMaxExternalRatioBP - ) external { + function finalizeUpgrade_v4() external { require(hasInitialized(), "NOT_INITIALIZED"); - _checkContractVersion(2); - _setContractVersion(3); - - _migrateStorage_v2_to_v3(); - - _migrateBurner_v2_to_v3(_oldBurner, _contractsWithBurnerAllowances); - - _setMaxExternalRatioBP(_initialMaxExternalRatioBP); - } - - function _migrateStorage_v2_to_v3() internal { - // migrate storage to packed representation - bytes32 LIDO_LOCATOR_POSITION = keccak256("lido.Lido.lidoLocator"); - address locator = LIDO_LOCATOR_POSITION.getStorageAddress(); - assert(locator != address(0)); // sanity check - - _setLidoLocator(LIDO_LOCATOR_POSITION.getStorageAddress()); - LIDO_LOCATOR_POSITION.setStorageUint256(0); - - bytes32 BUFFERED_ETHER_POSITION = keccak256("lido.Lido.bufferedEther"); - _setBufferedEther(BUFFERED_ETHER_POSITION.getStorageUint256()); - BUFFERED_ETHER_POSITION.setStorageUint256(0); - - bytes32 DEPOSITED_VALIDATORS_POSITION = keccak256("lido.Lido.depositedValidators"); - _setDepositedValidators(DEPOSITED_VALIDATORS_POSITION.getStorageUint256()); - DEPOSITED_VALIDATORS_POSITION.setStorageUint256(0); - bytes32 CL_VALIDATORS_POSITION = keccak256("lido.Lido.beaconValidators"); - bytes32 CL_BALANCE_POSITION = keccak256("lido.Lido.beaconBalance"); - _setClBalanceAndClValidators( - CL_BALANCE_POSITION.getStorageUint256(), - CL_VALIDATORS_POSITION.getStorageUint256() - ); - CL_BALANCE_POSITION.setStorageUint256(0); - CL_VALIDATORS_POSITION.setStorageUint256(0); - - bytes32 TOTAL_SHARES_POSITION = keccak256("lido.StETH.totalShares"); - uint256 totalShares = TOTAL_SHARES_POSITION.getStorageUint256(); - assert(totalShares > 0); // sanity check - TOTAL_AND_EXTERNAL_SHARES_POSITION.setLowUint128(totalShares); - TOTAL_SHARES_POSITION.setStorageUint256(0); - } - - function _migrateBurner_v2_to_v3( - address 
_oldBurner, - address[] _contractsWithBurnerAllowances - ) internal { - require(_oldBurner != address(0), "OLD_BURNER_ADDRESS_ZERO"); - address burner = _burner(); - require(_oldBurner != burner, "OLD_BURNER_SAME_AS_NEW"); - - // migrate burner stETH balance - uint256 oldBurnerShares = _sharesOf(_oldBurner); - if (oldBurnerShares > 0) { - _transferShares(_oldBurner, burner, oldBurnerShares); - _emitTransferEvents(_oldBurner, burner, getPooledEthByShares(oldBurnerShares), oldBurnerShares); - } - - // initialize new burner with state from the old burner - IBurnerMigration(burner).migrate(_oldBurner); - - // migrating allowances - for (uint256 i = 0; i < _contractsWithBurnerAllowances.length; i++) { - uint256 oldAllowance = allowance(_contractsWithBurnerAllowances[i], _oldBurner); - _approve(_contractsWithBurnerAllowances[i], _oldBurner, 0); - _approve(_contractsWithBurnerAllowances[i], burner, oldAllowance); - } + /// @dev prevent migration if the last oracle report wasn't submitted, otherwise deposits + /// made after refSlot and before migration (i.e. 
report's tx) will be lost + IAccountingOracle oracle = _accountingOracle(); + (,,, bool mainDataSubmitted,,,,,) = oracle.getProcessingState(); + /// @dev pass in case of initial deploy + require(mainDataSubmitted || oracle.getLastProcessingRefSlot() == 0, "NO_REPORT"); + + _checkContractVersion(3); + _setContractVersion(4); + _migrateStorage_v3_to_v4(); + } + + function _migrateStorage_v3_to_v4() internal { + /// @dev storage slots used in v3 + // keccak256("lido.Lido.clBalanceAndClValidators") + bytes32 CL_BALANCE_AND_CL_VALIDATORS_POSITION = + 0xc36804a03ec742b57b141e4e5d8d3bd1ddb08451fd0f9983af8aaab357a78e2f; + // keccak256("lido.Lido.bufferedEtherAndDepositedValidators"); + bytes32 BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION = + 0xa84c096ee27e195f25d7b6c7c2a03229e49f1a2a5087e57ce7d7127707942fe3; + + (uint256 clValidatorsBalance, uint256 clValidators) = + CL_BALANCE_AND_CL_VALIDATORS_POSITION.getLowAndHighUint128(); + (uint256 bufferedEther, uint256 depositedValidators) = + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getLowAndHighUint128(); + + /// @dev convert ex-transientBalance to amount submitted to the Deposit contract + /// after the last accounting oracle report + uint256 depositedPostReport = (depositedValidators - clValidators) * DEPOSIT_SIZE; + _setBufferedEtherAndDepositedPostReport(bufferedEther, depositedPostReport); + /// @dev Since migration is only possible after a report and before the next frame begins, + /// the transient balance will apply to the current frame + (uint256 curNonce,) = _getCurrentFrame(); // get current refslot + _setDepositedNextReportAndLastDepositNonce(depositedPostReport, curNonce); + + /// @dev no pending balance at the moment of upgrade + _setClValidatorsBalanceAndClPendingBalance(clValidatorsBalance, 0); + _setSeedDepositsCount(depositedValidators); + + // wipe out the slots + CL_BALANCE_AND_CL_VALIDATORS_POSITION.setStorageUint256(0); + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setStorageUint256(0); } 
/** @@ -370,10 +393,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { require(_maxStakeLimit <= uint96(-1) / 2, "TOO_LARGE_MAX_STAKE_LIMIT"); STAKING_STATE_POSITION.setStorageStakeLimitStruct( - STAKING_STATE_POSITION.getStorageStakeLimitStruct().setStakingLimit( - _maxStakeLimit, - _stakeLimitIncreasePerBlock - ) + STAKING_STATE_POSITION.getStorageStakeLimitStruct() + .setStakingLimit(_maxStakeLimit, _stakeLimitIncreasePerBlock) ); emit StakingLimitSet(_maxStakeLimit, _stakeLimitIncreasePerBlock); @@ -532,31 +553,120 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /** - * @notice Unsafely change the deposited validators counter + * @return the amount of ether temporarily buffered on this contract balance + * @dev Buffered balance is kept on the contract from the moment the funds are received from user + * until the moment they are actually sent to the official Deposit contract or used to fulfill withdrawal requests + */ + function getBufferedEther() external view returns (uint256) { + return _getBufferedEther(); + } + + /** + * @notice Buffered ether split into reserve buckets. + * @param total Total buffered ether, equal to `getBufferedEther()`. + * @param unreserved Buffer remainder after both reserves are filled. Available for additional CL deposits + * beyond the deposits reserve + * @param depositsReserve Buffer portion available for CL deposits, protected from withdrawals demand. + * Resets on each oracle report, decreases via `withdrawDepositableEther()` + * @param withdrawalsReserve Buffer portion allocated to unfinalized withdrawals. Not depositable to CL. + * Zero when all withdrawal requests are finalized + */ + struct BufferedEtherAllocation { + uint256 total; + uint256 unreserved; + uint256 depositsReserve; + uint256 withdrawalsReserve; + } + + /** + * @notice Calculates buffered ether allocation across reserves + * @dev Buffer is split by priority: * - * The method unsafely changes deposited validator counter. 
- * Can be required when onboarding external validators to Lido - * (i.e., had deposited before and rotated their type-0x00 withdrawal credentials to Lido) + * 1. depositsReserve - per-frame CL deposit allowance, filled first + * 2. withdrawalsReserve - covers unfinalized withdrawal requests + * 3. unreserved - excess, available for additional CL deposits * - * @param _newDepositedValidators new value + * ┌─────────── Total Buffered Ether ───────────┐ + * ├────────────────────┬───────────────────────┼─────┬──────────────┐ + * │●●●●●●●●●●●●●●●●●●●●│●●●●●●●●●●●●●●●●●●●●●●●●○○○○○│○○○○○○○○○○○○○○│ + * ├────────────────────┼───────────────────────┼─────┼──────────────┤ + * └─ Deposits Reserve ─┼─ Withdrawals Reserve ─┘ ├─ Unreserved ─┘ + * └───── Unfinalized stETH ─────┘ * - * TODO: remove this with maxEB-friendly accounting + * ● - covered by Buffered Ether + * ○ - not covered by Buffered Ether + * + * depositsReserve = min(total, stored deposits reserve) + * withdrawalsReserve = min(total - depositsReserve, unfinalizedStETH) + * unreserved = total - depositsReserve - withdrawalsReserve */ - function unsafeChangeDepositedValidators(uint256 _newDepositedValidators) external { - _auth(UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE); + function _getBufferedEtherAllocation() internal view returns (BufferedEtherAllocation allocation) { + uint256 remaining = _getBufferedEther(); + allocation.total = remaining; + + allocation.depositsReserve = Math256.min(remaining, DEPOSITS_RESERVE_POSITION.getStorageUint256()); + remaining -= allocation.depositsReserve; - _setDepositedValidators(_newDepositedValidators); + allocation.withdrawalsReserve = Math256.min(remaining, _withdrawalQueue().unfinalizedStETH()); + remaining -= allocation.withdrawalsReserve; - emit DepositedValidatorsChanged(_newDepositedValidators); + allocation.unreserved = remaining; } /** - * @return the amount of ether temporarily buffered on this contract balance - * @dev Buffered balance is kept on the contract from the 
moment the funds are received from user - * until the moment they are actually sent to the official Deposit contract or used to fulfill withdrawal requests + * @notice Returns the currently effective deposits reserve — buffer portion available for CL deposits, protected + * from withdrawals demand + * @dev Capped by current buffered ether. See `_getBufferedEtherAllocation()` */ - function getBufferedEther() external view returns (uint256) { - return _getBufferedEther(); + function getDepositsReserve() external view returns (uint256 depositsReserve) { + return _getBufferedEtherAllocation().depositsReserve; + } + + /** + * @dev Stores new deposits reserve value and emits DepositsReserveSet event + */ + function _setDepositsReserve(uint256 _newDepositsReserve) internal { + DEPOSITS_RESERVE_POSITION.setStorageUint256(_newDepositsReserve); + emit DepositsReserveSet(_newDepositsReserve); + } + + /** + * @notice Returns the currently effective withdrawals reserve + * @dev This reserve is computed after deposits reserve is applied + * @return Amount reserved to satisfy unfinalized withdrawals + */ + function getWithdrawalsReserve() external view returns (uint256) { + return _getBufferedEtherAllocation().withdrawalsReserve; + } + + /** + * @notice Returns configured target for deposits reserve + * @return depositsReserveTarget Configured reserve target in wei + */ + function getDepositsReserveTarget() public view returns (uint256) { + return DEPOSITS_RESERVE_TARGET_POSITION.getStorageUint256(); + } + + /** + * @notice Sets deposits reserve target + * @dev Always updates target and emits DepositsReserveTargetSet + * If target is lowered below current reserve, reserve is reduced immediately + * If target is increased, reserve is not increased here and is synced on report processing via + * `_updateBufferedEtherAllocation()` + * @param _newDepositsReserveTarget New target value in wei + */ + function setDepositsReserveTarget(uint256 _newDepositsReserveTarget) external { + 
_auth(BUFFER_RESERVE_MANAGER_ROLE); + + DEPOSITS_RESERVE_TARGET_POSITION.setStorageUint256(_newDepositsReserveTarget); + emit DepositsReserveTargetSet(_newDepositsReserveTarget); + + uint256 currentDepositsReserve = DEPOSITS_RESERVE_POSITION.getStorageUint256(); + // Do not increase reserve mid-frame: this could reduce available ETH for withdrawals finalization + // relative to the report reference slot assumptions. Increases are applied on oracle report processing. + if (_newDepositsReserveTarget < currentDepositsReserve) { + _setDepositsReserve(_newDepositsReserveTarget); + } } /** @@ -597,6 +707,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /** + * @dev DEPRECATED: Use getBalanceStats() for new integrations * @notice Get the key values related to the Consensus Layer side of the contract. * @return depositedValidators - number of deposited validators from Lido contract side * @return beaconValidators - number of Lido validators visible on Consensus Layer, reported by oracle @@ -607,8 +718,82 @@ contract Lido is Versioned, StETHPermit, AragonApp { view returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance) { - depositedValidators = _getDepositedValidators(); - (beaconBalance, beaconValidators) = _getClBalanceAndClValidators(); + depositedValidators = _getSeedDepositsCount(); + (uint256 clValidatorsBalance, uint256 clPendingBalance) = _getClValidatorsBalanceAndClPendingBalance(); + /// @dev Since there is now no gap between the deposit on EL and its observation on the CL layer, + /// for compatibility, beaconValidators = depositedValidators. 
+ /// @dev beaconBalance returned as sum of active and pending balances because this amounts + /// are visible on the CL side at moment of report + return (depositedValidators, depositedValidators, clValidatorsBalance.add(clPendingBalance)); + } + + /// @notice Returns current balance statistics + /// @return clValidatorsBalanceAtLastReport Sum of validator's active balances in wei + /// @return clPendingBalanceAtLastReport Sum of validator's pending deposits in wei + /// @return depositedSinceLastReport Deposits made since last oracle report + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) + { + (clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport) = _getClValidatorsBalanceAndClPendingBalance(); + + depositedSinceLastReport = _getDepositedPostReport(); + (depositedForCurrentReport,) = _getDepositedNextReportAdjusted(); + /// @dev depositedNextReport is always less than depositedPostReport, so we can safely subtract + depositedForCurrentReport = depositedSinceLastReport - depositedForCurrentReport; + } + + /** + * To accurately track the ETH that was deposited between the refSlot and the report transaction, we use the following + * approach: + * + * Data structure can be represented as: + * - lastNonce - last deposit refSlot + * - depositedPostReport - total sum of all deposits across all periods since the last successful report + * - depositedNextReport - sum of deposits within the current reporting period, to be included in the next report + * + * Flow diagram: + * NOW + * ┌── depositedPostReport ────────────────┐ ↓ + * │○○○○○○○○○○○○○○│○●●○○R○○○●○○●○│○○●●●○○●○●○○○○│○○●●○○●○○●○○○○│ + * ┆ lastReport-↑ currentRefSlot-↑└────⁠┬────┘ + * ┆ ┆ currentReportFrame-↓ ┆ └depositedNextReport + * ⁠║ frame X ⁠║ frame X+1 ⁠║ frame X+2 ⁠║ frame X+3 ⁠║ + * + * R - report transaction slot + * ● - slot with 
deposits + * ○ - empty slot + * ⁠║ - frame refSlot + * + * Logic: + * - On any read/write operation, we first retrieve currentNonce (currentRefSlot) + * - Whenever the nonce changes (i.e. the reporting period changes), we reset depositedNextReport to zero + * - To obtain the exact deposit amount for the reporting periods, we compute: depositedPostReport - depositedNextReport + * - On each deposit, both counters are incremented: depositedPostReport += amount and depositedNextReport += amount + * - At reporting time, deposits already accounted for in the report are excluded from depositedPostReport, leaving + * only the current period: depositedPostReport = depositedNextReport + */ + /// @dev read and adjust the `depositedNextReport` value according to the current frame + function _getDepositedNextReportAdjusted() internal view returns (uint256 depositedNextReport, uint256 curNonce) { + uint256 lastNonce; + (depositedNextReport, lastNonce) = _getDepositedNextReportAndLastDepositNonce(); + (curNonce,) = _getCurrentFrame(); // get current refSlot + if (curNonce != lastNonce) { + // treating all unsettled amounts as belonging to previous periods (aka nonces), + // i.e., as already settled (accounted in upcoming report) + depositedNextReport = 0; + } + } + + /// @dev get currentFrameRefSlot from oracle processing state + function _getCurrentFrame() internal view returns (uint256 refSlot, uint256 refSlotTimestamp) { + (refSlot, refSlotTimestamp) = _accountingOracle().getCurrentFrame(); } /** @@ -621,50 +806,70 @@ contract Lido is Versioned, StETHPermit, AragonApp { /** * @return the amount of ether in the buffer that can be deposited to the Consensus Layer - * @dev Takes into account unfinalized stETH required by WithdrawalQueue + * @dev Equals buffered ether minus withdrawals reserve from `_getBufferedEtherAllocation()` */ - function getDepositableEther() public view returns (uint256) { - uint256 bufferedEther = _getBufferedEther(); - uint256 withdrawalReserve = 
_withdrawalQueue().unfinalizedStETH(); - return bufferedEther > withdrawalReserve ? bufferedEther - withdrawalReserve : 0; + function getDepositableEther() external view returns (uint256) { + return _getDepositableEther(_getBufferedEtherAllocation()); } /** - * @notice Invoke a deposit call to the Staking Router contract and update buffered counters - * @param _maxDepositsCount max deposits count - * @param _stakingModuleId id of the staking module to be deposited - * @param _depositCalldata module calldata + * @notice Calculates depositable amount from precomputed buffer allocation + * @return Depositable amount, equal to `allocation.depositsReserve + allocation.unreserved` */ - function deposit(uint256 _maxDepositsCount, uint256 _stakingModuleId, bytes _depositCalldata) external { - ILidoLocator locator = _getLidoLocator(); - - require(msg.sender == locator.depositSecurityModule(), "APP_AUTH_DSM_FAILED"); - require(canDeposit(), "CAN_NOT_DEPOSIT"); + function _getDepositableEther(BufferedEtherAllocation allocation) internal pure returns (uint256) { + return allocation.depositsReserve + allocation.unreserved; + } - IStakingRouter stakingRouter = _stakingRouter(locator); - uint256 depositsCount = Math256.min( - _maxDepositsCount, - stakingRouter.getStakingModuleMaxDepositsCount(_stakingModuleId, getDepositableEther()) - ); + /** + * @dev Spends depositable buffer and updates stored deposits reserve accordingly. 
+ * Decreases stored deposits reserve by spent amount, bounded below by zero + */ + function _spendDepositableEther(uint256 _depositAmount) internal { + BufferedEtherAllocation memory allocation = _getBufferedEtherAllocation(); + uint256 depositableEther = _getDepositableEther(allocation); + require(_depositAmount <= depositableEther, "NOT_ENOUGH_ETHER"); + + /// @dev the requested amount will be sent to DepositContract, so we increment + /// depositedPostReport counter to keep _getInternalEther value correct + uint256 depositedPostReport = _getDepositedPostReport().add(_depositAmount); + _setBufferedEtherAndDepositedPostReport(allocation.total.sub(_depositAmount), depositedPostReport); + emit Unbuffered(_depositAmount); + + (uint256 depositedNextReport, uint256 curNonce) = _getDepositedNextReportAdjusted(); + depositedNextReport = depositedNextReport.add(_depositAmount); + _setDepositedNextReportAndLastDepositNonce(depositedNextReport, curNonce); + + uint256 storedDepositsReserve = DEPOSITS_RESERVE_POSITION.getStorageUint256(); + if (storedDepositsReserve > 0) { + _setDepositsReserve(storedDepositsReserve > _depositAmount ? storedDepositsReserve - _depositAmount : 0); + } + } - uint256 depositsValue; - if (depositsCount > 0) { - depositsValue = depositsCount.mul(DEPOSIT_SIZE); - /// @dev firstly update the local state of the contract to prevent a reentrancy attack, - /// even if the StakingRouter is a trusted contract. + /** + * @notice Withdraw `_amount` of buffer to Staking Router + * @dev Can be called only by the Staking Router contract + * @notice _seedDepositsCount - DEPRECATED, it is used only for backward compatibility + * + * @param _amount amount of ETH to withdraw + * @param _seedDepositsCount amount of seed deposits. 
In case of top up this value will be equal to 0 + */ + function withdrawDepositableEther(uint256 _amount, uint256 _seedDepositsCount) external { + require(canDeposit(), "CAN_NOT_DEPOSIT"); + IStakingRouter stakingRouter = _stakingRouter(); + _auth(address(stakingRouter)); + require(_amount != 0, "ZERO_AMOUNT"); - (uint256 bufferedEther, uint256 depositedValidators) = _getBufferedEtherAndDepositedValidators(); - depositedValidators = depositedValidators.add(depositsCount); + _spendDepositableEther(_amount); - _setBufferedEtherAndDepositedValidators(bufferedEther.sub(depositsValue), depositedValidators); - emit Unbuffered(depositsValue); - emit DepositedValidatorsChanged(depositedValidators); + if (_seedDepositsCount > 0) { + uint256 newSeedDepositsCount = _getSeedDepositsCount().add(_seedDepositsCount); + _setSeedDepositsCount(newSeedDepositsCount); + /// @dev event name is kept for backward compatibility + emit DepositedValidatorsChanged(newSeedDepositsCount); } - /// @dev transfer ether to StakingRouter and make a deposit at the same time. All the ether - /// sent to StakingRouter is counted as deposited. 
If StakingRouter can't deposit all - /// passed ether it MUST revert the whole transaction (never happens in normal circumstances) - stakingRouter.deposit.value(depositsValue)(depositsCount, _stakingModuleId, _depositCalldata); + /// @dev forward the requested amount of ether to the StakingRouter + stakingRouter.receiveDepositableEther.value(_amount)(); } /** @@ -740,16 +945,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { _burnShares(msg.sender, _amountOfShares); uint256 stethAmount = getPooledEthByShares(_amountOfShares); - StakeLimitState.Data memory stakeLimitData = STAKING_STATE_POSITION.getStorageStakeLimitStruct(); - - /// NB: burning external shares must be allowed even when staking is paused to allow external ether withdrawals - if (stakeLimitData.isStakingLimitSet() && !stakeLimitData.isStakingPaused()) { - uint256 newStakeLimit = stakeLimitData.calculateCurrentStakeLimit() + stethAmount; - - STAKING_STATE_POSITION.setStorageStakeLimitStruct( - stakeLimitData.updatePrevStakeLimit(newStakeLimit) - ); - } + _increaseStakingLimit(stethAmount); // Historically, Lido contract does not emit Transfer to zero address events // for burning but emits SharesBurnt instead, so it's kept here for compatibility @@ -795,26 +991,29 @@ contract Lido is Versioned, StETHPermit, AragonApp { /** * @notice Process CL related state changes as a part of the report processing * @dev All data validation was done by Accounting and OracleReportSanityChecker + * @dev Replaces validator counting in v3 with direct balance tracking for EIP-7251 support * @param _reportTimestamp timestamp of the report - * @param _preClValidators number of validators in the previous CL state (for event compatibility) - * @param _reportClValidators number of validators in the current CL state - * @param _reportClBalance total balance of the current CL state + * @param _clValidatorsBalance Validators balance on the consensus layer + * @param _clPendingBalance Pending deposits balance on the 
consensus layer */ - function processClStateUpdate( - uint256 _reportTimestamp, - uint256 _preClValidators, - uint256 _reportClValidators, - uint256 _reportClBalance - ) external { + function processClStateUpdate(uint256 _reportTimestamp, uint256 _clValidatorsBalance, uint256 _clPendingBalance) + external + { _whenNotStopped(); _auth(_accounting()); - // Save the current CL balance and validators to - // calculate rewards on the next rebase - _setClBalanceAndClValidators(_reportClBalance, _reportClValidators); + (uint256 depositedNextReport, uint256 curNonce) = _getDepositedNextReportAdjusted(); + /// @dev just save adjusted depositedNextReport + _setDepositedNextReportAndLastDepositNonce(depositedNextReport, curNonce); + /// @dev Since `depositedPostReport` accumulates all deposits, including those that occurred + /// after `refSlot` but before the report, we must retain only the amount not + /// reflected in the report + _setDepositedPostReport(depositedNextReport); - emit CLValidatorsUpdated(_reportTimestamp, _preClValidators, _reportClValidators); - // cl balance change are logged in ETHDistributed event later + /// @dev new values of clValidatorsBalance and clPendingBalance should reflect all + /// deposits during the report frame + _setClValidatorsBalanceAndClPendingBalance(_clValidatorsBalance, _clPendingBalance); + emit CLBalancesUpdated(_reportTimestamp, _clValidatorsBalance, _clPendingBalance); } /** @@ -883,10 +1082,9 @@ contract Lido is Versioned, StETHPermit, AragonApp { // finalize withdrawals (send ether, assign shares for burning) if (_etherToLockOnWithdrawalQueue > 0) { - _withdrawalQueue(locator).finalize.value(_etherToLockOnWithdrawalQueue)( - _lastWithdrawalRequestToFinalize, - _withdrawalsShareRate - ); + _withdrawalQueue(locator) + .finalize + .value(_etherToLockOnWithdrawalQueue)(_lastWithdrawalRequestToFinalize, _withdrawalsShareRate); } uint256 postBufferedEther = _getBufferedEther() @@ -895,6 +1093,7 @@ contract Lido is Versioned, 
StETHPermit, AragonApp { .sub(_etherToLockOnWithdrawalQueue); // Sent to WithdrawalQueue _setBufferedEther(postBufferedEther); + _updateBufferedEtherAllocation(); emit ETHDistributed( _reportTimestamp, @@ -906,18 +1105,30 @@ contract Lido is Versioned, StETHPermit, AragonApp { ); } + /** + * @dev Syncs stored deposits reserve to configured target after oracle report processing + */ + function _updateBufferedEtherAllocation() internal { + uint256 depositsReserveTarget = getDepositsReserveTarget(); + uint256 depositsReserve = DEPOSITS_RESERVE_POSITION.getStorageUint256(); + + if (depositsReserve != depositsReserveTarget) { + _setDepositsReserve(depositsReserveTarget); + } + } + /** * @notice Emits the `TokenRebase` and `InternalShareRateUpdated` events - * @param _reportTimestamp timestamp of the refSlot block fro the report applied + * @param _reportTimestamp timestamp of the refSlot block for the report applied * @param _timeElapsed seconds since the previous applied report * @param _preTotalShares the total number of shares before the oracle report tx * @param _preTotalEther the total amount of ether before the oracle report tx * @param _postTotalShares the total number of shares after the oracle report tx * @param _postTotalEther the total amount of ether after the oracle report tx - * @param _postInternalShares the total number of internal shares before the oracle report tx + * @param _postInternalShares the total number of internal shares after the oracle report tx * @param _postInternalEther the total amount of internal ether after the oracle tx * @param _sharesMintedAsFees the number of shares minted to pay fees to Lido and StakingModules - * @dev these events are used to calculate protocol gross (without protocol fess deducted) and net APR (StETH APR) + * @dev these events are used to calculate protocol gross (without protocol fees deducted) and net APR (StETH APR) * * preShareRate = preTotalEther * 1e27 / preTotalShares * postShareRate = postTotalEther * 
1e27 / postTotalShares @@ -955,7 +1166,11 @@ contract Lido is Versioned, StETHPermit, AragonApp { /** * @notice Overrides default AragonApp behavior to disallow recovery. */ - function transferToVault(address /* _token */) external { + function transferToVault( + address /* _token */ + ) + external + { revert("NOT_SUPPORTED"); } @@ -964,7 +1179,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { //////////////////////////////////////////////////////////////////////////// /** - * @notice DEPRECATED: Returns current withdrawal credentials of deposited validators + * @notice DEPRECATED: Returns current 0x01 withdrawal credentials of deposited validators * @dev DEPRECATED: use StakingRouter.getWithdrawalCredentials() instead */ function getWithdrawalCredentials() external view returns (bytes32) { @@ -1010,15 +1225,14 @@ contract Lido is Versioned, StETHPermit, AragonApp { IStakingRouter stakingRouter = _stakingRouter(); uint256 totalBasisPoints = stakingRouter.TOTAL_BASIS_POINTS(); uint256 totalFee = stakingRouter.getTotalFeeE4Precision(); - (uint256 treasuryFeeBasisPointsAbs, uint256 operatorsFeeBasisPointsAbs) = stakingRouter - .getStakingFeeAggregateDistributionE4Precision(); + (uint256 treasuryFeeBasisPointsAbs, uint256 operatorsFeeBasisPointsAbs) = + stakingRouter.getStakingFeeAggregateDistributionE4Precision(); insuranceFeeBasisPoints = 0; // explicitly set to zero treasuryFeeBasisPoints = uint16((treasuryFeeBasisPointsAbs * totalBasisPoints) / totalFee); operatorsFeeBasisPoints = uint16((operatorsFeeBasisPointsAbs * totalBasisPoints) / totalFee); } - /// @dev Process user deposit, mint liquid tokens and increase the pool buffer /// @param _referral address of referral. 
/// @return amount of StETH shares minted @@ -1039,20 +1253,14 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /// @dev Get the total amount of ether controlled by the protocol internally - /// (buffered + CL balance of StakingRouter controlled validators + transient) + /// (buffered ether + CL validators balance + CL pending balance + deposited since last report) function _getInternalEther() internal view returns (uint256) { - (uint256 bufferedEther, uint256 depositedValidators) = _getBufferedEtherAndDepositedValidators(); - (uint256 clBalance, uint256 clValidators) = _getClBalanceAndClValidators(); - - // clValidators can never exceed depositedValidators. - assert(depositedValidators >= clValidators); - // the total base balance (multiple of 32) of validators in transient state, - // i.e. submitted to the official Deposit contract but not yet visible in the CL state. - uint256 transientEther = (depositedValidators - clValidators) * DEPOSIT_SIZE; + (uint256 bufferedEther, uint256 depositedPostReport) = _getBufferedEtherAndDepositedPostReport(); + (uint256 clValidatorsBalance, uint256 clPendingBalance) = _getClValidatorsBalanceAndClPendingBalance(); - return bufferedEther - .add(clBalance) - .add(transientEther); + // With balance-based accounting, we don't need to calculate transientEther + // as pending deposits are already included in clPendingBalance + return bufferedEther.add(clValidatorsBalance).add(clPendingBalance).add(depositedPostReport); } /// @dev Calculate the amount of ether controlled by external entities @@ -1105,9 +1313,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { if (totalShares * maxRatioBP <= externalShares * TOTAL_BASIS_POINTS) return 0; - return - (totalShares * maxRatioBP - externalShares * TOTAL_BASIS_POINTS) / - (TOTAL_BASIS_POINTS - maxRatioBP); + return (totalShares * maxRatioBP - externalShares * TOTAL_BASIS_POINTS) / (TOTAL_BASIS_POINTS - maxRatioBP); } function _pauseStaking() internal { @@ -1154,6 +1360,16 @@ 
contract Lido is Versioned, StETHPermit, AragonApp { } } + function _increaseStakingLimit(uint256 _amount) internal { + StakeLimitState.Data memory stakeLimitData = STAKING_STATE_POSITION.getStorageStakeLimitStruct(); + /// NB: burning external shares must be allowed even when staking is paused to allow external ether withdrawals + if (stakeLimitData.isStakingLimitSet() && !stakeLimitData.isStakingPaused()) { + uint256 newStakeLimit = stakeLimitData.calculateCurrentStakeLimit() + _amount; + + STAKING_STATE_POSITION.setStorageStakeLimitStruct(stakeLimitData.updatePrevStakeLimit(newStakeLimit)); + } + } + /// @dev Bytecode size-efficient analog of the `auth(_role)` modifier /// @param _role Permission name function _auth(bytes32 _role) internal view { @@ -1165,12 +1381,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { require(msg.sender == _address, "APP_AUTH_FAILED"); } - function _stakingRouter(ILidoLocator _locator) internal view returns (IStakingRouter) { - return IStakingRouter(_locator.stakingRouter()); - } - function _stakingRouter() internal view returns (IStakingRouter) { - return _stakingRouter(_getLidoLocator()); + return IStakingRouter(_getLidoLocator().stakingRouter()); } function _withdrawalQueue(ILidoLocator _locator) internal view returns (IWithdrawalQueue) { @@ -1201,6 +1413,10 @@ contract Lido is Versioned, StETHPermit, AragonApp { return _accounting(_getLidoLocator()); } + function _accountingOracle() internal view returns (IAccountingOracle) { + return IAccountingOracle(_getLidoLocator().accountingOracle()); + } + function _elRewardsVault(ILidoLocator _locator) internal view returns (ILidoExecutionLayerRewardsVault) { return ILidoExecutionLayerRewardsVault(_locator.elRewardsVault()); } @@ -1252,44 +1468,74 @@ contract Lido is Versioned, StETHPermit, AragonApp { return TOTAL_AND_EXTERNAL_SHARES_POSITION.getLowAndHighUint128(); } + // helpers: buffered ether and deposited ether since last report + function _getBufferedEther() internal view 
returns (uint256) { - return BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getLowUint128(); + return BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.getLowUint128(); + } + + function _getDepositedPostReport() internal view returns (uint256) { + return BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.getHighUint128(); + } + + function _getBufferedEtherAndDepositedPostReport() internal view returns (uint256, uint256) { + return BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.getLowAndHighUint128(); } function _setBufferedEther(uint256 _newBufferedEther) internal { - BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setLowUint128(_newBufferedEther); + BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.setLowUint128(_newBufferedEther); } - function _getDepositedValidators() internal view returns (uint256) { - return BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getHighUint128(); + function _setDepositedPostReport(uint256 _newDepositedPostReport) internal { + BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.setHighUint128(_newDepositedPostReport); } - function _setDepositedValidators(uint256 _newDepositedValidators) internal { - BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setHighUint128(_newDepositedValidators); + function _setBufferedEtherAndDepositedPostReport(uint256 _newBufferedEther, uint256 _newDepositedPostReport) + internal + { + BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.setLowAndHighUint128( + _newBufferedEther, _newDepositedPostReport + ); } - function _getBufferedEtherAndDepositedValidators() internal view returns (uint256, uint256) { - return BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getLowAndHighUint128(); + function _getDepositedNextReportAndLastDepositNonce() internal view returns (uint256, uint256) { + return DEPOSITED_NEXT_REPORT_AND_LAST_DEPOSIT_NONCE_POSITION.getLowAndHighUint128(); } - function _setBufferedEtherAndDepositedValidators( - uint256 _newBufferedEther, - uint256 _newDepositedValidators - ) internal { - 
BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setLowAndHighUint128( - _newBufferedEther, - _newDepositedValidators + function _setDepositedNextReportAndLastDepositNonce(uint256 _depositedNextReport, uint256 _lastDepositNonce) + internal + { + DEPOSITED_NEXT_REPORT_AND_LAST_DEPOSIT_NONCE_POSITION.setLowAndHighUint128( + _depositedNextReport, _lastDepositNonce ); } - function _getClBalanceAndClValidators() internal view returns (uint256, uint256) { - return CL_BALANCE_AND_CL_VALIDATORS_POSITION.getLowAndHighUint128(); + // helpers: [DEPRECATED] deposited validators count + + function _getSeedDepositsCount() internal view returns (uint256) { + return SEED_DEPOSITS_COUNT_POSITION.getLowUint128(); + } + + function _setSeedDepositsCount(uint256 _newSeedDepositsCount) internal { + SEED_DEPOSITS_COUNT_POSITION.setLowUint128(_newSeedDepositsCount); + } + + // helpers: CL validators and pending balances + + function _getClValidatorsBalanceAndClPendingBalance() internal view returns (uint256, uint256) { + return CL_VALIDATORS_BALANCE_AND_CL_PENDING_BALANCE_POSITION.getLowAndHighUint128(); } - function _setClBalanceAndClValidators(uint256 _newClBalance, uint256 _newClValidators) internal { - CL_BALANCE_AND_CL_VALIDATORS_POSITION.setLowAndHighUint128(_newClBalance, _newClValidators); + function _setClValidatorsBalanceAndClPendingBalance(uint256 _newClValidatorsBalance, uint256 _newClPendingBalance) + internal + { + CL_VALIDATORS_BALANCE_AND_CL_PENDING_BALANCE_POSITION.setLowAndHighUint128( + _newClValidatorsBalance, _newClPendingBalance + ); } + // --- + function _setLidoLocator(address _newLidoLocator) internal { LOCATOR_AND_MAX_EXTERNAL_RATIO_POSITION.setLowUint160(uint160(_newLidoLocator)); } diff --git a/contracts/0.4.24/template/LidoTemplate.sol b/contracts/0.4.24/template/LidoTemplate.sol index 92c01a16d4..e145189f53 100644 --- a/contracts/0.4.24/template/LidoTemplate.sol +++ b/contracts/0.4.24/template/LidoTemplate.sol @@ -614,8 +614,7 @@ contract LidoTemplate is 
IsContract { perms[1] = _state.lido.RESUME_ROLE(); perms[2] = _state.lido.STAKING_PAUSE_ROLE(); perms[3] = _state.lido.STAKING_CONTROL_ROLE(); - perms[4] = _state.lido.UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE(); - for (i = 0; i < 5; ++i) { + for (i = 0; i < 4; ++i) { _createPermissionForAgent(acl, _state.lido, perms[i], agent); } } diff --git a/contracts/0.8.25/CLValidatorVerifier.sol b/contracts/0.8.25/CLValidatorVerifier.sol new file mode 100644 index 0000000000..9fad56ddb1 --- /dev/null +++ b/contracts/0.8.25/CLValidatorVerifier.sol @@ -0,0 +1,108 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +import {GIndex, pack, concat} from "contracts/common/lib/GIndex.sol"; +import {SSZ} from "contracts/common/lib/SSZ.sol"; +import {BLS12_381} from "contracts/common/lib/BLS.sol"; +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/ValidatorWitness.sol"; + +/** + * @title CLValidatorVerifier + * @author Lido + * @notice + * + * Smart contract verifying CL data of validators + */ +abstract contract CLValidatorVerifier { + // BeaconBlockHeader: state_root field gindex + uint8 private constant STATE_ROOT_DEPTH = 3; + uint256 private constant STATE_ROOT_POSITION = 3; + GIndex public immutable GI_STATE_ROOT = pack((1 << STATE_ROOT_DEPTH) + STATE_ROOT_POSITION, STATE_ROOT_DEPTH); + + // Position (from the end) of parent(slot, proposerIndex) node inside concatenated proof + uint256 private constant SLOT_PROPOSER_PARENT_PROOF_OFFSET = 2; + // EIP-4788 system contract + address public constant BEACON_ROOTS = 0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02; + + // validators[0] gindex before/after fork layout change + GIndex public immutable GI_FIRST_VALIDATOR_PREV; + GIndex public immutable GI_FIRST_VALIDATOR_CURR; + uint64 public immutable PIVOT_SLOT; + + error InvalidSlot(); + error RootNotFound(); + + constructor(GIndex _gIFirstValidatorPrev, GIndex _gIFirstValidatorCurr, uint64 _pivotSlot) { + 
GI_FIRST_VALIDATOR_PREV = _gIFirstValidatorPrev; + GI_FIRST_VALIDATOR_CURR = _gIFirstValidatorCurr; + PIVOT_SLOT = _pivotSlot; + } + + /// @notice Proves validator[i] under the same EIP-4788 anchor, checks WC, checks active status + function _verifyValidator( + BeaconRootData calldata _beaconRootData, + ValidatorWitness calldata _vw, + uint256 _validatorIndex, + bytes32 _expectedWithdrawalCredentials + ) internal view virtual { + _verifySlot(_vw.proofValidator, _beaconRootData.slot, _beaconRootData.proposerIndex); + + bytes32 parentBlockRoot = _getParentBlockRoot(_beaconRootData.childBlockTimestamp); + + GIndex gIndexValidator = concat(GI_STATE_ROOT, _getValidatorGI(_validatorIndex, _beaconRootData.slot)); + bytes32 validatorLeaf = _validatorHashTreeRoot(_vw, _expectedWithdrawalCredentials); + SSZ.verifyProof({proof: _vw.proofValidator, root: parentBlockRoot, leaf: validatorLeaf, gI: gIndexValidator}); + } + + /// @dev SSZ hash_tree_root(Validator) computed from witness fields. + function _validatorHashTreeRoot(ValidatorWitness calldata _w, bytes32 _expectedWithdrawalCredentials) + internal + view + returns (bytes32) + { + bytes32[8] memory leaves; + leaves[0] = BLS12_381.pubkeyRoot(_w.pubkey); + leaves[1] = _expectedWithdrawalCredentials; + leaves[2] = SSZ.toLittleEndian(_w.effectiveBalance); + leaves[3] = SSZ.toLittleEndian(_w.slashed ? 
uint64(1) : 0); + leaves[4] = SSZ.toLittleEndian(_w.activationEligibilityEpoch); + leaves[5] = SSZ.toLittleEndian(_w.activationEpoch); + leaves[6] = SSZ.toLittleEndian(_w.exitEpoch); + leaves[7] = SSZ.toLittleEndian(_w.withdrawableEpoch); + + bytes32[4] memory l1; + l1[0] = BLS12_381.sha256Pair(leaves[0], leaves[1]); + l1[1] = BLS12_381.sha256Pair(leaves[2], leaves[3]); + l1[2] = BLS12_381.sha256Pair(leaves[4], leaves[5]); + l1[3] = BLS12_381.sha256Pair(leaves[6], leaves[7]); + + bytes32[2] memory l2; + l2[0] = BLS12_381.sha256Pair(l1[0], l1[1]); + l2[1] = BLS12_381.sha256Pair(l1[2], l1[3]); + + return BLS12_381.sha256Pair(l2[0], l2[1]); + } + + /// @dev Checks that (slot, proposerIndex) parent node is present in the same concatenated proof. + function _verifySlot(bytes32[] calldata _proof, uint64 _slot, uint64 _proposerIndex) internal view { + bytes32 parentSlotProposer = BLS12_381.sha256Pair(SSZ.toLittleEndian(_slot), SSZ.toLittleEndian(_proposerIndex)); + if (_proof[_proof.length - SLOT_PROPOSER_PARENT_PROOF_OFFSET] != parentSlotProposer) { + revert InvalidSlot(); + } + } + + /// @dev GIndex for Validator[i] given slot (fork-aware). + function _getValidatorGI(uint256 _offset, uint64 _provenSlot) internal view returns (GIndex) { + GIndex gI = _provenSlot < PIVOT_SLOT ? GI_FIRST_VALIDATOR_PREV : GI_FIRST_VALIDATOR_CURR; + return gI.shr(_offset); + } + + /// @dev Reads parent_beacon_block_root from EIP-4788 by timestamp. 
+ function _getParentBlockRoot(uint64 _childBlockTimestamp) internal view returns (bytes32) { + (bool success, bytes memory data) = BEACON_ROOTS.staticcall(abi.encode(_childBlockTimestamp)); + if (!success || data.length == 0) revert RootNotFound(); + return abi.decode(data, (bytes32)); + } +} diff --git a/contracts/0.8.25/TopUpGateway.sol b/contracts/0.8.25/TopUpGateway.sol new file mode 100644 index 0000000000..5454b2ddde --- /dev/null +++ b/contracts/0.8.25/TopUpGateway.sol @@ -0,0 +1,424 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {TopUpData, BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/TopUpWitness.sol"; +import {CLValidatorVerifier} from "./CLValidatorVerifier.sol"; +import { + AccessControlEnumerableUpgradeable +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol"; + +interface ILidoLocator { + function stakingRouter() external view returns (address); + function lido() external view returns (address); +} + +interface IStakingRouter { + function getStakingModuleWithdrawalCredentials(uint256 _stakingModuleId) external view returns (bytes32); + function canDeposit(uint256 _stakingModuleId) external view returns (bool); + function topUp( + uint256 _stakingModuleId, + uint256[] calldata _keyIndices, + uint256[] calldata _operatorIds, + bytes[] calldata _pubkeys, + uint256[] calldata _topUpLimits + ) external; +} + +interface ILido { + function canDeposit() external view returns (bool); +} + +/** + * @title TopUpGateway + * @author Lido + * @notice TopUpGateway is a contract that serves as the entry point for validator top-ups + */ +contract TopUpGateway is CLValidatorVerifier, AccessControlEnumerableUpgradeable { + using WithdrawalCredentials 
for bytes32; + + ILidoLocator internal immutable LOCATOR; + + struct Storage { + uint64 maxValidatorsPerTopUp; // 64 + uint32 lastTopUpTimestamp; // 32 + uint32 lastTopUpBlock; // 32 + uint16 minBlockDistance; // 16 + uint16 maxRootAge; // 16 + uint64 targetBalanceGwei; // 64 + uint64 minTopUpGwei; // 64 + } + + /// @dev Storage slot: keccak256(abi.encode(uint256(keccak256("lido.TopUpGateway.storage")) - 1)) & ~bytes32(uint256(0xff)) + bytes32 internal constant GATEWAY_STORAGE_POSITION = + 0x22e512057841e2bc1e6d80030c8bb8b4935377af2e64ba9bf8e6a3e88fb32200; + + uint256 internal constant PUBKEY_LENGTH = 48; + uint256 internal constant FAR_FUTURE_EPOCH = type(uint64).max; + uint256 public immutable SLOTS_PER_EPOCH; + + bytes32 public constant TOP_UP_ROLE = keccak256("TOP_UP_ROLE"); + bytes32 public constant MANAGE_LIMITS_ROLE = keccak256("MANAGE_LIMITS_ROLE"); + + constructor( + address _lidoLocator, + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot, + uint256 _slotsPerEpoch + ) CLValidatorVerifier(_gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot) { + if (_lidoLocator == address(0)) revert ZeroArgument("_lidoLocator"); + LOCATOR = ILidoLocator(_lidoLocator); + SLOTS_PER_EPOCH = _slotsPerEpoch; + _disableInitializers(); + } + + /// @notice Initializes the TopUpGateway proxy with admin, rate limits, and top-up balance parameters. + /// @param _admin Address to receive DEFAULT_ADMIN_ROLE + /// @param _maxValidatorsPerTopUp Maximum number of validators per single topUp call + /// @param _minBlockDistance Minimum blocks between topUp calls + /// @param _maxRootAgeSec Maximum age (seconds) of beacon root relative to block.timestamp + /// @param _targetBalanceGwei Target validator balance ceiling after top-up (in Gwei). + /// Top-up amount = targetBalance - currentTotal. + /// @param _minTopUpGwei Minimum top-up that can be performed (in Gwei). If calculated top-up < minTopUp, returns 0. + /// Must be <= _targetBalanceGwei. 
+ /// + /// @dev Ethereum reference values (0x02 validators, MAX_EFFECTIVE_BALANCE = 2048 ETH): + /// _targetBalanceGwei = 2046.75 ETH (2048e9 - 1.25e9 Gwei) — leaves 1.25 ETH safety margin + /// _minTopUpGwei = 1 ETH (1e9 Gwei) — skip top-ups below 1 ETH + function initialize( + address _admin, + uint256 _maxValidatorsPerTopUp, + uint256 _minBlockDistance, + uint256 _maxRootAgeSec, + uint256 _targetBalanceGwei, + uint256 _minTopUpGwei + ) external initializer { + if (_admin == address(0)) revert ZeroArgument("_admin"); + __AccessControlEnumerable_init(); + _grantRole(DEFAULT_ADMIN_ROLE, _admin); + _setMaxValidatorsPerTopUp(_maxValidatorsPerTopUp); + _setMinBlockDistance(_minBlockDistance); + _setMaxRootAge(_maxRootAgeSec); + _setTopUpBalanceLimits(_targetBalanceGwei, _minTopUpGwei); + } + + /** + * @notice Method verifying Merkle proofs on validators and proceeding to top up validators + * via StakingRouter.topUp(stakingModuleId, keyIndices, operatorIds, pubkeys, topUpLimits) + * @param _topUps TopUpData structure, containing validators' container fields, pending deposits + * and Merkle proofs on inclusion of each container in Beacon State tree + * @dev Only callable by accounts with TOP_UP_ROLE. + * + * validatorIndices MUST be sorted in strictly ascending order. The corresponding keyIndices, + * operatorIds, validatorWitness and pendingBalanceGwei arrays must be aligned by position + * to validatorIndices[i]. 
+ * + * Reverts if: + * - the caller doesn't have TOP_UP_ROLE (AccessControl); + * - validatorIndices is empty, or any of keyIndices, operatorIds, validatorWitness, + * pendingBalanceGwei has a length different from validatorIndices + * (`WrongArrayLength`); + * - validatorIndices length exceeds maxValidatorsPerTopUp (`MaxValidatorsPerTopUpExceeded`); + * - validatorIndices is not strictly increasing (not sorted or contains duplicates) (`InvalidValidatorIndicesSortOrder`); + * - fewer than minBlockDistance blocks have passed since the last top-up (`MinBlockDistanceNotMet`); + * - the beacon root is older than maxRootAge relative to block.timestamp (`RootIsTooOld`); + * - the beacon root childBlockTimestamp is not newer than the last top-up timestamp + * (`RootPrecedesLastTopUp`); + * - the module's withdrawal credentials are not of type 0x02 (`WrongWithdrawalCredentials`); + * - any validator pubkey has a length different from 48 bytes (`WrongPubkeyLength`); + * - any validator has activationEpoch >= current epoch (derived from beacon root slot) (`ValidatorIsNotActivated`); + * - any validator Merkle proof fails verification in CLValidatorVerifier. + */ + function topUp(TopUpData calldata _topUps) external onlyRole(TOP_UP_ROLE) { + Storage storage $ = _gatewayStorage(); + + uint256 validatorsCount = _topUps.validatorIndices.length; + if (validatorsCount == 0) revert WrongArrayLength(); + + if ( + _topUps.keyIndices.length != validatorsCount || _topUps.operatorIds.length != validatorsCount + || _topUps.validatorWitness.length != validatorsCount + || _topUps.pendingBalanceGwei.length != validatorsCount + ) { + revert WrongArrayLength(); + } + + if (validatorsCount > $.maxValidatorsPerTopUp) { + revert MaxValidatorsPerTopUpExceeded(); + } + + // Require validatorIndices to be strictly increasing. 
+ for (uint256 i = 1; i < validatorsCount; ++i) { + if (_topUps.validatorIndices[i] <= _topUps.validatorIndices[i - 1]) { + revert InvalidValidatorIndicesSortOrder(); + } + } + + // Distance is for flexibility in future to control top-up frequency + _requireBlockDistancePassed(); + + // Check proof age + // 0. _topUps.beaconRootData.childBlockTimestamp is newer than timestamp of last top up + // 1. _topUps.beaconRootData.childBlockTimestamp is not older than maxRootAge + _verifyRootAge(_topUps.beaconRootData); + + IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); + + // Find and validate withdrawalCredentials 0x02 + bytes32 withdrawalCredentials = stakingRouter.getStakingModuleWithdrawalCredentials(_topUps.moduleId); + _requireWithdrawalCredentials02(withdrawalCredentials); + + bytes[] memory pubkeys = new bytes[](validatorsCount); + + uint256[] memory topUpLimits = new uint256[](validatorsCount); + + // 1. Evaluate top-up limit based on current balance, pending deposits, and configured limits + // 2. 
Verify proof data through CLValidatorProofVerifier + unchecked { + for (uint256 i; i < validatorsCount; ++i) { + // For each validator + ValidatorWitness calldata vw = _topUps.validatorWitness[i]; + + if (vw.pubkey.length != PUBKEY_LENGTH) { + revert WrongPubkeyLength(); + } + + _verifyValidatorWasActivated(_topUps.beaconRootData.slot, vw); + + _verifyValidator(_topUps.beaconRootData, vw, _topUps.validatorIndices[i], withdrawalCredentials); + + pubkeys[i] = vw.pubkey; + + // calculate top up limit accounting for current balance and pending deposits + topUpLimits[i] = _evaluateTopUpLimit(vw, _topUps.pendingBalanceGwei[i]) * 1 gwei; + } + } + + // Proceed to StakingRouter + IStakingRouter(stakingRouter) + .topUp(_topUps.moduleId, _topUps.keyIndices, _topUps.operatorIds, pubkeys, topUpLimits); + + _setLastTopUpData(); + } + + /** + * @notice Checks if top-up is possible for a given staking module + * @param _stakingModuleId Id of the staking module + * @return True if top-up is possible, false otherwise + * @dev Checks: module exists, module is active, block distance passed, Lido can deposit, and withdrawal credentials are 0x02 + */ + function canTopUp(uint256 _stakingModuleId) external view returns (bool) { + IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); + + if (!stakingRouter.canDeposit(_stakingModuleId)) return false; + if (!ILido(LOCATOR.lido()).canDeposit()) return false; + if (!_isBlockDistancePassed()) return false; + + bytes32 wc = stakingRouter.getStakingModuleWithdrawalCredentials(_stakingModuleId); + return wc.isType2(); + } + + /** + * @notice Returns the timestamp when last top up happened + */ + function getLastTopUpTimestamp() external view returns (uint256) { + return _gatewayStorage().lastTopUpTimestamp; + } + + /** + * @notice Returns the allowed amount of validators per top up + */ + function getMaxValidatorsPerTopUp() external view returns (uint256) { + return _gatewayStorage().maxValidatorsPerTopUp; + } + + /** + * 
@notice Returns the min block distance that should pass from last top up + */ + function getMinBlockDistance() external view returns (uint256) { + return _gatewayStorage().minBlockDistance; + } + + /** + * @notice Returns the maximum age (seconds) of beacon root relative to block.timestamp + */ + function getMaxRootAge() external view returns (uint256) { + return _gatewayStorage().maxRootAge; + } + + /** + * @notice Returns target validator balance ceiling after top-up (in Gwei) + */ + function getTargetBalanceGwei() external view returns (uint256) { + return _gatewayStorage().targetBalanceGwei; + } + + /** + * @notice Returns minimum top-up that can be performed (in Gwei). + */ + function getMinTopUpGwei() external view returns (uint256) { + return _gatewayStorage().minTopUpGwei; + } + + /** + * @notice Set max validators per top up value + * @param _newValue Max validators per top up value + */ + function setMaxValidatorsPerTopUp(uint256 _newValue) external onlyRole(MANAGE_LIMITS_ROLE) { + _setMaxValidatorsPerTopUp(_newValue); + } + + /** + * @notice Set min block distance + * @param _newValue Min block distance + */ + function setMinBlockDistance(uint256 _newValue) external onlyRole(MANAGE_LIMITS_ROLE) { + _setMinBlockDistance(_newValue); + } + + /** + * @notice Set targetBalanceGwei and minTopUpGwei values + * @param _targetBalanceGwei target validator balance ceiling after top-up (in Gwei) + * @param _minTopUpGwei minimum top-up that can be performed (in Gwei). 
+ */ + function setTopUpBalanceLimits(uint256 _targetBalanceGwei, uint256 _minTopUpGwei) + external + onlyRole(MANAGE_LIMITS_ROLE) + { + _setTopUpBalanceLimits(_targetBalanceGwei, _minTopUpGwei); + } + + /// @notice Sets the maximum allowed age of beacon root relative to current block timestamp + /// @param _newValue Maximum age in seconds + function setMaxRootAge(uint256 _newValue) external onlyRole(MANAGE_LIMITS_ROLE) { + _setMaxRootAge(_newValue); + } + + function _isBlockDistancePassed() internal view returns (bool) { + Storage storage $ = _gatewayStorage(); + return $.lastTopUpBlock == 0 || block.number - $.lastTopUpBlock >= $.minBlockDistance; + } + + function _requireBlockDistancePassed() internal view { + if (!_isBlockDistancePassed()) { + revert MinBlockDistanceNotMet(); + } + } + + function _requireWithdrawalCredentials02(bytes32 _wc) internal pure { + if (!_wc.isType2()) { + revert WrongWithdrawalCredentials(); + } + } + + function _setLastTopUpData() internal { + Storage storage $ = _gatewayStorage(); + $.lastTopUpTimestamp = uint32(block.timestamp); + $.lastTopUpBlock = uint32(block.number); + emit LastTopUpChanged(block.timestamp); + } + + function _setMaxRootAge(uint256 _newValue) internal { + if (_newValue == 0) revert ZeroValue(); + if (_newValue > type(uint16).max) revert TooLargeValue(); + _gatewayStorage().maxRootAge = uint16(_newValue); + + emit MaxRootAgeChanged(_newValue); + } + + function _setMaxValidatorsPerTopUp(uint256 _newValue) internal { + if (_newValue == 0) revert ZeroValue(); + if (_newValue > type(uint64).max) revert TooLargeValue(); + _gatewayStorage().maxValidatorsPerTopUp = uint64(_newValue); + emit MaxValidatorsPerTopUpChanged(_newValue); + } + + function _setMinBlockDistance(uint256 _newValue) internal { + if (_newValue == 0) revert ZeroValue(); + if (_newValue > type(uint16).max) revert TooLargeValue(); + _gatewayStorage().minBlockDistance = uint16(_newValue); + emit MinBlockDistanceChanged(_newValue); + } + + function 
_setTopUpBalanceLimits(uint256 _targetBalanceGwei, uint256 _minTopUpGwei) internal { + if (_targetBalanceGwei == 0 || _minTopUpGwei == 0) revert ZeroValue(); + if (_targetBalanceGwei > type(uint64).max || _minTopUpGwei > type(uint64).max) revert TooLargeValue(); + if (_minTopUpGwei > _targetBalanceGwei) revert MinTopUpExceedsTarget(); + + Storage storage $ = _gatewayStorage(); + $.targetBalanceGwei = uint64(_targetBalanceGwei); + $.minTopUpGwei = uint64(_minTopUpGwei); + emit TopUpBalanceLimitsChanged(_targetBalanceGwei, _minTopUpGwei); + } + + function _verifyRootAge(BeaconRootData calldata _beaconRootData) internal view { + if (block.timestamp > _beaconRootData.childBlockTimestamp + _gatewayStorage().maxRootAge) { + revert RootIsTooOld(); + } + + if (_beaconRootData.childBlockTimestamp <= _gatewayStorage().lastTopUpTimestamp) { + revert RootPrecedesLastTopUp(); + } + } + + function _verifyValidatorWasActivated(uint64 _slot, ValidatorWitness calldata _w) internal view { + // header slot epoch + uint64 epoch = uint64(_slot / SLOTS_PER_EPOCH); + // Validator should be activated earlier than current epoch + if (_w.activationEpoch >= epoch) revert ValidatorIsNotActivated(); + } + + function _evaluateTopUpLimit(ValidatorWitness calldata _validator, uint256 _pendingBalanceGwei) + internal + view + returns (uint256) + { + if ( + _validator.exitEpoch != FAR_FUTURE_EPOCH || _validator.slashed + || _validator.withdrawableEpoch != FAR_FUTURE_EPOCH + ) { + return 0; + } + + Storage storage $ = _gatewayStorage(); + uint256 currentTotal = _validator.effectiveBalance + _pendingBalanceGwei; + if (currentTotal >= $.targetBalanceGwei) return 0; + + uint256 topUpLimit = $.targetBalanceGwei - currentTotal; + if (topUpLimit < $.minTopUpGwei) return 0; + + return topUpLimit; + } + + function _gatewayStorage() internal pure returns (Storage storage $) { + bytes32 position = GATEWAY_STORAGE_POSITION; + assembly ("memory-safe") { + $.slot := position + } + } + + event 
MaxValidatorsPerTopUpChanged(uint256 newValue); + event MinBlockDistanceChanged(uint256 newValue); + event LastTopUpChanged(uint256 newValue); + event MaxRootAgeChanged(uint256 newValue); + event TopUpBalanceLimitsChanged(uint256 targetBalanceGwei, uint256 minTopUpGwei); + + error ZeroValue(); + error ZeroArgument(string argument); + error TooLargeValue(); + error RootIsTooOld(); + error RootPrecedesLastTopUp(); + error WrongArrayLength(); + error MaxValidatorsPerTopUpExceeded(); + error WrongWithdrawalCredentials(); + error WrongPubkeyLength(); + error MinBlockDistanceNotMet(); + error InvalidValidatorIndicesSortOrder(); + error ValidatorIsNotActivated(); + error MinTopUpExceedsTarget(); +} diff --git a/contracts/0.8.25/consolidation/ConsolidationBus.sol b/contracts/0.8.25/consolidation/ConsolidationBus.sol new file mode 100644 index 0000000000..0a1c026058 --- /dev/null +++ b/contracts/0.8.25/consolidation/ConsolidationBus.sol @@ -0,0 +1,434 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import { + AccessControlEnumerableUpgradeable +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; + +import {IPredepositGuarantee} from "contracts/0.8.25/vaults/interfaces/IPredepositGuarantee.sol"; + +interface IConsolidationGateway { + struct ConsolidationWitnessGroup { + bytes[] sourcePubkeys; + IPredepositGuarantee.ValidatorWitness targetWitness; + } + + function addConsolidationRequests( + ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable; +} + +/** + * @title ConsolidationBus + * @notice Message Bus for consolidation requests that decouples request submission from fee payment. + * + * The workflow: + * 1. Admins register/unregister publishers via grant/revoke PUBLISH_ROLE + * 2. Registered publishers add consolidation requests (PUBLISH_ROLE) + * 3. 
Executor bot executes batches, paying the required ETH fee + * The bus forwards the batch to ConsolidationGateway + * 4. Optional REMOVE_ROLE can remove batches from the pending queue + */ +contract ConsolidationBus is AccessControlEnumerableUpgradeable { + uint256 internal constant PUBKEY_LENGTH = 48; + + /** + * @notice Thrown when an invalid zero value is passed + * @param name Name of the argument that was zero + */ + error ZeroArgument(string name); + + /** + * @notice Thrown when attempting to set the admin address to zero + */ + error AdminCannotBeZero(); + + /** + * @notice Thrown when batch is empty + */ + error EmptyBatch(); + + /** + * @notice Thrown when attempting to remove an empty list of batch hashes + */ + error EmptyBatchHashes(); + + /** + * @notice Thrown when a source group has zero elements + * @param groupIndex Index of the empty group + */ + error EmptyGroup(uint256 groupIndex); + + /** + * @notice Thrown when batch size exceeds the limit + * @param size Actual batch size + * @param limit Maximum allowed batch size + */ + error BatchTooLarge(uint256 size, uint256 limit); + + /** + * @notice Thrown when the number of groups in a batch exceeds the limit + * @param groupsCount Actual number of groups + * @param limit Maximum allowed number of groups + */ + error TooManyGroups(uint256 groupsCount, uint256 limit); + + /** + * @notice Thrown when maxGroupsInBatch exceeds batchSize + * @param maxGroupsInBatch The max groups in batch value + * @param batchSizeLimit The batch size limit value + */ + error MaxGroupsExceedsBatchSize(uint256 maxGroupsInBatch, uint256 batchSizeLimit); + + /** + * @notice Thrown when attempting to add a batch that is already pending execution + * @param batchHash Hash of the batch that already exists in the pending queue + */ + error BatchAlreadyPending(bytes32 batchHash); + + /** + * @notice Thrown when batch is not found in storage + * @param batchHash Hash of the missing batch + */ + error BatchNotFound(bytes32 
batchHash); + + /** + * @notice Thrown when source and target pubkeys are the same + * @param index Index of the invalid pair in the batch + */ + error SourceEqualsTarget(uint256 index); + + /** + * @notice Thrown when target pubkey length is invalid + * @param groupIndex Index of the group with invalid target pubkey + * @param length Actual pubkey length in bytes + */ + error InvalidTargetPubkeyLength(uint256 groupIndex, uint256 length); + + /** + * @notice Thrown when source pubkey length is invalid + * @param groupIndex Index of the group with invalid source pubkey + * @param sourceIndex Index of the source pubkey inside the group + * @param length Actual pubkey length in bytes + */ + error InvalidSourcePubkeyLength(uint256 groupIndex, uint256 sourceIndex, uint256 length); + + /** + * @notice Thrown when attempting to execute a batch before the execution delay has passed + * @param currentTime Current block timestamp + * @param executeAfter Earliest timestamp at which the batch can be executed + */ + error ExecutionDelayNotPassed(uint256 currentTime, uint256 executeAfter); + + /** + * @notice Emitted when the batch size limit is updated + * @param newLimit New batch size limit + */ + event BatchLimitUpdated(uint256 newLimit); + + /** + * @notice Emitted when the max groups in batch limit is updated + * @param newLimit New max groups in batch limit + */ + event MaxGroupsInBatchUpdated(uint256 newLimit); + + /** + * @notice Emitted when consolidation requests are added + * @param publisher Address of the publisher who added the requests + * @param batchData Encoded batch data (abi.encode(groups)) + */ + event RequestsAdded(address indexed publisher, bytes batchData); + + /** + * @notice Emitted when consolidation requests are executed + * @param batchHash Hash of the executed batch + * @param feePaid Amount of ETH paid for the execution + */ + event RequestsExecuted(bytes32 indexed batchHash, uint256 feePaid); + + /** + * @notice Emitted when batches are removed + 
* @param batchHashes Array of removed batch hashes + */ + event BatchesRemoved(bytes32[] batchHashes); + + /** + * @notice Emitted when the execution delay is updated + * @param newDelay New execution delay in seconds + */ + event ExecutionDelayUpdated(uint256 newDelay); + + bytes32 public constant MANAGE_ROLE = keccak256("MANAGE_ROLE"); + bytes32 public constant PUBLISH_ROLE = keccak256("PUBLISH_ROLE"); + bytes32 public constant REMOVE_ROLE = keccak256("REMOVE_ROLE"); + + struct ConsolidationGroup { + bytes[] sourcePubkeys; + bytes targetPubkey; + } + + struct BatchInfo { + address publisher; + uint64 addedAt; + } + + IConsolidationGateway internal immutable CONSOLIDATION_GATEWAY; + + uint256 internal _batchSize; + uint256 internal _maxGroupsInBatch; + uint256 internal _executionDelay; + mapping(bytes32 batchHash => BatchInfo info) internal _pendingBatches; + + constructor(address consolidationGateway) { + if (consolidationGateway == address(0)) revert ZeroArgument("consolidationGateway"); + + CONSOLIDATION_GATEWAY = IConsolidationGateway(consolidationGateway); + + _disableInitializers(); + } + + /// @notice Initializes the contract. + /// @param admin Lido DAO Aragon agent contract address. + /// @dev Proxy initialization method. 
+ function initialize( + address admin, + uint256 initialBatchSize, + uint256 initialMaxGroupsInBatch, + uint256 initialExecutionDelay + ) external initializer { + if (admin == address(0)) revert AdminCannotBeZero(); + + _grantRole(DEFAULT_ADMIN_ROLE, admin); + _grantRole(MANAGE_ROLE, admin); + _grantRole(REMOVE_ROLE, admin); + + _setBatchSize(initialBatchSize); + _setMaxGroupsInBatch(initialMaxGroupsInBatch); + _setExecutionDelay(initialExecutionDelay); + } + + /** + * @notice Sets the maximum batch size limit + * @param limit New batch size limit + * @dev Reverts if caller does not have MANAGE_ROLE + */ + function setBatchSize(uint256 limit) external onlyRole(MANAGE_ROLE) { + _setBatchSize(limit); + } + + /** + * @notice Sets the maximum number of groups allowed in a batch + * @param limit New max groups in batch limit + * @dev Reverts if caller does not have MANAGE_ROLE + */ + function setMaxGroupsInBatch(uint256 limit) external onlyRole(MANAGE_ROLE) { + _setMaxGroupsInBatch(limit); + } + + /** + * @notice Sets the execution delay in seconds between adding and executing a batch + * @param delay New execution delay in seconds (0 means no delay) + * @dev Reverts if caller does not have MANAGE_ROLE + * @dev The execution delay is not snapshotted per batch + * Changes to this parameter apply retroactively to all pending batches + * MANAGE_ROLE holders are trusted + */ + function setExecutionDelay(uint256 delay) external onlyRole(MANAGE_ROLE) { + _setExecutionDelay(delay); + } + + /** + * @notice Removes batches from the queue + * @param batchHashes Array of batch hashes to remove + * @dev Reverts if caller does not have REMOVE_ROLE + * @dev Reverts if batchHashes is empty + * @dev Reverts if any batch is not found or already executed + */ + function removeBatches(bytes32[] calldata batchHashes) external onlyRole(REMOVE_ROLE) { + if (batchHashes.length == 0) revert EmptyBatchHashes(); + + for (uint256 i = 0; i < batchHashes.length; ++i) { + bytes32 batchHash = 
batchHashes[i]; + + if (_pendingBatches[batchHash].publisher == address(0)) revert BatchNotFound(batchHash); + + delete _pendingBatches[batchHash]; + } + emit BatchesRemoved(batchHashes); + } + + // ============== + // View methods + // ============== + + /** + * @notice Returns the current batch size limit + * @return Current maximum batch size + */ + function batchSize() external view returns (uint256) { + return _batchSize; + } + + /** + * @notice Returns the maximum number of groups allowed in a batch + * @return Current max groups in batch limit + */ + function maxGroupsInBatch() external view returns (uint256) { + return _maxGroupsInBatch; + } + + /** + * @notice Returns the current execution delay in seconds + * @return Current execution delay + */ + function executionDelay() external view returns (uint256) { + return _executionDelay; + } + + /** + * @notice Returns the address of the ConsolidationGateway + * @return Address of the ConsolidationGateway contract + */ + function getConsolidationGateway() external view returns (address) { + return address(CONSOLIDATION_GATEWAY); + } + + /** + * @notice Returns the batch info for a pending batch + * @param batchHash Hash of the batch to check + * @return Batch info struct with publisher address and addedAt timestamp (zero values if batch is not in queue) + */ + function getBatchInfo(bytes32 batchHash) external view returns (BatchInfo memory) { + return _pendingBatches[batchHash]; + } + + // =============== + // Publisher API + // =============== + + /** + * @notice Adds grouped consolidation requests to the queue + * @param groups Array of consolidation groups, where each group contains source pubkeys and a target pubkey + * @dev The same batch can be submitted again after it has been executed. 
+ * @dev Reverts if: + * - Caller does not have PUBLISH_ROLE + * - Batch is empty + * - Any group is empty + * - Total batch size exceeds limit + * - Any source or target pubkey length is not 48 bytes + * - Any source pubkey equals its corresponding target pubkey + * - Batch already exists + */ + function addConsolidationRequests(ConsolidationGroup[] calldata groups) external onlyRole(PUBLISH_ROLE) { + uint256 groupsCount = groups.length; + if (groupsCount == 0) revert EmptyBatch(); + + uint256 maxGroups = _maxGroupsInBatch; + if (groupsCount > maxGroups) revert TooManyGroups(groupsCount, maxGroups); + + uint256 totalCount = 0; + for (uint256 i = 0; i < groupsCount; ++i) { + uint256 groupSize = groups[i].sourcePubkeys.length; + if (groupSize == 0) revert EmptyGroup(i); + totalCount += groupSize; + } + + uint256 limit = _batchSize; + if (totalCount > limit) revert BatchTooLarge(totalCount, limit); + + for (uint256 i = 0; i < groupsCount; ++i) { + bytes calldata targetPubkey = groups[i].targetPubkey; + if (targetPubkey.length != PUBKEY_LENGTH) { + revert InvalidTargetPubkeyLength(i, targetPubkey.length); + } + + bytes32 targetHash = keccak256(targetPubkey); + bytes[] calldata group = groups[i].sourcePubkeys; + for (uint256 j = 0; j < group.length; ++j) { + bytes calldata sourcePubkey = group[j]; + if (sourcePubkey.length != PUBKEY_LENGTH) { + revert InvalidSourcePubkeyLength(i, j, sourcePubkey.length); + } + + if (keccak256(sourcePubkey) == targetHash) { + revert SourceEqualsTarget(i); + } + } + } + + bytes memory encodedBatch = abi.encode(groups); + + bytes32 batchHash = keccak256(encodedBatch); + + if (_pendingBatches[batchHash].publisher != address(0)) revert BatchAlreadyPending(batchHash); + + _pendingBatches[batchHash] = BatchInfo(msg.sender, uint64(block.timestamp)); + + emit RequestsAdded(msg.sender, encodedBatch); + } + + // ============== + // Executor API + // ============== + + /** + * @notice Executes a batch of grouped consolidation requests + * @param 
groups Array of consolidation witness groups, each containing source pubkeys and a target validator witness + * @dev Forwards the batch to ConsolidationGateway with msg.value as fee + * @dev Reverts if: + * - Batch was not added or was already executed/removed + */ + function executeConsolidation(IConsolidationGateway.ConsolidationWitnessGroup[] calldata groups) external payable { + // Reconstruct ConsolidationGroup[] to compute the batch hash that matches the publisher's submission + ConsolidationGroup[] memory publisherGroups = new ConsolidationGroup[](groups.length); + for (uint256 i = 0; i < groups.length; ++i) { + publisherGroups[i] = ConsolidationGroup({ + sourcePubkeys: groups[i].sourcePubkeys, + targetPubkey: groups[i].targetWitness.pubkey + }); + } + + bytes32 batchHash = keccak256(abi.encode(publisherGroups)); + + BatchInfo memory batch = _pendingBatches[batchHash]; + if (batch.publisher == address(0)) revert BatchNotFound(batchHash); + + uint256 executeAfter = uint256(batch.addedAt) + _executionDelay; + if (block.timestamp < executeAfter) revert ExecutionDelayNotPassed(block.timestamp, executeAfter); + + delete _pendingBatches[batchHash]; + + CONSOLIDATION_GATEWAY.addConsolidationRequests{value: msg.value}(groups, msg.sender); + + emit RequestsExecuted(batchHash, msg.value); + } + + // ================== + // Internal methods + // ================== + + function _setBatchSize(uint256 limit) internal { + if (limit == 0) revert ZeroArgument("batchSizeLimit"); + uint256 maxGroups = _maxGroupsInBatch; + if (maxGroups > limit) revert MaxGroupsExceedsBatchSize(maxGroups, limit); + _batchSize = limit; + emit BatchLimitUpdated(limit); + } + + function _setMaxGroupsInBatch(uint256 limit) internal { + if (limit == 0) revert ZeroArgument("maxGroupsInBatchLimit"); + uint256 currentBatchSize = _batchSize; + if (limit > currentBatchSize) revert MaxGroupsExceedsBatchSize(limit, currentBatchSize); + _maxGroupsInBatch = limit; + emit MaxGroupsInBatchUpdated(limit); + } + 
+ function _setExecutionDelay(uint256 delay) internal { + _executionDelay = delay; + emit ExecutionDelayUpdated(delay); + } +} diff --git a/contracts/0.8.25/consolidation/ConsolidationGateway.sol b/contracts/0.8.25/consolidation/ConsolidationGateway.sol new file mode 100644 index 0000000000..0a5829cced --- /dev/null +++ b/contracts/0.8.25/consolidation/ConsolidationGateway.sol @@ -0,0 +1,379 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {LimitData, RateLimitStorage, RateLimit} from "contracts/common/lib/RateLimit.sol"; +import {PausableUntil} from "contracts/common/utils/PausableUntil.sol"; +import {AccessControlEnumerable} from "@openzeppelin/contracts-v5.2/access/extensions/AccessControlEnumerable.sol"; +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {CLProofVerifier} from "contracts/0.8.25/vaults/predeposit_guarantee/CLProofVerifier.sol"; +import {IPredepositGuarantee} from "contracts/0.8.25/vaults/interfaces/IPredepositGuarantee.sol"; + +interface IDepositSecurityModule { + function isDepositsPaused() external view returns (bool); +} + +interface ILido { + function canDeposit() external view returns (bool); +} + +interface IWithdrawalVault { + function addConsolidationRequests(bytes[] calldata sourcePubkeys, bytes[] calldata targetPubkeys) external payable; + + function getConsolidationRequestFee() external view returns (uint256); +} + +/** + * @title ConsolidationGateway + * @notice ConsolidationGateway contract is one entrypoint for all consolidation requests in protocol. + * This contract is responsible for limiting consolidation requests, checking ADD_CONSOLIDATION_REQUEST_ROLE role before it gets to Withdrawal Vault. 
+ */ +contract ConsolidationGateway is AccessControlEnumerable, PausableUntil, CLProofVerifier { + using RateLimitStorage for bytes32; + using RateLimit for LimitData; + + /** + * @notice Thrown when an invalid zero value is passed + * @param name Name of the argument that was zero + */ + error ZeroArgument(string name); + + /** + * @notice Thrown when attempting to set the admin address to zero + */ + error AdminCannotBeZero(); + + /** + * @notice Thrown when a consolidation fee insufficient + * @param feeRequired Amount of fee required to cover consolidation request + * @param passedValue Amount of fee sent to cover consolidation request + */ + error InsufficientFee(uint256 feeRequired, uint256 passedValue); + + /** + * @notice Thrown when a consolidation fee refund failed + */ + error FeeRefundFailed(); + + /** + * @notice Thrown when remaining consolidation requests limit is not enough to cover sender requests + * @param requestsCount Amount of requests that were sent for processing + * @param remainingLimit Amount of requests that still can be processed at current day + */ + error ConsolidationRequestsLimitExceeded(uint256 requestsCount, uint256 remainingLimit); + + /** + * @notice Thrown when a source group has zero elements + * @param groupIndex Index of the empty group + */ + error EmptyGroup(uint256 groupIndex); + + /** + * @notice Thrown when DSM deposits are paused + */ + error DSMDepositsPaused(); + + /** + * @notice Thrown when Lido deposits are paused (Lido stopped or bunker mode) + */ + error LidoDepositsPaused(); + + /** + * @notice Emitted when limits configs are set. + * @param maxConsolidationRequestsLimit The maximum number of consolidation requests. + * @param consolidationsPerFrame The number of consolidations that can be restored per frame. + * @param frameDurationInSec The duration of each frame, in seconds, after which `consolidationsPerFrame` consolidations can be restored. 
+ */ + event ConsolidationRequestsLimitSet( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec + ); + + /// @notice role that allows to pause the contract + bytes32 public constant PAUSE_ROLE = keccak256("PAUSE_ROLE"); + + /// @notice role that allows to resume the contract + bytes32 public constant RESUME_ROLE = keccak256("RESUME_ROLE"); + + bytes32 public constant ADD_CONSOLIDATION_REQUEST_ROLE = keccak256("ADD_CONSOLIDATION_REQUEST_ROLE"); + bytes32 public constant EXIT_LIMIT_MANAGER_ROLE = keccak256("EXIT_LIMIT_MANAGER_ROLE"); + + bytes32 public constant CONSOLIDATION_LIMIT_POSITION = + keccak256("lido.ConsolidationGateway.maxConsolidationRequestLimit"); + + uint256 internal constant COMPOUNDING_PREFIX = uint256(0x02) << 248; + + struct ConsolidationWitnessGroup { + bytes[] sourcePubkeys; + IPredepositGuarantee.ValidatorWitness targetWitness; + } + + ILidoLocator internal immutable LOCATOR; + + /// @dev Ensures the contract's ETH balance is unchanged. 
+ modifier preservesEthBalance() { + uint256 balanceBeforeCall = address(this).balance - msg.value; + _; + assert(address(this).balance == balanceBeforeCall); + } + + constructor( + address admin, + address lidoLocator, + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec, + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot + ) CLProofVerifier(_gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot) { + if (admin == address(0)) revert AdminCannotBeZero(); + if (lidoLocator == address(0)) revert ZeroArgument("lidoLocator"); + LOCATOR = ILidoLocator(lidoLocator); + + _grantRole(DEFAULT_ADMIN_ROLE, admin); + _setConsolidationRequestLimit(maxConsolidationRequestsLimit, consolidationsPerFrame, frameDurationInSec); + } + + /** + * @notice Resume the contract + * @dev Reverts if contracts is not paused + * @dev Reverts if sender has no `RESUME_ROLE` + */ + function resume() external onlyRole(RESUME_ROLE) { + _resume(); + } + + /** + * @notice Pause the contract for a specified period + * @param _duration pause duration in seconds (use `PAUSE_INFINITELY` for unlimited) + * @dev Reverts if contract is already paused + * @dev Reverts if sender has no `PAUSE_ROLE` + * @dev Reverts if zero duration is passed + */ + function pauseFor(uint256 _duration) external onlyRole(PAUSE_ROLE) { + _pauseFor(_duration); + } + + /** + * @notice Pause the contract until a specified timestamp + * @param _pauseUntilInclusive the last second to pause until inclusive + * @dev Reverts if the timestamp is in the past + * @dev Reverts if sender has no `PAUSE_ROLE` + * @dev Reverts if contract is already paused + */ + function pauseUntil(uint256 _pauseUntilInclusive) external onlyRole(PAUSE_ROLE) { + _pauseUntil(_pauseUntilInclusive); + } + + /** + * @dev Submits grouped Consolidation Requests to the Withdrawal Vault. + * Each group represents multiple source validators consolidating into a single target. 
+ * @param groups An array of consolidation groups, where each group contains source public keys + * and a target validator witness with a CL proof of withdrawal credentials. + * @param refundRecipient The address that will receive any excess ETH sent for fees. + * + * @notice Reverts if: + * - The caller does not have the `ADD_CONSOLIDATION_REQUEST_ROLE` + * - The total fee value sent is insufficient to cover all provided consolidation requests. + * - There is not enough limit quota left in the current frame to process all requests. + */ + function addConsolidationRequests( + ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable onlyRole(ADD_CONSOLIDATION_REQUEST_ROLE) preservesEthBalance whenResumed { + if (msg.value == 0) revert ZeroArgument("msg.value"); + uint256 groupsCount = groups.length; + if (groupsCount == 0) revert ZeroArgument("groups"); + + // Count total individual requests across all groups + uint256 requestsCount = 0; + for (uint256 i = 0; i < groupsCount; ++i) { + uint256 groupSize = groups[i].sourcePubkeys.length; + if (groupSize == 0) revert EmptyGroup(i); + requestsCount += groupSize; + } + + _checkConsolidationPreconditions(); + + (IWithdrawalVault withdrawalVault, bytes32 withdrawalCredentials) = _getWithdrawalVaultData(); + + for (uint256 i = 0; i < groupsCount; ++i) { + _validatePubKeyWCProof(groups[i].targetWitness, withdrawalCredentials); + } + + _consumeConsolidationRequestLimit(requestsCount); + + uint256 fee = withdrawalVault.getConsolidationRequestFee(); + uint256 totalFee = requestsCount * fee; + uint256 refund = _checkFee(totalFee); + + // Expand grouped requests into flat pairs for WithdrawalVault + (bytes[] memory sourcePubkeys, bytes[] memory targetPubkeys) = _prepareConsolidationPairs( + groups, + requestsCount + ); + withdrawalVault.addConsolidationRequests{value: totalFee}(sourcePubkeys, targetPubkeys); + + _refundFee(refund, refundRecipient); + } + + /** + * @notice Sets the maximum 
request limit and the frame during which a portion of the limit can be restored. + * @param maxConsolidationRequestsLimit The maximum number of consolidation requests. + * @param consolidationsPerFrame The number of consolidations that can be restored per frame. + * @param frameDurationInSec The duration of each frame, in seconds, after which `consolidationsPerFrame` consolidations can be restored. + */ + function setConsolidationRequestLimit( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec + ) external onlyRole(EXIT_LIMIT_MANAGER_ROLE) { + _setConsolidationRequestLimit(maxConsolidationRequestsLimit, consolidationsPerFrame, frameDurationInSec); + } + + /** + * @notice Returns information about current limits data + * @return maxConsolidationRequestsLimit Maximum consolidation requests limit + * @return consolidationsPerFrame The number of consolidations that can be restored per frame. + * @return frameDurationInSec The duration of each frame, in seconds, after which `consolidationsPerFrame` consolidations can be restored. + * @return prevConsolidationRequestsLimit Limit left after previous requests + * @return currentConsolidationRequestsLimit Current consolidation requests limit + */ + function getConsolidationRequestLimitFullInfo() + external + view + returns ( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec, + uint256 prevConsolidationRequestsLimit, + uint256 currentConsolidationRequestsLimit + ) + { + LimitData memory limitData = CONSOLIDATION_LIMIT_POSITION.getStorageLimit(); + maxConsolidationRequestsLimit = limitData.maxLimit; + consolidationsPerFrame = limitData.itemsPerFrame; + frameDurationInSec = limitData.frameDurationInSec; + prevConsolidationRequestsLimit = limitData.prevLimit; + + currentConsolidationRequestsLimit = limitData.isLimitSet() + ? 
// NOTE(review): chunk opens mid-function — the lines below are the tail of a
// view function that begins before this chunk (it returns the current
// consolidation request limit, or type(uint256).max when no limit is set).
                limitData.calculateCurrentLimit(_getTimestamp())
                : type(uint256).max;
    }

    /// Internal functions

    /// @dev Blocks new consolidation requests while protocol deposits are halted.
    /// If DSM paused deposits, some validators may not belong to Lido and can
    /// therefore have non-Lido withdrawal credentials; to avoid accepting
    /// consolidations into such validators, new requests are blocked. Acts as an
    /// additional safety check on top of validator proof verification.
    function _checkConsolidationPreconditions() internal view {
        if (IDepositSecurityModule(LOCATOR.depositSecurityModule()).isDepositsPaused()) {
            revert DSMDepositsPaused();
        }

        // If Lido is stopped or bunker mode is active, new consolidation
        // requests must also be blocked.
        if (!ILido(LOCATOR.lido()).canDeposit()) {
            revert LidoDepositsPaused();
        }
    }

    /// @dev Ensures `msg.value` covers `fee`; returns the overpayment that
    /// should later be refunded to the caller.
    function _checkFee(uint256 fee) internal view returns (uint256 refund) {
        if (msg.value < fee) {
            revert InsufficientFee(fee, msg.value);
        }
        unchecked {
            // Safe: msg.value >= fee is checked above.
            refund = msg.value - fee;
        }
    }

    /// @dev Sends `refund` wei to `recipient`; falls back to `msg.sender`
    /// when the recipient is unset. Reverts if the transfer fails.
    function _refundFee(uint256 refund, address recipient) internal {
        if (refund > 0) {
            // If the refund recipient is not set, use the sender instead.
            if (recipient == address(0)) {
                recipient = msg.sender;
            }

            (bool success, ) = recipient.call{value: refund}("");
            if (!success) {
                revert FeeRefundFailed();
            }
        }
    }

    /// @dev Virtual so tests can override the clock source.
    function _getTimestamp() internal view virtual returns (uint256) {
        return block.timestamp; // solhint-disable-line not-rely-on-time
    }

    /// @dev Persists a new rate-limit configuration and emits
    /// `ConsolidationRequestsLimitSet`.
    function _setConsolidationRequestLimit(
        uint256 maxConsolidationRequestsLimit,
        uint256 consolidationsPerFrame,
        uint256 frameDurationInSec
    ) internal {
        uint256 timestamp = _getTimestamp();

        CONSOLIDATION_LIMIT_POSITION.setStorageLimit(
            CONSOLIDATION_LIMIT_POSITION.getStorageLimit().setLimits(
                maxConsolidationRequestsLimit,
                consolidationsPerFrame,
                frameDurationInSec,
                timestamp
            )
        );

        emit ConsolidationRequestsLimitSet(maxConsolidationRequestsLimit, consolidationsPerFrame, frameDurationInSec);
    }

    /// @dev Consumes `requestsCount` units from the rolling limit; a no-op when
    /// no limit is configured. Reverts with `ConsolidationRequestsLimitExceeded`
    /// when the remaining allowance is insufficient.
    function _consumeConsolidationRequestLimit(uint256 requestsCount) internal {
        LimitData memory limitData = CONSOLIDATION_LIMIT_POSITION.getStorageLimit();
        if (!limitData.isLimitSet()) {
            return;
        }

        uint256 limit = limitData.calculateCurrentLimit(_getTimestamp());

        if (limit < requestsCount) {
            revert ConsolidationRequestsLimitExceeded(requestsCount, limit);
        }

        CONSOLIDATION_LIMIT_POSITION.setStorageLimit(limitData.updatePrevLimit(limit - requestsCount, _getTimestamp()));
    }

    /// @dev Flattens grouped source pubkeys and repeats each group's target
    /// pubkey, producing two aligned arrays of exactly `totalCount` entries.
    function _prepareConsolidationPairs(
        ConsolidationWitnessGroup[] calldata groups,
        uint256 totalCount
    ) internal pure returns (bytes[] memory sourcePubkeys, bytes[] memory targetPubkeys) {
        sourcePubkeys = new bytes[](totalCount);
        targetPubkeys = new bytes[](totalCount);

        uint256 idx = 0;
        for (uint256 i = 0; i < groups.length; ++i) {
            bytes[] calldata group = groups[i].sourcePubkeys;
            bytes calldata target = groups[i].targetWitness.pubkey;
            for (uint256 j = 0; j < group.length; ++j) {
                sourcePubkeys[idx] = group[j];
                targetPubkeys[idx] = target;
                ++idx;
            }
        }
    }

    /// Returns the withdrawal vault and its 0x02 withdrawal credentials.
+ function _getWithdrawalVaultData() + internal + view + returns (IWithdrawalVault withdrawalVault, bytes32 withdrawalCredentials) + { + address vaultAddress = LOCATOR.withdrawalVault(); + withdrawalVault = IWithdrawalVault(vaultAddress); + + // withdrawalCredentials = 0x02 || 11 zero bytes || 20-byte vault address + withdrawalCredentials = bytes32(COMPOUNDING_PREFIX | uint160(vaultAddress)); + } +} diff --git a/contracts/0.8.25/consolidation/ConsolidationMigrator.sol b/contracts/0.8.25/consolidation/ConsolidationMigrator.sol new file mode 100644 index 0000000000..674b0c5ada --- /dev/null +++ b/contracts/0.8.25/consolidation/ConsolidationMigrator.sol @@ -0,0 +1,419 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; +import { + AccessControlEnumerableUpgradeable +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; + +/** + * @dev Minimal interface for StakingRouter to get module addresses + */ +interface IStakingRouter { + struct StakingModule { + uint24 id; + address stakingModuleAddress; + uint16 stakingModuleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint8 status; + string name; + uint64 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint16 priorityExitShareThreshold; + uint64 maxDepositsPerBlock; + uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; + } + + function getStakingModule(uint256 _stakingModuleId) external view returns (StakingModule memory); +} + +/** + * @dev Unified interface for staking modules (NOR, SDVT, CMv1, CMv2) + * It also works for legacy staking modules (NOR, SDVT) where `getSigningKeys` returns different + * tuple `(bytes memory pubkeys, bytes memory signatures, bool[] memory used)`. 
/**
 * @dev Unified interface for staking modules (NOR, SDVT, CMv1, CMv2).
 * It also works for legacy staking modules (NOR, SDVT) whose `getSigningKeys`
 * returns a different tuple `(bytes pubkeys, bytes signatures, bool[] used)`.
 * The trick: `abi.decode(returndata, (bytes))` decodes only the first tuple
 * element. This is safe as long as the first returned value really is
 * `bytes pubkeys` in that position.
 */
interface IUnifiedStakingModule {
    function getSigningKeys(
        uint256 nodeOperatorId,
        uint256 startIndex,
        uint256 keysCount
    ) external view returns (bytes memory);

    function getNodeOperatorSummary(
        uint256 _nodeOperatorId
    )
        external
        view
        returns (
            uint256 targetLimitMode,
            uint256 targetValidatorsCount,
            uint256 stuckValidatorsCount,
            uint256 refundedValidatorsCount,
            uint256 stuckPenaltyEndTimestamp,
            uint256 totalExitedValidators,
            uint256 totalDepositedValidators,
            uint256 depositableValidatorsCount
        );
}

/**
 * @dev Interface for ConsolidationBus to submit consolidation requests.
 */
interface IConsolidationBus {
    struct ConsolidationGroup {
        bytes[] sourcePubkeys;
        bytes targetPubkey;
    }

    function addConsolidationRequests(ConsolidationGroup[] calldata groups) external;
}
/**
 * @title ConsolidationMigrator
 * @notice Validates and submits consolidation requests from a source module to
 *         a target module.
 *
 * Workflow:
 *  1. The consolidation manager allowlists (source operator -> target operator)
 *     pairs, each with a designated submitter address.
 *  2. The designated submitter submits consolidation batches for its pair.
 *  3. The contract validates the referenced keys and forwards the batch to the
 *     ConsolidationBus.
 */
contract ConsolidationMigrator is AccessControlEnumerableUpgradeable {
    using EnumerableSet for EnumerableSet.UintSet;

    // ==========
    // Errors
    // ==========

    error ZeroArgument(string name);
    error AdminCannotBeZero();
    error PairNotInAllowlist(uint256 sourceOperatorId, uint256 targetOperatorId);
    error KeyNotDeposited(uint256 moduleId, uint256 operatorId, uint256 keyIndex);
    error NotAuthorized(address caller, uint256 sourceOperatorId, uint256 targetOperatorId);

    // ==========
    // Events
    // ==========

    event ConsolidationPairAllowed(
        uint256 indexed sourceOperatorId,
        uint256 indexed targetOperatorId,
        address indexed submitter
    );
    event ConsolidationPairDisallowed(
        uint256 indexed sourceOperatorId,
        uint256 indexed targetOperatorId,
        address indexed submitter
    );
    event ConsolidationSubmitted(
        uint256 indexed sourceOperatorId,
        uint256 indexed targetOperatorId,
        ConsolidationIndexGroup[] groups
    );

    // ==========
    // Structs
    // ==========

    /// @dev Key indices of several source validators consolidated into one target.
    struct ConsolidationIndexGroup {
        uint256[] sourceKeyIndices;
        uint256 targetKeyIndex;
    }

    // ==========
    // Roles
    // ==========

    bytes32 public constant ALLOW_PAIR_ROLE = keccak256("ALLOW_PAIR_ROLE");
    bytes32 public constant DISALLOW_PAIR_ROLE = keccak256("DISALLOW_PAIR_ROLE");

    // ==========
    // Immutables
    // ==========

    uint256 public constant PUBKEY_LENGTH = 48;

    IStakingRouter internal immutable STAKING_ROUTER;
    IConsolidationBus internal immutable CONSOLIDATION_BUS;
    uint256 internal immutable SOURCE_MODULE_ID;
    uint256 internal immutable TARGET_MODULE_ID;

    // ==========
    // Storage
    // ==========

    /// @dev mapping(sourceOperatorId => set of allowed targetOperatorIds)
    mapping(uint256 => EnumerableSet.UintSet) internal _allowedPairs;

    /// @dev mapping(sourceOperatorId => mapping(targetOperatorId => submitter address))
    mapping(uint256 => mapping(uint256 => address)) internal _submitters;

    // ==========
    // Constructor
    // ==========

    constructor(address stakingRouter, address consolidationBus, uint256 _sourceModuleId, uint256 _targetModuleId) {
        if (stakingRouter == address(0)) revert ZeroArgument("stakingRouter");
        if (consolidationBus == address(0)) revert ZeroArgument("consolidationBus");
        if (_sourceModuleId == 0) revert ZeroArgument("sourceModuleId");
        if (_targetModuleId == 0) revert ZeroArgument("targetModuleId");

        STAKING_ROUTER = IStakingRouter(stakingRouter);
        CONSOLIDATION_BUS = IConsolidationBus(consolidationBus);
        SOURCE_MODULE_ID = _sourceModuleId;
        TARGET_MODULE_ID = _targetModuleId;

        _disableInitializers();
    }

    /// @notice Initializes the contract.
    /// @param admin Lido DAO Aragon agent contract address.
    /// @dev Proxy initialization method.
    function initialize(address admin) external initializer {
        if (admin == address(0)) revert AdminCannotBeZero();

        _grantRole(DEFAULT_ADMIN_ROLE, admin);
    }

    // ======================
    // Allowlist Management
    // ======================

    /**
     * @notice Allows a consolidation pair (source operator -> target operator)
     *         with a designated submitter.
     * @param sourceOperatorId ID of the source operator in the source module.
     * @param targetOperatorId ID of the target operator in the target module.
     * @param submitter Address authorized to submit consolidation batches for this pair.
     * @dev Can be called multiple times to update the submitter for an existing pair.
     * @dev Reverts if caller lacks ALLOW_PAIR_ROLE or if submitter is the zero address.
     */
    function allowPair(
        uint256 sourceOperatorId,
        uint256 targetOperatorId,
        address submitter
    ) external onlyRole(ALLOW_PAIR_ROLE) {
        if (submitter == address(0)) revert ZeroArgument("submitter");

        _allowedPairs[sourceOperatorId].add(targetOperatorId);
        _submitters[sourceOperatorId][targetOperatorId] = submitter;

        emit ConsolidationPairAllowed(sourceOperatorId, targetOperatorId, submitter);
    }

    /**
     * @notice Disallows a consolidation pair and removes its submitter.
     * @param sourceOperatorId ID of the source operator.
     * @param targetOperatorId ID of the target operator.
     * @dev Reverts if caller lacks DISALLOW_PAIR_ROLE or the pair is not allowlisted.
     */
    function disallowPair(uint256 sourceOperatorId, uint256 targetOperatorId) external onlyRole(DISALLOW_PAIR_ROLE) {
        bool removed = _allowedPairs[sourceOperatorId].remove(targetOperatorId);
        if (!removed) revert PairNotInAllowlist(sourceOperatorId, targetOperatorId);

        address submitter = _submitters[sourceOperatorId][targetOperatorId];
        delete _submitters[sourceOperatorId][targetOperatorId];

        emit ConsolidationPairDisallowed(sourceOperatorId, targetOperatorId, submitter);
    }

    /**
     * @notice Allows a submitter to disallow their own pair (permissionless).
     * @param sourceOperatorId ID of the source operator.
     * @param targetOperatorId ID of the target operator.
     * @dev Caller must be the designated submitter for the pair; reverts otherwise.
     */
    function selfDisallowPair(uint256 sourceOperatorId, uint256 targetOperatorId) external {
        address submitter = _submitters[sourceOperatorId][targetOperatorId];
        if (msg.sender != submitter) {
            revert NotAuthorized(msg.sender, sourceOperatorId, targetOperatorId);
        }

        _allowedPairs[sourceOperatorId].remove(targetOperatorId);
        delete _submitters[sourceOperatorId][targetOperatorId];

        emit ConsolidationPairDisallowed(sourceOperatorId, targetOperatorId, msg.sender);
    }

    // ==============
    // View Methods
    // ==============

    /// @notice Checks whether a consolidation pair is allowed.
    /// @param sourceOperatorId ID of the source operator.
    /// @param targetOperatorId ID of the target operator.
    /// @return True if the pair is allowed.
    function isPairAllowed(uint256 sourceOperatorId, uint256 targetOperatorId) external view returns (bool) {
        return _allowedPairs[sourceOperatorId].contains(targetOperatorId);
    }

    /// @notice Returns all allowed target operators for a given source operator.
    /// @param sourceOperatorId ID of the source operator.
    /// @return Array of allowed target operator IDs.
    function getAllowedTargets(uint256 sourceOperatorId) external view returns (uint256[] memory) {
        return _allowedPairs[sourceOperatorId].values();
    }

    /// @notice Returns the submitter address for a consolidation pair.
    /// @param sourceOperatorId ID of the source operator.
    /// @param targetOperatorId ID of the target operator.
    /// @return Address authorized to submit batches, or address(0) if the pair is not allowed.
    function getSubmitter(uint256 sourceOperatorId, uint256 targetOperatorId) external view returns (address) {
        return _submitters[sourceOperatorId][targetOperatorId];
    }

    /// @notice Returns the StakingRouter address.
    function getStakingRouter() external view returns (address) {
        return address(STAKING_ROUTER);
    }

    /// @notice Returns the ConsolidationBus address.
    function getConsolidationBus() external view returns (address) {
        return address(CONSOLIDATION_BUS);
    }

    /// @notice Returns the source module ID this migrator is bound to.
    function sourceModuleId() external view returns (uint256) {
        return SOURCE_MODULE_ID;
    }

    /// @notice Returns the target module ID this migrator is bound to.
    function targetModuleId() external view returns (uint256) {
        return TARGET_MODULE_ID;
    }

    // ============
    // Submit
    // ============

    /**
     * @notice Submits a consolidation batch after validation.
     * @param sourceOperatorId ID of the source operator.
     * @param targetOperatorId ID of the target operator.
     * @param groups Consolidation index groups, each with source key indices and a target key index.
     * @dev Caller must be the designated submitter for this pair (set via allowPair).
     * @dev Forwards the validated batch to the ConsolidationBus.
     */
    function submitConsolidationBatch(
        uint256 sourceOperatorId,
        uint256 targetOperatorId,
        ConsolidationIndexGroup[] calldata groups
    ) external {
        // Authorization: caller must be the designated submitter for this pair.
        address submitter = _submitters[sourceOperatorId][targetOperatorId];
        if (msg.sender != submitter) {
            revert NotAuthorized(msg.sender, sourceOperatorId, targetOperatorId);
        }

        // Validate the batch and resolve key indices into pubkeys.
        IConsolidationBus.ConsolidationGroup[] memory pubkeyGroups = _getValidatedConsolidationPubkeys(
            sourceOperatorId,
            targetOperatorId,
            groups
        );

        CONSOLIDATION_BUS.addConsolidationRequests(pubkeyGroups);

        emit ConsolidationSubmitted(sourceOperatorId, targetOperatorId, groups);
    }

    // ==================
    // Internal Methods
    // ==================

    /// @dev Validates consolidation key sets and returns the corresponding
    /// pubkeys. Ensures all referenced keys are deposited.
    function _getValidatedConsolidationPubkeys(
        uint256 sourceOperatorId,
        uint256 targetOperatorId,
        ConsolidationIndexGroup[] calldata groups
    ) internal view returns (IConsolidationBus.ConsolidationGroup[] memory pubkeyGroups) {
        uint256 groupsCount = groups.length;

        pubkeyGroups = new IConsolidationBus.ConsolidationGroup[](groupsCount);
        for (uint256 i = 0; i < groupsCount; ++i) {
            pubkeyGroups[i].sourcePubkeys = _validateAndExtractSourceKeys(sourceOperatorId, groups[i].sourceKeyIndices);
            pubkeyGroups[i].targetPubkey = _validateAndExtractTargetKey(targetOperatorId, groups[i].targetKeyIndex);
        }
    }

    /// @dev Checks each source key index is deposited and fetches its pubkey.
    function _validateAndExtractSourceKeys(
        uint256 operatorId,
        uint256[] calldata keyIndices
    ) internal view returns (bytes[] memory pubkeys) {
        IUnifiedStakingModule module = _getModule(SOURCE_MODULE_ID);

        uint256 totalDeposited = _getDepositedValidatorsCount(module, operatorId);

        uint256 count = keyIndices.length;
        pubkeys = new bytes[](count);

        for (uint256 i = 0; i < count; ++i) {
            uint256 keyIndex = keyIndices[i];

            if (keyIndex >= totalDeposited) {
                revert KeyNotDeposited(SOURCE_MODULE_ID, operatorId, keyIndex);
            }

            bytes memory key = module.getSigningKeys(operatorId, keyIndex, 1);
            assert(key.length == PUBKEY_LENGTH); // Should always be 48 bytes for a single key
            pubkeys[i] = key;
        }
    }

    /// @dev Checks the target key index is deposited and fetches its pubkey.
    function _validateAndExtractTargetKey(
        uint256 operatorId,
        uint256 keyIndex
    ) internal view returns (bytes memory pubkey) {
        IUnifiedStakingModule module = _getModule(TARGET_MODULE_ID);

        uint256 totalDeposited = _getDepositedValidatorsCount(module, operatorId);

        if (keyIndex >= totalDeposited) {
            revert KeyNotDeposited(TARGET_MODULE_ID, operatorId, keyIndex);
        }

        bytes memory key = module.getSigningKeys(operatorId, keyIndex, 1);
        assert(key.length == PUBKEY_LENGTH); // Should always be 48 bytes for a single key
        pubkey = key;
    }

    /// @dev Resolves a staking module address via the StakingRouter.
    function _getModule(uint256 moduleId) internal view returns (IUnifiedStakingModule) {
        IStakingRouter.StakingModule memory sm = STAKING_ROUTER.getStakingModule(moduleId);
        return IUnifiedStakingModule(sm.stakingModuleAddress);
    }

    /// @dev Extracts totalDepositedValidators from the operator summary tuple.
    function _getDepositedValidatorsCount(
        IUnifiedStakingModule module,
        uint256 operatorId
    ) internal view returns (uint256 totalDeposited) {
        (, , , , , , totalDeposited, ) = module.getNodeOperatorSummary(operatorId);
    }
}

// ============================================================================
// file: contracts/0.8.25/lib/BeaconChainDepositor.sol (new file)
// ============================================================================

// SPDX-FileCopyrightText: 2023 Lido
// SPDX-License-Identifier: GPL-3.0

// See contracts/COMPILERS.md
pragma solidity 0.8.25;

import {MemUtils} from "contracts/common/lib/MemUtils.sol";

interface IDepositContract {
    function get_deposit_root() external view returns (bytes32 rootHash);

    function deposit(
        bytes calldata pubkey, // 48 bytes
        bytes calldata withdrawal_credentials, // 32 bytes
        bytes calldata signature, // 96 bytes
        bytes32 deposit_data_root
    ) external payable;
}

library BeaconChainDepositor {
    uint256 internal constant PUBLIC_KEY_LENGTH = 48;
    uint256 internal constant SIGNATURE_LENGTH = 96;

    uint256 internal constant DEPOSIT_SIZE = 32 ether;
    uint64 internal constant DEPOSIT_SIZE_IN_GWEI = 32 ether / 1 gwei;

    /// @dev Minimum deposit amount required by the Ethereum Deposit Contract
    uint256 internal constant MIN_DEPOSIT = 1 ether;

    /// @dev Invokes a deposit call to the official Beacon Deposit contract
    /// @param _depositContract - IDepositContract deposit contract
    /// @param _keysCount amount of keys to deposit
    /// @param _withdrawalCredentials Commitment to a public key for withdrawals
    /// @param _publicKeysBatch A BLS12-381 public keys batch
    /// @param _signaturesBatch A BLS12-381 signatures batch
    // NOTE(review): chunk ends mid-signature below; the parameter list and body
    // continue in the next chunk.
    function makeBeaconChainDeposits32ETH(
        IDepositContract
// NOTE(review): chunk opens mid-signature — the lines below continue the
// parameter list of BeaconChainDepositor.makeBeaconChainDeposits32ETH, whose
// opening is in the previous chunk.
            _depositContract,
        uint256 _keysCount,
        bytes memory _withdrawalCredentials,
        bytes memory _publicKeysBatch,
        bytes memory _signaturesBatch
    ) public {
        if (_publicKeysBatch.length != PUBLIC_KEY_LENGTH * _keysCount) {
            revert InvalidPublicKeysBatchLength(_publicKeysBatch.length, PUBLIC_KEY_LENGTH * _keysCount);
        }
        if (_signaturesBatch.length != SIGNATURE_LENGTH * _keysCount) {
            revert InvalidSignaturesBatchLength(_signaturesBatch.length, SIGNATURE_LENGTH * _keysCount);
        }

        // Reuse two scratch buffers for every key to avoid per-iteration allocations.
        bytes memory publicKey = MemUtils.unsafeAllocateBytes(PUBLIC_KEY_LENGTH);
        bytes memory signature = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH);

        for (uint256 i; i < _keysCount; ++i) {
            MemUtils.copyBytes(_publicKeysBatch, publicKey, i * PUBLIC_KEY_LENGTH, 0, PUBLIC_KEY_LENGTH);
            MemUtils.copyBytes(_signaturesBatch, signature, i * SIGNATURE_LENGTH, 0, SIGNATURE_LENGTH);

            _depositContract.deposit{value: DEPOSIT_SIZE}(
                publicKey,
                _withdrawalCredentials,
                signature,
                _computeDepositDataRootWithAmount(_withdrawalCredentials, publicKey, signature, DEPOSIT_SIZE_IN_GWEI)
            );
        }
    }

    /// @dev Tops up existing validators with custom amounts using a dummy
    /// signature (the signature is not verified for top-up deposits).
    function makeBeaconChainTopUp(
        IDepositContract _depositContract,
        bytes memory _withdrawalCredentials,
        bytes[] memory _publicKeys,
        uint256[] memory _amount
    ) public {
        uint256 len = _publicKeys.length;
        if (len == 0) return;
        if (len != _amount.length) revert ArrayLengthMismatch();

        bytes memory dummySignature = new bytes(SIGNATURE_LENGTH);

        for (uint256 i; i < len; ++i) {
            bytes memory pk = _publicKeys[i];

            if (pk.length != PUBLIC_KEY_LENGTH) {
                revert InvalidPublicKeysBatchLength(pk.length, PUBLIC_KEY_LENGTH);
            }

            uint256 amount = _amount[i];

            // obtainDepositData can return 0 amount for some keys
            if (amount == 0) continue;

            // Amounts below the minimum deposit (1 ETH) would fail at the deposit contract.
            if (amount < MIN_DEPOSIT) {
                revert DepositAmountTooLow();
            }

            // NOTE(review): amounts not aligned to 1 gwei are floored here but the
            // full wei amount is sent; the official deposit contract rejects
            // non-gwei-aligned values itself — confirm alignment is enforced upstream.
            uint256 amountGwei = amount / 1 gwei;
            if (amountGwei > type(uint64).max) {
                revert AmountTooLarge();
            }
            uint64 amountGwei64 = uint64(amountGwei);

            // full DepositData root with custom amount
            bytes32 depositDataRoot =
                _computeDepositDataRootWithAmount(_withdrawalCredentials, pk, dummySignature, amountGwei64);

            _depositContract.deposit{value: amount}(pk, _withdrawalCredentials, dummySignature, depositDataRoot);
        }
    }

    /// @dev SSZ hash_tree_root of DepositData(pubkey, withdrawal_credentials, amount, signature).
    function _computeDepositDataRootWithAmount(
        bytes memory _withdrawalCredentials,
        bytes memory _publicKey,
        bytes memory _signature,
        uint64 _amountGwei
    ) private pure returns (bytes32) {
        bytes32 publicKeyRoot = sha256(abi.encodePacked(_publicKey, bytes16(0)));
        bytes32 signatureRoot = _computeSignatureRoot(_signature);
        bytes8 amountLE = _toLittleEndian64(_amountGwei);

        return sha256(
            abi.encodePacked(
                sha256(abi.encodePacked(publicKeyRoot, _withdrawalCredentials)),
                sha256(abi.encodePacked(amountLE, bytes24(0), signatureRoot))
            )
        );
    }

    /// @dev SSZ hash_tree_root of a 96-byte BLS signature (two 64-byte chunks,
    /// the second zero-padded).
    function _computeSignatureRoot(bytes memory _signature) private pure returns (bytes32) {
        bytes memory sigPart1 = MemUtils.unsafeAllocateBytes(64);
        bytes memory sigPart2 = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH - 64);

        MemUtils.copyBytes(_signature, sigPart1, 0, 0, 64);
        MemUtils.copyBytes(_signature, sigPart2, 64, 0, SIGNATURE_LENGTH - 64);

        return
            sha256(abi.encodePacked(sha256(abi.encodePacked(sigPart1)), sha256(abi.encodePacked(sigPart2, bytes32(0)))));
    }

    /// @dev Byte-reverses a uint64 into little-endian order, as required by SSZ.
    function _toLittleEndian64(uint64 value) private pure returns (bytes8 ret) {
        ret = bytes8(0);
        for (uint256 i = 0; i < 8; ++i) {
            ret |= bytes8(bytes1(uint8(value >> (8 * i)))) >> (8 * i);
        }
    }

    error InvalidPublicKeysBatchLength(uint256 actual, uint256 expected);
    error InvalidSignaturesBatchLength(uint256 actual, uint256 expected);
    error ArrayLengthMismatch();
    error AmountTooLarge();
    error DepositAmountTooLow();
}

// ============================================================================
// file: contracts/0.8.25/sr/ISRBase.sol (new file — continues in next chunk)
// ============================================================================
// ============================================================================
// file: contracts/0.8.25/sr/ISRBase.sol (new file)
// ============================================================================

// SPDX-License-Identifier: GPL-3.0
pragma solidity 0.8.25;

import {StakingModuleStatus} from "./SRTypes.sol";

/**
 * @title StakingRouter base interface, defines events and errors
 * @author KRogLA
 */
interface ISRBase {
    /**
     * Events
     */
    event StakingModuleAdded(uint256 indexed stakingModuleId, address stakingModule, string name, address createdBy);
    event StakingModuleShareLimitSet(
        uint256 indexed stakingModuleId, uint256 stakeShareLimit, uint256 priorityExitShareThreshold, address setBy
    );
    event StakingModuleFeesSet(
        uint256 indexed stakingModuleId, uint256 stakingModuleFee, uint256 treasuryFee, address setBy
    );
    event StakingModuleMaxDepositsPerBlockSet(
        uint256 indexed stakingModuleId, uint256 maxDepositsPerBlock, address setBy
    );
    event StakingModuleMinDepositBlockDistanceSet(
        uint256 indexed stakingModuleId, uint256 minDepositBlockDistance, address setBy
    );
    event StakingModuleStatusSet(uint256 indexed stakingModuleId, StakingModuleStatus status, address setBy);

    event WithdrawalCredentialsSet(bytes32 withdrawalCredentials, address setBy);

    event StakingRouterETHDeposited(uint256 indexed stakingModuleId, uint256 amount);
    event DepositableEthReceived(uint256 amount);

    event ExitedAndStuckValidatorsCountsUpdateFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData);
    event RewardsMintedReportFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData);
    event StakingModuleExitedValidatorsIncompleteReporting(
        uint256 indexed stakingModuleId, uint256 unreportedExitedValidatorsCount
    );
    event WithdrawalsCredentialsChangeFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData);
    event StakingModuleExitNotificationFailed(
        uint256 indexed stakingModuleId, uint256 indexed nodeOperatorId, bytes _publicKey
    );

    /**
     * Errors
     */

    // Validation
    error InvalidAmountGwei();
    error NotAuthorized();
    error ZeroAddress();
    error ZeroArgument();
    error ArraysLengthMismatch();
    error OracleExtraDataNotSubmitted();

    // Oracle report
    error InvalidReportData(uint256 code);
    error ReportedExitedValidatorsExceedDeposited(
        uint256 reportedExitedValidatorsCount, uint256 depositedValidatorsCount
    );
    error UnexpectedCurrentValidatorsCount(
        uint256 currentModuleExitedValidatorsCount, uint256 currentNodeOpExitedValidatorsCount
    );
    error UnexpectedFinalExitedValidatorsCount(
        uint256 newModuleTotalExitedValidatorsCount, uint256 newModuleTotalExitedValidatorsCountInStakingRouter
    );
    error UnrecoverableModuleError();
    error ExitedValidatorsCountCannotDecrease();

    // Deposits
    error DirectETHTransfer();
    error ModuleReturnExceedTarget();
    error StakingModuleStatusTheSame();
    error EmptyKeysList();
    error WrongPubkeyLength();
    error AmountNotAlignedToGwei();
    error AllocationExceedsLimit();
    error ZeroDeposits();

    // Staking module
    error StakingModuleAddressExists();
    error StakingModulesLimitExceeded();
    error StakingModuleWrongName();
    error StakingModuleUnregistered();
    error StakingModuleNotActive();
    error WrongWithdrawalCredentialsType();
    error InvalidPriorityExitShareThreshold();
    error InvalidMinDepositBlockDistance();
    error InvalidMaxDepositPerBlockValue();
    error InvalidStakeShareLimit();
    error InvalidFeeSum();
    error InconsistentFeeSum();
    error UnexpectedModuleId(uint256 expectedId, uint256 actualId);
}

// ============================================================================
// file: contracts/0.8.25/sr/SRLib.sol (new file)
// ============================================================================

// SPDX-License-Identifier: GPL-3.0
pragma solidity 0.8.25;

import {Math, SafeCast} from "@openzeppelin/contracts-v5.2/utils/math/Math.sol";
import {StorageSlot} from "@openzeppelin/contracts-v5.2/utils/StorageSlot.sol";
import {MinFirstAllocationStrategy} from "contracts/common/lib/MinFirstAllocationStrategy.sol";
// NOTE(review): chunk ends mid-import below; the import continues in the next chunk.
import
// NOTE(review): chunk opens mid-import — the line below completes an `import`
// statement begun in the previous chunk.
    {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol";
import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol";
import {SRStorage} from "./SRStorage.sol";
import {SRUtils} from "./SRUtils.sol";
import {
    ModuleState,
    StakingModuleConfig,
    StakingModuleStatus,
    StakingModule,
    ModuleStateConfig,
    ModuleStateDeposits,
    ModuleStateAccounting,
    ValidatorExitData,
    ValidatorsCountsCorrection,
    RouterStateAccounting
} from "./SRTypes.sol";
import {ISRBase} from "./ISRBase.sol";

/**
 * @title StakingRouter helper external library
 * @author KRogLA
 */
library SRLib {
    using StorageSlot for bytes32;
    using WithdrawalCredentials for bytes32;
    using SRStorage for ModuleState;
    using SRStorage for uint256; // for module IDs

    /// @dev Protocol-level constants, built once per tx from immutables.
    /// @dev Because SRLib is an external library, immutable variables are not
    /// accessible here, so they are passed in as parameters.
    struct Config {
        uint256 maxEBType1;
        uint256 maxEBType2;
    }

    /// @dev Per-module scratch values cached during allocation computations.
    struct ModuleParamsCache {
        uint256 depositableCount;
        uint256 activeCount;
        uint16 shareLimit;
        StakingModuleStatus status;
        uint8 wcType;
    }

    /// @notice One-time migration from the old storage layout to the new RouterState struct.
    /// @dev Storage slot positions are computed inline for migration-only use.
    /// After migration, this function can be removed.
+ function _migrateStorage(uint256 maxEBType1) public { + // skip migration if data already exists + if (SRStorage.getModulesCount() > 0) { + return; + } + + // Old storage slot positions (computed inline for migration-only use) + bytes32 LIDO_POS = keccak256("lido.StakingRouter.lido"); + bytes32 WITHDRAWAL_CREDENTIALS_POS = keccak256("lido.StakingRouter.withdrawalCredentials"); + bytes32 STAKING_MODULES_COUNT_POS = keccak256("lido.StakingRouter.stakingModulesCount"); + bytes32 LAST_STAKING_MODULE_ID_POS = keccak256("lido.StakingRouter.lastStakingModuleId"); + bytes32 CONTRACT_VERSION_POS = keccak256("lido.Versioned.contractVersion"); + bytes32 STAKING_MODULES_MAPPING_POS = keccak256("lido.StakingRouter.stakingModules"); + bytes32 STAKING_MODULE_INDICES_POS = keccak256("lido.StakingRouter.stakingModuleIndicesOneBased"); + + // cleanup old storage slots + delete LIDO_POS.getBytes32Slot().value; + delete CONTRACT_VERSION_POS.getBytes32Slot().value; + + // migrate last staking module ID + SRStorage.getRouterState().lastModuleId = uint24(LAST_STAKING_MODULE_ID_POS.getUint256Slot().value); + delete LAST_STAKING_MODULE_ID_POS.getBytes32Slot().value; + + // migrate WC + SRStorage.getRouterState().withdrawalCredentials = WITHDRAWAL_CREDENTIALS_POS.getBytes32Slot().value; + delete WITHDRAWAL_CREDENTIALS_POS.getBytes32Slot().value; + + uint256 modulesCount = STAKING_MODULES_COUNT_POS.getUint256Slot().value; + delete STAKING_MODULES_COUNT_POS.getBytes32Slot().value; + + // get old storage ref. for staking modules mapping + mapping(uint256 => StakingModule) storage oldStakingModules = + _getStorageStakingModulesMapping(STAKING_MODULES_MAPPING_POS); + // get old storage ref. 
for staking modules indices mapping + mapping(uint256 => uint256) storage oldStakingModuleIndices = + _getStorageStakingIndicesMapping(STAKING_MODULE_INDICES_POS); + + uint64 totalValidatorsBalanceGwei; + StakingModule memory smOld; + + for (uint256 i; i < modulesCount; ++i) { + smOld = oldStakingModules[i]; + + uint256 _moduleId = smOld.id; + // push module ID to EnumerableSet + SRStorage.addModuleId(_moduleId); + + ModuleState storage moduleState = _moduleId.getModuleState(); + + // 1 SSTORE + moduleState.name = smOld.name; + + // 1 SSTORE + moduleState.config = ModuleStateConfig({ + moduleAddress: smOld.stakingModuleAddress, + moduleFee: smOld.stakingModuleFee, + treasuryFee: smOld.treasuryFee, + stakeShareLimit: smOld.stakeShareLimit, + priorityExitShareThreshold: smOld.priorityExitShareThreshold, + status: StakingModuleStatus(smOld.status), + withdrawalCredentialsType: WithdrawalCredentials.WC_TYPE_01 + }); + + // 1 SSTORE + moduleState.deposits = ModuleStateDeposits({ + lastDepositAt: smOld.lastDepositAt, + lastDepositBlock: SafeCast.toUint64(smOld.lastDepositBlock), + maxDepositsPerBlock: smOld.maxDepositsPerBlock, + minDepositBlockDistance: smOld.minDepositBlockDistance + }); + + /// @dev calculate module effective balance at the migration moment + (uint256 exitedValidatorsCount, uint256 depositedValidatorsCount,) = + _getStakingModuleSummary(IStakingModule(smOld.stakingModuleAddress)); + // The module might not receive all exited validators data yet => we need to replacing + // the exitedValidatorsCount with the one that the staking router is aware of. 
+ uint256 activeCount = + depositedValidatorsCount - Math.max(smOld.exitedValidatorsCount, exitedValidatorsCount); + uint64 validatorsBalanceGwei = SRUtils._toGwei(activeCount * maxEBType1); + + // 1 SSTORE + moduleState.accounting = ModuleStateAccounting({ + validatorsBalanceGwei: validatorsBalanceGwei, + exitedValidatorsCount: SafeCast.toUint64(smOld.exitedValidatorsCount) + }); + + totalValidatorsBalanceGwei += validatorsBalanceGwei; + + // cleanup old storage for staking module data + delete oldStakingModules[i]; + delete oldStakingModuleIndices[_moduleId]; + } + + // cleanup old mapping storage slots + delete STAKING_MODULES_MAPPING_POS.getBytes32Slot().value; + delete STAKING_MODULE_INDICES_POS.getBytes32Slot().value; + + /// @dev use the same value for both CL balance and active balance at migration moment, + /// next Oracle report will update the both values + SRStorage.getRouterState().accounting = + RouterStateAccounting({validatorsBalanceGwei: totalValidatorsBalanceGwei}); + } + + /// @dev Helper for migration - returns old staking modules mapping storage reference + function _getStorageStakingModulesMapping(bytes32 _position) + internal + pure + returns (mapping(uint256 => StakingModule) storage $) + { + assembly ("memory-safe") { + $.slot := _position + } + } + + /// @dev Helper for migration - returns old staking module indices mapping storage reference + function _getStorageStakingIndicesMapping(bytes32 _position) + internal + pure + returns (mapping(uint256 => uint256) storage $) + { + assembly ("memory-safe") { + $.slot := _position + } + } + + /// @notice Registers a new staking module. + /// @param _moduleAddress Address of staking module. + /// @param _moduleName Name of staking module. + /// @param _moduleConfig Staking module config + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
+ function _addModule(address _moduleAddress, string calldata _moduleName, StakingModuleConfig calldata _moduleConfig) + public + returns (uint256 newModuleId) + { + SRUtils._requireNotZero(_moduleAddress); + + if (bytes(_moduleName).length == 0 || bytes(_moduleName).length > SRUtils.MAX_STAKING_MODULE_NAME_LENGTH) { + revert ISRBase.StakingModuleWrongName(); + } + if (SRStorage.getModulesCount() >= SRUtils.MAX_STAKING_MODULES_COUNT) { + revert ISRBase.StakingModulesLimitExceeded(); + } + + SRUtils._requireWCTypeValid(_moduleConfig.withdrawalCredentialsType); + + // Check for duplicate module address + /// @dev due to small number of modules, we can afford to do this check on add + uint256 modulesCount = SRStorage.getModulesCount(); + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + if (_moduleAddress == moduleId.getModuleState().config.moduleAddress) { + revert ISRBase.StakingModuleAddressExists(); + } + } + + newModuleId = SRStorage.getRouterState().lastModuleId + 1; + // push new module ID to EnumerableSet + SRStorage.addModuleId(newModuleId); + + ModuleState storage moduleState = newModuleId.getModuleState(); + moduleState.config.moduleAddress = _moduleAddress; + moduleState.config.status = StakingModuleStatus.Active; + moduleState.config.withdrawalCredentialsType = uint8(_moduleConfig.withdrawalCredentialsType); + moduleState.name = _moduleName; + + emit ISRBase.StakingModuleAdded(newModuleId, _moduleAddress, _moduleName, msg.sender); + + _updateModuleParams( + newModuleId, + _moduleConfig.stakeShareLimit, + _moduleConfig.priorityExitShareThreshold, + _moduleConfig.stakingModuleFee, + _moduleConfig.treasuryFee, + _moduleConfig.maxDepositsPerBlock, + _moduleConfig.minDepositBlockDistance + ); + + // save last module ID + SRStorage.getRouterState().lastModuleId = uint24(newModuleId); + return newModuleId; + } + + /// @notice Validates share-related parameters. 
+ /// @param _stakeShareLimit Stake share limit to validate (in basis points). + /// @param _priorityExitShareThreshold Priority exit share threshold to validate (in basis points). + function _validateShareParams(uint256 _stakeShareLimit, uint256 _priorityExitShareThreshold) private pure { + if (_stakeShareLimit > SRUtils.TOTAL_BASIS_POINTS) { + revert ISRBase.InvalidStakeShareLimit(); + } + if (_priorityExitShareThreshold > SRUtils.TOTAL_BASIS_POINTS) { + revert ISRBase.InvalidPriorityExitShareThreshold(); + } + if (_stakeShareLimit > _priorityExitShareThreshold) revert ISRBase.InvalidPriorityExitShareThreshold(); + } + + function _updateModuleParams( + uint256 _moduleId, + uint256 _stakeShareLimit, + uint256 _priorityExitShareThreshold, + uint256 _moduleFee, + uint256 _treasuryFee, + uint256 _maxDepositsPerBlock, + uint256 _minDepositBlockDistance + ) public { + _validateShareParams(_stakeShareLimit, _priorityExitShareThreshold); + if (_moduleFee + _treasuryFee > SRUtils.TOTAL_BASIS_POINTS) revert ISRBase.InvalidFeeSum(); + _requireConsistentFeeSum(_moduleId, _moduleFee, _treasuryFee); + if (_minDepositBlockDistance == 0 || _minDepositBlockDistance > type(uint64).max) { + revert ISRBase.InvalidMinDepositBlockDistance(); + } + if (_maxDepositsPerBlock > type(uint64).max) revert ISRBase.InvalidMaxDepositPerBlockValue(); + + // 1 SLOAD + ModuleStateConfig memory stateConfig = _moduleId.getModuleState().config; + // forge-lint: disable-start(unsafe-typecast) + stateConfig.moduleFee = uint16(_moduleFee); + stateConfig.treasuryFee = uint16(_treasuryFee); + stateConfig.stakeShareLimit = uint16(_stakeShareLimit); + stateConfig.priorityExitShareThreshold = uint16(_priorityExitShareThreshold); + // 1 SSTORE + _moduleId.getModuleState().config = stateConfig; + + // 1 SLOAD + ModuleStateDeposits memory stateDeposits = _moduleId.getModuleState().deposits; + stateDeposits.maxDepositsPerBlock = SafeCast.toUint64(_maxDepositsPerBlock); + stateDeposits.minDepositBlockDistance = 
SafeCast.toUint64(_minDepositBlockDistance); + // forge-lint: disable-end(unsafe-typecast) + // 1 SSTORE + _moduleId.getModuleState().deposits = stateDeposits; + + address setBy = msg.sender; + emit ISRBase.StakingModuleShareLimitSet(_moduleId, _stakeShareLimit, _priorityExitShareThreshold, setBy); + emit ISRBase.StakingModuleFeesSet(_moduleId, _moduleFee, _treasuryFee, setBy); + emit ISRBase.StakingModuleMaxDepositsPerBlockSet(_moduleId, _maxDepositsPerBlock, setBy); + emit ISRBase.StakingModuleMinDepositBlockDistanceSet(_moduleId, _minDepositBlockDistance, setBy); + } + + function _requireConsistentFeeSum(uint256 _moduleId, uint256 _moduleFee, uint256 _treasuryFee) internal view { + uint256 feeSum = _moduleFee + _treasuryFee; + uint256 modulesCount = SRStorage.getModulesCount(); + + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + if (moduleId == _moduleId) continue; + + ModuleStateConfig memory stateConfig = moduleId.getModuleState().config; + if (uint256(stateConfig.moduleFee) + uint256(stateConfig.treasuryFee) != feeSum) { + revert ISRBase.InconsistentFeeSum(); + } + } + } + + function _updateAllModuleFees(uint256[] calldata _moduleFees, uint256[] calldata _treasuryFees) public { + uint256 modulesCount = SRStorage.getModulesCount(); + if (_moduleFees.length != modulesCount || _treasuryFees.length != modulesCount) { + revert ISRBase.ArraysLengthMismatch(); + } + if (modulesCount == 0) { + return; + } + + uint256 expectedFeeSum = _moduleFees[0] + _treasuryFees[0]; + if (expectedFeeSum > SRUtils.TOTAL_BASIS_POINTS) revert ISRBase.InvalidFeeSum(); + + for (uint256 i = 1; i < modulesCount; ++i) { + uint256 feeSum = _moduleFees[i] + _treasuryFees[i]; + if (feeSum > SRUtils.TOTAL_BASIS_POINTS) revert ISRBase.InvalidFeeSum(); + if (feeSum != expectedFeeSum) revert ISRBase.InconsistentFeeSum(); + } + + address setBy = msg.sender; + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + 
ModuleStateConfig memory stateConfig = moduleId.getModuleState().config; + // forge-lint: disable-start(unsafe-typecast) + stateConfig.moduleFee = uint16(_moduleFees[i]); + stateConfig.treasuryFee = uint16(_treasuryFees[i]); + // forge-lint: disable-end(unsafe-typecast) + moduleId.getModuleState().config = stateConfig; + emit ISRBase.StakingModuleFeesSet(moduleId, _moduleFees[i], _treasuryFees[i], setBy); + } + } + + /// @notice Updates only the share-related params of a staking module. + /// @param _moduleId Id of the staking module. + /// @param _stakeShareLimit New stake share limit (in basis points). + /// @param _priorityExitShareThreshold New priority exit share threshold (in basis points). + function _updateModuleShares(uint256 _moduleId, uint256 _stakeShareLimit, uint256 _priorityExitShareThreshold) + public + { + _validateShareParams(_stakeShareLimit, _priorityExitShareThreshold); + + // 1 SLOAD + ModuleStateConfig memory stateConfig = _moduleId.getModuleState().config; + + // forge-lint: disable-start(unsafe-typecast) + stateConfig.stakeShareLimit = uint16(_stakeShareLimit); + stateConfig.priorityExitShareThreshold = uint16(_priorityExitShareThreshold); + // forge-lint: disable-end(unsafe-typecast) + + // 1 SSTORE + _moduleId.getModuleState().config = stateConfig; + + emit ISRBase.StakingModuleShareLimitSet(_moduleId, _stakeShareLimit, _priorityExitShareThreshold, msg.sender); + } + + /// @dev module state helpers + + function _setModuleStatus(uint256 _moduleId, StakingModuleStatus _status) public returns (bool isChanged) { + ModuleStateConfig storage stateConfig = _moduleId.getModuleState().config; + isChanged = stateConfig.status != _status; + if (isChanged) { + stateConfig.status = _status; + emit ISRBase.StakingModuleStatusSet(_moduleId, _status, msg.sender); + } + } + + /// @dev Optimizes contract deployment size by wrapping the 'stakingModule.getStakingModuleSummary' function. 
+ function _getStakingModuleSummary(IStakingModule module) + internal + view + returns (uint256 exitedValidators, uint256 depositedValidators, uint256 depositableValidators) + { + return module.getStakingModuleSummary(); + } + + /// @notice Deposit allocation for modules + /// @dev Allocates deposits to staking modules based on their stake share limits and available capacity. + /// The allocation algorithm prioritizes modules with lower validator (WC 0x01 equivalent) counts (MinFirst strategy). + /// @dev Method uses conversion from/to Ether amounts due to MinFirstAllocationStrategy working with unit values. + /// @param _cfg - protocol-level constants + /// @param _allocateAmount - Eth amount that should be allocated into modules + /// @param _isTopUp - flag indicating whether the allocation is for top-up deposits + /// @return totalAllocated - amount actually allocated + /// @return allocated - Array of newly allocated amounts for each module + /// @return newAllocations - Array of new allocation amounts for each module + function _getDepositAllocations(Config calldata _cfg, uint256 _allocateAmount, bool _isTopUp) + public + view + returns (uint256 totalAllocated, uint256[] memory allocated, uint256[] memory newAllocations) + { + uint256 modulesCount = SRStorage.getModulesCount(); + if (modulesCount == 0) { + return (0, new uint256[](0), new uint256[](0)); + } + + // put calldata var to stack + uint256 initialDeposit = _cfg.maxEBType1; + // convert to validators equivalent + uint256 depositsToAllocate = _allocateAmount / initialDeposit; + // get current allocations and capacities in validators equivalent + uint256[] memory capacities; + // @dev using output parameter as temporary storage for current allocations + (allocated, capacities) = _getModulesAllocationAndCapacity(_cfg, depositsToAllocate, _isTopUp); + + // If no deposits to allocate, return current state + if (depositsToAllocate > 0) { + // Use MinFirstAllocationStrategy to allocate deposits + /// @dev 
since the library is external, the `allocated` array is not mutated
+            (totalAllocated, newAllocations) =
+                MinFirstAllocationStrategy.allocate(allocated, capacities, depositsToAllocate);
+            // Convert allocated validators and allocations per module back to Ether amounts
+            totalAllocated *= initialDeposit;
+            for (uint256 i = 0; i < modulesCount; ++i) {
+                // get allocation delta only: new - current
+                allocated[i] = (newAllocations[i] - allocated[i]) * initialDeposit;
+                newAllocations[i] *= initialDeposit;
+            }
+        } else {
+            newAllocations = new uint256[](modulesCount);
+            // Convert allocations per module back to Ether amounts
+            for (uint256 i = 0; i < modulesCount; ++i) {
+                newAllocations[i] = allocated[i] * initialDeposit;
+                allocated[i] = 0;
+            }
+        }
+    }
+
+    /// @notice calculate allocation amount for single module
+    function _getModuleDepositAllocation(
+        Config calldata _cfg,
+        uint256 _moduleId,
+        uint256 _allocateAmount,
+        bool _isTopUp
+    ) public view returns (uint256 allocation) {
+        (, uint256[] memory allocated,) = _getDepositAllocations(_cfg, _allocateAmount, _isTopUp);
+        uint256 moduleIdx = SRUtils._getModuleIndexById(_moduleId);
+        allocation = allocated[moduleIdx];
+    }
+
+    /**
+     * @notice calculate allocation amounts for all modules
+     * @dev If `_isTopUp` is `true`, allocation is performed for top-up deposits targeting
+     * WC type `0x02` validators. In this case, `_cfg.maxEBType2` is used
+     * to correctly calculate the module's capacity.
+     *
+     * @dev The Allocation logic must preserve the same priority between modules
+     * regardless of the allocation type or amount (initial seed deposits or top-ups).
+     *
+     * For seed deposits this is straightforward. Both regular modules (0x01)
+     * and modules with keys 0x02 use the same depositableValidatorsCount metric,
+     * so the allocation priority is naturally consistent.
+     *
+     * Top-up allocation is less obvious and requires additional considerations.
+     *
+     * Important facts:
+     *
+     * 1.
Top-ups are only possible for modules with keys type 0x02. + * 2. The total top-up amount is limited by the unused capacity of already active keys. + * 3. The method call with the flag `isTopUp = true` is used only when calculating + * top-up allocations. In other words, the values returned for modules 0x01 + * are ignored by the caller. + * + * Since allocation uses the MinFirstAllocationStrategy, we must not exclude + * modules 0x01 from the selection during top-up calculations (for example, + * by setting their capacity to zero). If we did, the algorithm would attempt + * to distribute the entire available amount only across modules 0x02. + * + * This would incorrectly increase the priority of deposits into modules 0x02 + * relative to modules 0x01. + * + * Therefore the following approach is used: + * + * - For modules 0x01 we keep the same capacity as for regular seed deposits. + * Formally, these modules cannot receive top-ups, but they must remain + * visible to the allocation strategy to preserve priority ordering. + * + * - For modules 0x02 the capacity is set only to the remaining unused capacity + * of already active keys. + * + * At first glance this may appear to prioritize deposits into modules 0x01. + * However, taking fact #3 into account, the returned allocations for modules + * 0x01 are never used. They are only an artifact of the MinFirstAllocationStrategy. + * + * This design preserves the correct global priority between modules while + * still allowing the system to fully utilize the available top-up capacity + * of modules with keys type 0x02. 
+     */
+    function _getModulesAllocationAndCapacity(Config calldata _cfg, uint256 depositsToAllocate, bool _isTopUp)
+        internal
+        view
+        returns (uint256[] memory _allocations, uint256[] memory _capacities)
+    {
+        uint256 modulesCount = SRStorage.getModulesCount();
+        _allocations = new uint256[](modulesCount);
+
+        ModuleParamsCache[] memory cache = new ModuleParamsCache[](modulesCount);
+        ModuleState storage moduleState;
+        ModuleStateConfig memory stateConfig;
+
+        uint256 totalValidators;
+        uint256 maxEBType1 = _cfg.maxEBType1;
+        for (uint256 i = 0; i < modulesCount; ++i) {
+            uint256 moduleId = SRStorage.getModuleIdAt(i);
+            moduleState = moduleId.getModuleState();
+            stateConfig = moduleState.config;
+            // caching config
+            cache[i].shareLimit = stateConfig.stakeShareLimit;
+            cache[i].status = stateConfig.status;
+            cache[i].wcType = stateConfig.withdrawalCredentialsType;
+            (uint256 exitedValidatorsCount, uint256 depositedValidatorsCount, uint256 depositableValidatorsCount) =
+                _getStakingModuleSummary(moduleId.getIStakingModule());
+            cache[i].depositableCount = depositableValidatorsCount;
+
+            // get active validators count
+            uint256 validatorsCount = depositedValidatorsCount
+                - Math.max(exitedValidatorsCount, moduleState.accounting.exitedValidatorsCount);
+
+            // save to cache
+            cache[i].activeCount = validatorsCount;
+
+            if (WithdrawalCredentials.isType2(stateConfig.withdrawalCredentialsType)) {
+                // Calculate equivalent of WC01 validators count rounded up: ceil(balance / maxEBType1)
+                validatorsCount = Math.ceilDiv(moduleId.getIStakingModuleV2().getTotalModuleStake(), maxEBType1);
+            }
+            _allocations[i] = validatorsCount;
+            totalValidators += validatorsCount;
+        }
+        // new total validators count after allocation
+        totalValidators += depositsToAllocate;
+        _capacities = new uint256[](modulesCount);
+
+        // put calldata maxEBType2 to stack
+        uint256 maxEBType2 = _cfg.maxEBType2;
+
+        for (uint256 i = 0; i < modulesCount; ++i) {
+            // module initial capacity = current allocation
+            uint256 validatorsCapacity = _allocations[i];
+            if (cache[i].status == StakingModuleStatus.Active) {
+                if (_isTopUp && WithdrawalCredentials.isType2(cache[i].wcType)) {
+                    // max eth capacity of active validators = n * maxEB,
+                    // so capacity in validators equivalent = n * maxEBType2 / maxEBType1
+                    validatorsCapacity = cache[i].activeCount * maxEBType2 / maxEBType1;
+                } else {
+                    validatorsCapacity = _allocations[i] + cache[i].depositableCount;
+                }
+                // Calculate target validators for each module based on stake share limits
+                // Target validators = (stakeShareLimit * totalValidators) / TOTAL_BASIS_POINTS
+                uint256 targetValidators = (cache[i].shareLimit * totalValidators) / SRUtils.TOTAL_BASIS_POINTS;
+                // Module capacity is limited by available validators and target share
+                validatorsCapacity = Math.min(targetValidators, validatorsCapacity);
+            }
+
+            _capacities[i] = validatorsCapacity;
+        }
+    }
+
+    /// @notice Handles tracking and penalization logic for a node operator who failed to exit their validator within the defined exit window.
+    /// @dev This function is called to report the current exit-related status of a validator belonging to a specific node operator.
+    /// It accepts a validator's public key, associated with the duration (in seconds) it was eligible to exit but has not exited.
+    /// This data could be used to trigger penalties for the node operator if the validator has been non-exiting for too long.
+    /// @param _stakingModuleId The ID of the staking module.
+    /// @param _nodeOperatorId The ID of the node operator whose validator status is being delivered.
+    /// @param _proofSlotTimestamp The timestamp (slot time) when the validator was last known to be in an active ongoing state.
+    /// @param _publicKey The public key of the validator being reported.
+    /// @param _eligibleToExitInSec The duration (in seconds) indicating how long the validator has been eligible to exit after request but has not exited.
+ function _reportValidatorExitDelay( + uint256 _stakingModuleId, + uint256 _nodeOperatorId, + uint256 _proofSlotTimestamp, + bytes calldata _publicKey, + uint256 _eligibleToExitInSec + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + _stakingModuleId.getIStakingModule() + .reportValidatorExitDelay(_nodeOperatorId, _proofSlotTimestamp, _publicKey, _eligibleToExitInSec); + } + + /// @notice Handles the triggerable exit event for a set of validators. + /// @dev This function is called when validators are exited using triggerable exit requests on the Execution Layer. + /// @param validatorExitData An array of `ValidatorExitData` structs, each representing a validator + /// for which a triggerable exit was requested. Each entry includes: + /// - `stakingModuleId`: ID of the staking module. + /// - `nodeOperatorId`: ID of the node operator. + /// - `pubkey`: Validator public key, 48 bytes length. + /// @param _withdrawalRequestPaidFee Fee amount paid to send a withdrawal request on the Execution Layer (EL). + /// @param _exitType The type of exit being performed. + /// This parameter may be interpreted differently across various staking modules depending on their specific implementation. + function _onValidatorExitTriggered( + ValidatorExitData[] calldata validatorExitData, + uint256 _withdrawalRequestPaidFee, + uint256 _exitType + ) public { + ValidatorExitData calldata data; + for (uint256 i = 0; i < validatorExitData.length; ++i) { + data = validatorExitData[i]; + SRUtils._requireModuleIdExists(data.stakingModuleId); + try data.stakingModuleId.getIStakingModule() + .onValidatorExitTriggered(data.nodeOperatorId, data.pubkey, _withdrawalRequestPaidFee, _exitType) {} + catch (bytes memory lowLevelRevertData) { + /// @dev This check is required to prevent incorrect gas estimation of the method. 
+ /// Without it, Ethereum nodes that use binary search for gas estimation may + /// return an invalid value when the onValidatorExitTriggered() + /// reverts because of the "out of gas" error. Here we assume that the + /// onValidatorExitTriggered() method doesn't have reverts with + /// empty error data except "out of gas". + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + emit ISRBase.StakingModuleExitNotificationFailed(data.stakingModuleId, data.nodeOperatorId, data.pubkey); + } + } + } + + /// @notice Reports the minted rewards to the staking modules with the specified ids. + /// @param _stakingModuleIds Ids of the staking modules. + /// @param _totalShares Total shares minted for the staking modules. + /// @dev The function is restricted to the `REPORT_REWARDS_MINTED_ROLE` role. + function _reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares) public { + uint256 n = _stakingModuleIds.length; + if (_totalShares.length != n) revert ISRBase.ArraysLengthMismatch(); + + for (uint256 i = 0; i < n; ++i) { + if (_totalShares[i] == 0) continue; + SRUtils._requireModuleIdExists(_stakingModuleIds[i]); + + try _stakingModuleIds[i].getIStakingModule().onRewardsMinted(_totalShares[i]) {} + catch (bytes memory lowLevelRevertData) { + /// @dev This check is required to prevent incorrect gas estimation of the method. + /// Without it, Ethereum nodes that use binary search for gas estimation may + /// return an invalid value when the onRewardsMinted() reverts because of the + /// "out of gas" error. Here we assume that the onRewardsMinted() method doesn't + /// have reverts with empty error data except "out of gas". + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + emit ISRBase.RewardsMintedReportFailed(_stakingModuleIds[i], lowLevelRevertData); + } + } + } + + /// @notice Finalizes the reporting of the exited validators counts for the current + /// reporting frame. 
+ /// + /// @dev Called by the oracle when the second phase of data reporting finishes, i.e. when the + /// oracle submitted the complete data on the exited validator counts per node operator + /// for the current reporting frame. See the docs for `updateExitedValidatorsCountByStakingModule` + /// for the description of the overall update process. + /// + /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. + function _onValidatorsCountsByNodeOperatorReportingFinished() public { + uint256 modulesCount = SRStorage.getModulesCount(); + + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + ModuleState storage state = moduleId.getModuleState(); + IStakingModule stakingModule = state.getIStakingModule(); + + (uint256 exitedValidatorsCount,,) = _getStakingModuleSummary(stakingModule); + if (exitedValidatorsCount != state.accounting.exitedValidatorsCount) continue; + + // oracle finished updating exited validators for all node ops + try stakingModule.onExitedAndStuckValidatorsCountsUpdated() {} + catch (bytes memory lowLevelRevertData) { + /// @dev This check is required to prevent incorrect gas estimation of the method. + /// Without it, Ethereum nodes that use binary search for gas estimation may + /// return an invalid value when the onExitedAndStuckValidatorsCountsUpdated() + /// reverts because of the "out of gas" error. Here we assume that the + /// onExitedAndStuckValidatorsCountsUpdated() method doesn't have reverts with + /// empty error data except "out of gas". + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + emit ISRBase.ExitedAndStuckValidatorsCountsUpdateFailed(moduleId, lowLevelRevertData); + } + } + } + + /// @notice Decreases vetted signing keys counts per node operator for the staking module with + /// the specified id. + /// @param _stakingModuleId The id of the staking module to be updated. 
+ /// @param _nodeOperatorIds Ids of the node operators to be updated. + /// @param _vettedSigningKeysCounts New counts of vetted signing keys for the specified node operators. + /// @dev The function is restricted to the `STAKING_MODULE_UNVETTING_ROLE` role. + function _decreaseStakingModuleVettedKeysCountByNodeOperator( + uint256 _stakingModuleId, + bytes calldata _nodeOperatorIds, + bytes calldata _vettedSigningKeysCounts + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + _checkOperatorsReportData(_nodeOperatorIds, _vettedSigningKeysCounts); + _stakingModuleId.getIStakingModule().decreaseVettedSigningKeysCount(_nodeOperatorIds, _vettedSigningKeysCounts); + } + + /// @notice Updates exited validators counts per node operator for the staking module with + /// the specified id. See the docs for `updateExitedValidatorsCountByStakingModule` for the + /// description of the overall update process. + /// + /// @param _stakingModuleId The id of the staking modules to be updated. + /// @param _nodeOperatorIds Ids of the node operators to be updated. + /// @param _exitedValidatorsCounts New counts of exited validators for the specified node operators. + function _reportStakingModuleOperatorExitedValidators( + uint256 _stakingModuleId, + bytes calldata _nodeOperatorIds, + bytes calldata _exitedValidatorsCounts + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + _checkOperatorsReportData(_nodeOperatorIds, _exitedValidatorsCounts); + _stakingModuleId.getIStakingModule().updateExitedValidatorsCount(_nodeOperatorIds, _exitedValidatorsCounts); + } + + /// @notice Updates total numbers of exited validators for staking modules with the specified module ids. + /// @param _stakingModuleIds Ids of the staking modules to be updated. + /// @param _exitedValidatorsCounts New counts of exited validators for the specified staking modules. + /// @return The total increase in the aggregate number of exited validators across all updated modules. 
+    ///
+    /// @dev The total numbers are stored in the staking router and can differ from the totals obtained by calling
+    /// `IStakingModule.getStakingModuleSummary()`. The overall process of updating validator counts is the following:
+    ///
+    /// 1. In the first data submission phase, the oracle calls `updateExitedValidatorsCountByStakingModule` on the
+    /// staking router, passing the totals by module. The staking router stores these totals and uses them to
+    /// distribute new stake and staking fees between the modules. There can only be a single call of this function
+    /// per oracle reporting frame.
+    ///
+    /// 2. In the second part of the second data submission phase, the oracle calls
+    /// `StakingRouter.reportStakingModuleExitedValidatorsCountByNodeOperator` on the staking router which passes
+    /// the counts by node operator to the staking module by calling `IStakingModule.updateExitedValidatorsCount`.
+    /// This can be done multiple times for the same module, passing data for different subsets of node
+    /// operators.
+    ///
+    /// 3. At the end of the second data submission phase, it's expected for the aggregate exited validators count
+    /// across all module's node operators (stored in the module) to match the total count for this module
+    /// (stored in the staking router). However, it might happen that the second phase of data submission doesn't
+    /// finish until the new oracle reporting frame is started, in which case staking router will emit a warning
+    /// event `StakingModuleExitedValidatorsIncompleteReporting` when the first data submission phase is performed
+    /// for a new reporting frame. This condition will result in the staking module having incomplete data about
+    /// the exited validator counts during the whole reporting frame. Handling this condition is
+    /// the responsibility of each staking module.
+    ///
+    /// 4. When the second reporting phase is finished, i.e.
when the oracle submitted the complete data on the exited + /// validator counts per node operator for the current reporting frame, the oracle calls + /// `StakingRouter.onValidatorsCountsByNodeOperatorReportingFinished` which, in turn, calls + /// `IStakingModule.onExitedAndStuckValidatorsCountsUpdated` on all modules. + /// + /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. + function _updateExitedValidatorsCountByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _exitedValidatorsCounts + ) public returns (uint256) { + uint256 n = _stakingModuleIds.length; + if (_exitedValidatorsCounts.length != n) revert ISRBase.ArraysLengthMismatch(); + + uint256 newlyExitedValidatorsCount; + + for (uint256 i = 0; i < n; ++i) { + uint256 moduleId = _stakingModuleIds[i]; + SRUtils._requireModuleIdExists(moduleId); + ModuleState storage state = moduleId.getModuleState(); + ModuleStateAccounting storage moduleAcc = state.accounting; + uint64 prevReportedExitedValidatorsCount = moduleAcc.exitedValidatorsCount; + + uint64 newReportedExitedValidatorsCount = SafeCast.toUint64(_exitedValidatorsCounts[i]); + + if (newReportedExitedValidatorsCount < prevReportedExitedValidatorsCount) { + revert ISRBase.ExitedValidatorsCountCannotDecrease(); + } + + (uint256 totalExitedValidators, uint256 totalDepositedValidators,) = + _getStakingModuleSummary(state.getIStakingModule()); + + if (newReportedExitedValidatorsCount > totalDepositedValidators) { + revert ISRBase.ReportedExitedValidatorsExceedDeposited( + newReportedExitedValidatorsCount, totalDepositedValidators + ); + } + + newlyExitedValidatorsCount += newReportedExitedValidatorsCount - prevReportedExitedValidatorsCount; + + if (totalExitedValidators < prevReportedExitedValidatorsCount) { + // not all of the exited validators were async reported to the module + unchecked { + emit ISRBase.StakingModuleExitedValidatorsIncompleteReporting( + moduleId, prevReportedExitedValidatorsCount - 
totalExitedValidators + ); + } + } + + // save new value + moduleAcc.exitedValidatorsCount = newReportedExitedValidatorsCount; + } + + return newlyExitedValidatorsCount; + } + + /// @notice Sets exited validators count for the given module and given node operator in that module + /// without performing critical safety checks, e.g. that exited validators count cannot decrease. + /// + /// Should only be used by the DAO in extreme cases and with sufficient precautions to correct invalid + /// data reported by the oracle committee due to a bug in the oracle daemon. + /// + /// @param _stakingModuleId Id of the staking module. + /// @param _nodeOperatorId Id of the node operator. + /// @param _triggerUpdateFinish Whether to call `onExitedAndStuckValidatorsCountsUpdated` on the module + /// after applying the corrections. + /// @param _correction See the docs for the `ValidatorsCountsCorrection` struct. + /// + /// @dev Reverts if the current numbers of exited validators of the module and node operator + /// don't match the supplied expected current values. + /// + /// @dev The function is restricted to the `UNSAFE_SET_EXITED_VALIDATORS_ROLE` role. 
+ function _unsafeSetExitedValidatorsCount( + uint256 _stakingModuleId, + uint256 _nodeOperatorId, + bool _triggerUpdateFinish, + ValidatorsCountsCorrection calldata _correction + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + ModuleState storage state = _stakingModuleId.getModuleState(); + ModuleStateAccounting storage moduleAcc = state.accounting; + uint64 prevReportedExitedValidatorsCount = moduleAcc.exitedValidatorsCount; + IStakingModule stakingModule = state.getIStakingModule(); + + (,,,,, uint256 totalExitedValidators,,) = stakingModule.getNodeOperatorSummary(_nodeOperatorId); + + if ( + _correction.currentModuleExitedValidatorsCount != prevReportedExitedValidatorsCount + || _correction.currentNodeOperatorExitedValidatorsCount != totalExitedValidators + ) { + revert ISRBase.UnexpectedCurrentValidatorsCount(prevReportedExitedValidatorsCount, totalExitedValidators); + } + + moduleAcc.exitedValidatorsCount = SafeCast.toUint64(_correction.newModuleExitedValidatorsCount); + + stakingModule.unsafeUpdateValidatorsCount(_nodeOperatorId, _correction.newNodeOperatorExitedValidatorsCount); + + (uint256 moduleTotalExitedValidators, uint256 moduleTotalDepositedValidators,) = + _getStakingModuleSummary(stakingModule); + + if (_correction.newModuleExitedValidatorsCount > moduleTotalDepositedValidators) { + revert ISRBase.ReportedExitedValidatorsExceedDeposited( + _correction.newModuleExitedValidatorsCount, moduleTotalDepositedValidators + ); + } + + if (_triggerUpdateFinish) { + if (moduleTotalExitedValidators != _correction.newModuleExitedValidatorsCount) { + revert ISRBase.UnexpectedFinalExitedValidatorsCount( + moduleTotalExitedValidators, _correction.newModuleExitedValidatorsCount + ); + } + + stakingModule.onExitedAndStuckValidatorsCountsUpdated(); + } + } + + /// @dev report MUST include all modules in the same order as they are registered in the SR + function _validateReportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + 
uint256[] calldata _validatorBalancesGwei + ) public view { + uint256 n = SRStorage.getModulesCount(); + + if (_stakingModuleIds.length != n || _validatorBalancesGwei.length != n) { + revert ISRBase.ArraysLengthMismatch(); + } + + for (uint256 i = 0; i < n; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + if (moduleId != _stakingModuleIds[i]) revert ISRBase.UnexpectedModuleId(moduleId, _stakingModuleIds[i]); + + SRUtils._ensureAmountGwei(_validatorBalancesGwei[i]); + } + } + + /// @dev report MUST include all modules in the same order as they are registered in the SR + function _reportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) public { + _validateReportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei); + + uint256 n = _stakingModuleIds.length; + uint64 totalValidatorsBalanceGwei; + for (uint256 i = 0; i < n; ++i) { + uint256 moduleId = _stakingModuleIds[i]; + ModuleStateAccounting storage moduleAcc = moduleId.getModuleState().accounting; + uint64 validatorsBalanceGwei = uint64(_validatorBalancesGwei[i]); + + moduleAcc.validatorsBalanceGwei = validatorsBalanceGwei; + + totalValidatorsBalanceGwei += validatorsBalanceGwei; + } + RouterStateAccounting storage routerAcc = SRStorage.getRouterState().accounting; + routerAcc.validatorsBalanceGwei = totalValidatorsBalanceGwei; + } + + /// @dev Save the last deposit state for the staking module + /// @param _moduleId id of the staking module to be deposited + function _updateModuleLastDepositState(uint256 _moduleId) public { + ModuleStateDeposits storage stateDeposits = _moduleId.getModuleState().deposits; + + stateDeposits.lastDepositAt = uint64(block.timestamp); + stateDeposits.lastDepositBlock = uint64(block.number); + } + + function _notifyStakingModulesOfWithdrawalCredentialsChange() public { + uint256 modulesCount = SRStorage.getModulesCount(); + + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId 
= SRStorage.getModuleIdAt(i); + + try moduleId.getIStakingModule().onWithdrawalCredentialsChanged() {} + catch (bytes memory lowLevelRevertData) { + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + if (moduleId.getModuleState().config.status == StakingModuleStatus.Active) { + _setModuleStatus(moduleId, StakingModuleStatus.DepositsPaused); + } + emit ISRBase.WithdrawalsCredentialsChangeFailed(moduleId, lowLevelRevertData); + } + } + } + + function _checkOperatorsReportData(bytes calldata _ids, bytes calldata _values) internal pure { + if (_ids.length % 8 != 0 || _values.length % 16 != 0) { + revert ISRBase.InvalidReportData(3); + } + uint256 count = _ids.length / 8; + if (_values.length / 16 != count) { + revert ISRBase.InvalidReportData(2); + } + if (count == 0) { + revert ISRBase.InvalidReportData(1); + } + } +} diff --git a/contracts/0.8.25/sr/SRStorage.sol b/contracts/0.8.25/sr/SRStorage.sol new file mode 100644 index 0000000000..29ce99a053 --- /dev/null +++ b/contracts/0.8.25/sr/SRStorage.sol @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; +import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; +import {IStakingModuleV2} from "contracts/common/interfaces/IStakingModuleV2.sol"; +import {ModuleState, RouterState} from "./SRTypes.sol"; + +library SRStorage { + using EnumerableSet for EnumerableSet.UintSet; + using SRStorage for ModuleState; + + /// @dev RouterState storage position + bytes32 internal constant ROUTER_STORAGE_POSITION = keccak256( + abi.encode(uint256(keccak256(abi.encodePacked("lido.StakingRouter.routerStorage"))) - 1) + ) & ~bytes32(uint256(0xff)); + + /// @dev get RouterState storage reference + function getRouterState() internal pure returns (RouterState storage $) { + bytes32 _position = ROUTER_STORAGE_POSITION; + assembly ("memory-safe") { + $.slot := _position + } 
+ } + + /** + * Module state helpers + */ + + function getModuleState(uint256 _moduleId) internal view returns (ModuleState storage) { + return getRouterState().moduleStates[_moduleId]; + } + + function getIStakingModule(ModuleState storage $) internal view returns (IStakingModule) { + return IStakingModule($.config.moduleAddress); + } + + function getIStakingModuleV2(ModuleState storage $) internal view returns (IStakingModuleV2) { + return IStakingModuleV2($.config.moduleAddress); + } + + function getIStakingModule(uint256 _moduleId) internal view returns (IStakingModule) { + return getModuleState(_moduleId).getIStakingModule(); + } + + function getIStakingModuleV2(uint256 _moduleId) internal view returns (IStakingModuleV2) { + return getModuleState(_moduleId).getIStakingModuleV2(); + } + + /** + * ModuleIds set helpers + */ + + function getModulesCount() internal view returns (uint256) { + return getRouterState().moduleIds.length(); + } + + function getModuleIds() internal view returns (uint256[] memory) { + return getRouterState().moduleIds.values(); + } + + function getModuleIdAt(uint256 _idx) internal view returns (uint256) { + return getRouterState().moduleIds.at(_idx); + } + + function isModuleExists(uint256 _moduleId) internal view returns (bool) { + return getRouterState().moduleIds.contains(_moduleId); + } + + /// @notice get module inner position in the list of modules (1-based) + /// @dev direct access to EnumerableSet internal storage + function getModuleIdInnerPosition(uint256 _moduleId) internal view returns (uint256) { + return getRouterState().moduleIds._inner._positions[bytes32(_moduleId)]; + } + + function addModuleId(uint256 _moduleId) internal { + getRouterState().moduleIds.add(_moduleId); + } +} diff --git a/contracts/0.8.25/sr/SRTypes.sol b/contracts/0.8.25/sr/SRTypes.sol new file mode 100644 index 0000000000..92bd52fbb8 --- /dev/null +++ b/contracts/0.8.25/sr/SRTypes.sol @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma 
solidity 0.8.25; + +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; + +/** + * @title StakingRouter shared types + * @author KRogLA + */ + +interface ILido { + function getDepositableEther() external view returns (uint256); + function withdrawDepositableEther(uint256 _amount, uint256 _seedDepositsCount) external; +} + +interface IAccountingOracle { + ///@dev returns a tuple instead of a structure to avoid allocating memory + function getProcessingState() + external + view + returns ( + uint256 currentFrameRefSlot, + uint256 processingDeadlineTime, + bytes32 mainDataHash, + bool mainDataSubmitted, + bytes32 extraDataHash, + uint256 extraDataFormat, + bool extraDataSubmitted, + uint256 extraDataItemsCount, + uint256 extraDataItemsSubmitted + ); + function getLastProcessingRefSlot() external view returns (uint256); +} + +/// @dev Since `enum` is `uint8` by nature, so the `status` is stored as `uint8` to avoid +/// possible problems when upgrading. But for human readability, we use `enum` as +/// function parameter type. More about conversion in the docs: +/// https://docs.soliditylang.org/en/v0.8.17/types.html#enums +enum StakingModuleStatus { + Active, // deposits and rewards allowed + DepositsPaused, // deposits NOT allowed, rewards allowed + Stopped // deposits and rewards NOT allowed +} + +/// @notice Configuration parameters for a staking module. +/// @dev Used when adding or updating a staking module to set operational limits, fee parameters, +/// and withdrawal credential type. +struct StakingModuleConfig { + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%). + uint256 stakeShareLimit; + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. 
+ /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%) and + /// greater than or equal to `stakeShareLimit`. + uint256 priorityExitShareThreshold; + /// @notice Part of the fee taken from staking rewards that goes to the staking module, in BP. + /// @dev Together with `treasuryFee`, must not exceed TOTAL_BASIS_POINTS. + uint256 stakingModuleFee; + /// @notice Part of the fee taken from staking rewards that goes to the treasury, in BP. + /// @dev Together with `stakingModuleFee`, must not exceed TOTAL_BASIS_POINTS. + uint256 treasuryFee; + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must not exceed type(uint64).max. + uint256 maxDepositsPerBlock; + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must be > 0 and ≤ type(uint64).max. + uint256 minDepositBlockDistance; + /// @notice Withdrawal credentials type (0x01/0x02) + uint256 withdrawalCredentialsType; +} + +/// @dev old data struct, kept for backward compatibility +struct StakingModule { + /// @notice Unique id of the staking module. + uint24 id; + /// @notice Address of the staking module. + address stakingModuleAddress; + /// @notice Part of the fee taken from staking rewards that goes to the staking module. + uint16 stakingModuleFee; + /// @notice Part of the fee taken from staking rewards that goes to the treasury. + uint16 treasuryFee; + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Formerly known as `targetShare`. + uint16 stakeShareLimit; + /// @notice Staking module status if staking module can not accept the deposits or can + /// participate in further reward distribution. + uint8 status; + /// @notice Name of the staking module. 
+ string name; + /// @notice block.timestamp of the last deposit of the staking module. + /// @dev NB: lastDepositAt gets updated even if the deposit value was 0 and no actual deposit happened. + uint64 lastDepositAt; + /// @notice block.number of the last deposit of the staking module. + /// @dev NB: lastDepositBlock gets updated even if the deposit value was 0 and no actual deposit happened. + uint256 lastDepositBlock; + /// @notice Number of exited validators. + uint256 exitedValidatorsCount; + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + uint16 priorityExitShareThreshold; + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. + uint64 maxDepositsPerBlock; + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. + uint64 minDepositBlockDistance; + /// @notice Withdrawal credentials type (0x01/0x02) + uint8 withdrawalCredentialsType; + /// @notice total actual balance of validators for module in Gwei. + uint64 validatorsBalanceGwei; + } + +/// @dev 1 storage slot +struct ModuleStateConfig { + /// @notice Address of the staking module. + address moduleAddress; + /// @notice Part of the fee taken from staking rewards that goes to the staking module. + uint16 moduleFee; + /// @notice Part of the fee taken from staking rewards that goes to the treasury. + uint16 treasuryFee; + /// @notice Maximum stake share that can be allocated to a module, in BP.
+ uint16 stakeShareLimit; + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + uint16 priorityExitShareThreshold; + /// @notice Staking module status if staking module can not accept the deposits or can + /// participate in further reward distribution. + StakingModuleStatus status; + /// @notice Withdrawal credentials type (0x01/0x02) + uint8 withdrawalCredentialsType; + // uint8 _reserved; + // uint8 _reserved; +} + +/// @dev 1 storage slot +struct ModuleStateDeposits { + /// @notice block.timestamp of the last deposit of the staking module. + /// @dev NB: lastDepositAt gets updated even if the deposit value was 0 and no actual deposit happened. + uint64 lastDepositAt; + /// @notice block.number of the last deposit of the staking module. + /// @dev NB: lastDepositBlock gets updated even if the deposit value was 0 and no actual deposit happened. + uint64 lastDepositBlock; + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. + uint64 maxDepositsPerBlock; + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. + uint64 minDepositBlockDistance; +} + +/// @dev 1 storage slot +struct ModuleStateAccounting { + /// @notice total actual balance of validators for module in Gwei. + uint64 validatorsBalanceGwei; + /// @notice Cumulative number of exited validators for module + uint64 exitedValidatorsCount; + // uint64 _reserved; + // uint64 _reserved; +} + +struct RouterStateAccounting { + /// @notice total actual balance of validators in Gwei.
+ uint64 validatorsBalanceGwei; + // uint64 _reserved; + // uint64 _reserved; + // uint64 _reserved; +} + +struct ModuleState { + /// @notice module config data + ModuleStateConfig config; // slot 0 + /// @notice deposits state data + ModuleStateDeposits deposits; // slot 1 + /// @notice accounting state data + ModuleStateAccounting accounting; // slot 2 + /// @notice Name of the staking module. + string name; // slot 3 +} + +struct RouterState { + // moduleId => ModuleState + mapping(uint256 => ModuleState) moduleStates; // slot 0 + EnumerableSet.UintSet moduleIds; // slot 1 + RouterStateAccounting accounting; // slot 2 + bytes32 withdrawalCredentials; // slot 3 + uint24 lastModuleId; +} + +/// @notice A summary of the staking module's validators. +struct StakingModuleSummary { + /// @notice The total number of validators in the EXITED state on the Consensus Layer. + /// @dev This value can't decrease in normal conditions. + uint256 totalExitedValidators; + /// @notice The total number of validators deposited via the official Deposit Contract. + /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this + /// counter is not decreasing. + uint256 totalDepositedValidators; + /// @notice The number of validators in the set available for deposit + uint256 depositableValidatorsCount; +} + +/// @notice A summary of node operator and its validators. +/// @dev old data struct, kept for backward compatibility +struct NodeOperatorSummary { + /// @notice Shows whether the current target limit applied to the node operator. + uint256 targetLimitMode; + /// @notice Relative target active validators limit for operator. + uint256 targetValidatorsCount; + /// @notice The number of validators with an expired request to exit time. + /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. 
+ uint256 stuckValidatorsCount; + /// @notice The number of validators that can't be withdrawn, but deposit costs were + /// compensated to the Lido by the node operator. + /// @dev [deprecated] Refunded validators processing has been removed, this field is no longer used. + uint256 refundedValidatorsCount; + /// @notice A time when the penalty for stuck validators stops applying to node operator rewards. + /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. + uint256 stuckPenaltyEndTimestamp; + /// @notice The total number of validators in the EXITED state on the Consensus Layer. + /// @dev This value can't decrease in normal conditions. + uint256 totalExitedValidators; + /// @notice The total number of validators deposited via the official Deposit Contract. + /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this + /// counter is not decreasing. + uint256 totalDepositedValidators; + /// @notice The number of validators in the set available for deposit. + uint256 depositableValidatorsCount; +} + +/// @notice A collection of the staking module data stored across the StakingRouter and the +/// staking module contract. +/// +/// @dev This data, first of all, is designed for off-chain usage and might be redundant for +/// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. +struct StakingModuleDigest { + /// @notice The number of node operators registered in the staking module. + uint256 nodeOperatorsCount; + /// @notice The number of node operators registered in the staking module in active state. + uint256 activeNodeOperatorsCount; + /// @notice The current state of the staking module taken from the StakingRouter. + StakingModule state; + /// @notice A summary of the staking module's validators. + StakingModuleSummary summary; +} + +/// @notice A collection of the node operator data stored in the staking module. 
+/// @dev This data, first of all, is designed for off-chain usage and might be redundant for +/// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. +struct NodeOperatorDigest { + /// @notice Id of the node operator. + uint256 id; + /// @notice Shows whether the node operator is active or not. + bool isActive; + /// @notice A summary of node operator and its validators. + NodeOperatorSummary summary; +} + +struct ValidatorsCountsCorrection { + /// @notice The expected current number of exited validators of the module that is + /// being corrected. + uint256 currentModuleExitedValidatorsCount; + /// @notice The expected current number of exited validators of the node operator + /// that is being corrected. + uint256 currentNodeOperatorExitedValidatorsCount; + /// @notice The corrected number of exited validators of the module. + uint256 newModuleExitedValidatorsCount; + /// @notice The corrected number of exited validators of the node operator. + uint256 newNodeOperatorExitedValidatorsCount; +} + +struct ValidatorExitData { + uint256 stakingModuleId; + uint256 nodeOperatorId; + bytes pubkey; +} diff --git a/contracts/0.8.25/sr/SRUtils.sol b/contracts/0.8.25/sr/SRUtils.sol new file mode 100644 index 0000000000..6b96f26663 --- /dev/null +++ b/contracts/0.8.25/sr/SRUtils.sol @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {SRStorage} from "./SRStorage.sol"; +import {ModuleState} from "./SRTypes.sol"; +import {ISRBase} from "./ISRBase.sol"; +import {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol"; + +/** + * @title StakingRouter utility functions + * @author KRogLA + */ +library SRUtils { + using SRStorage for ModuleState; + using SRStorage for uint256; // for module IDs + + uint256 public constant TOTAL_BASIS_POINTS = 10000; + uint256 public constant MAX_STAKING_MODULES_COUNT = 32; + /// @dev Restrict the name size with 31 bytes to storage in a single slot. 
+ uint256 public constant MAX_STAKING_MODULE_NAME_LENGTH = 31; + + /// @dev Large enough to fit all existing Ether per entity, yet overflow-safe when aggregating a reasonable number of entities + uint256 internal constant MAX_VALUE_GWEI = 1_000_000_000 ether / 1 gwei; // i.e. 1B ETH + + /** + * Validation + */ + + function _requireNotZero(uint256 _value) internal pure { + if (_value == 0) revert ISRBase.ZeroArgument(); + } + + function _requireNotZero(address _address) internal pure { + if (_address == address(0)) revert ISRBase.ZeroAddress(); + } + + function _requireWCTypeValid(uint256 _wcType) internal pure { + if (!WithdrawalCredentials.isTypeValid(_wcType)) revert ISRBase.WrongWithdrawalCredentialsType(); + } + + function _requireWCType2(uint256 _wcType) internal pure { + if (!WithdrawalCredentials.isType2(_wcType)) revert ISRBase.WrongWithdrawalCredentialsType(); + } + + function _requireModuleIdExists(uint256 _moduleId) internal view { + if (!SRStorage.isModuleExists(_moduleId)) revert ISRBase.StakingModuleUnregistered(); + } + + /** + * Module helpers + */ + + /// @dev will cause an overflow error if moduleId does not exist + /// @param moduleId module id + /// @return module index in the list of modules (0-based) + function _getModuleIndexById(uint256 moduleId) internal view returns (uint256) { + /// @dev convert from 1-based position + return SRStorage.getModuleIdInnerPosition(moduleId) - 1; + } + + /// @dev get validators (active) balance of the module in ETH (wei) + function _getModuleValidatorsBalance(uint256 moduleId) internal view returns (uint256) { + return _fromGwei(moduleId.getModuleState().accounting.validatorsBalanceGwei); + } + + /// @dev get total validators (active) balance of all modules in ETH + function _getTotalModulesValidatorsBalance() internal view returns (uint256) { + return _fromGwei(SRStorage.getRouterState().accounting.validatorsBalanceGwei); + } + + /** + * Amount helpers + */ + + /// @dev checks if the amount not exceeds a 
reasonable limit and converts it to uint64 + /// @param amountGwei checked amount in gwei + /// @return validated amount in gwei as uint64 + function _ensureAmountGwei(uint256 amountGwei) internal pure returns (uint64) { + if (amountGwei > MAX_VALUE_GWEI) { + revert ISRBase.InvalidAmountGwei(); + } + return uint64(amountGwei); + } + + /// @dev converts amount from wei to gwei + function _toGwei(uint256 amount) internal pure returns (uint64) { + return _ensureAmountGwei(amount / 1 gwei); + } + + /// @dev converts amount from gwei to wei + /// @dev skip _ensureAmountGwei for the input amount due to using the method only as a reverse + /// conversion to values saved via _toGwei + function _fromGwei(uint256 amount) internal pure returns (uint256) { + return amount * 1 gwei; + } +} diff --git a/contracts/0.8.25/sr/StakingRouter.sol b/contracts/0.8.25/sr/StakingRouter.sol new file mode 100644 index 0000000000..7690f03516 --- /dev/null +++ b/contracts/0.8.25/sr/StakingRouter.sol @@ -0,0 +1,1158 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {Math} from "@openzeppelin/contracts-v5.2/utils/math/Math.sol"; +import { + AccessControlEnumerableUpgradeable, + EnumerableSet +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; +import {BeaconChainDepositor, IDepositContract} from "contracts/0.8.25/lib/BeaconChainDepositor.sol"; +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol"; +import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; +import {IStakingModuleV2} from "contracts/common/interfaces/IStakingModuleV2.sol"; +import {SRLib} from "./SRLib.sol"; +import {SRStorage} from "./SRStorage.sol"; +import {SRUtils} from "./SRUtils.sol"; +import {ISRBase} from "./ISRBase.sol"; + +import { + 
ModuleState, + StakingModuleStatus, + StakingModuleConfig, + ValidatorsCountsCorrection, + ValidatorExitData, + StakingModule, + StakingModuleSummary, + NodeOperatorSummary, + StakingModuleDigest, + NodeOperatorDigest, + ModuleStateConfig, + ModuleStateDeposits, + ModuleStateAccounting, + ILido +} from "./SRTypes.sol"; + +contract StakingRouter is ISRBase, AccessControlEnumerableUpgradeable { + using WithdrawalCredentials for bytes32; + using SRStorage for ModuleState; + using SRStorage for uint256; // for module IDs + using EnumerableSet for EnumerableSet.AddressSet; + + /// @dev ACL roles + bytes32 public constant MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = keccak256("MANAGE_WITHDRAWAL_CREDENTIALS_ROLE"); + bytes32 public constant STAKING_MODULE_MANAGE_ROLE = keccak256("STAKING_MODULE_MANAGE_ROLE"); + bytes32 public constant STAKING_MODULE_SHARE_MANAGE_ROLE = keccak256("STAKING_MODULE_SHARE_MANAGE_ROLE"); + bytes32 public constant STAKING_MODULE_UNVETTING_ROLE = keccak256("STAKING_MODULE_UNVETTING_ROLE"); + bytes32 public constant REPORT_EXITED_VALIDATORS_ROLE = keccak256("REPORT_EXITED_VALIDATORS_ROLE"); + bytes32 public constant REPORT_VALIDATOR_EXITING_STATUS_ROLE = keccak256("REPORT_VALIDATOR_EXITING_STATUS_ROLE"); + bytes32 public constant REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = keccak256("REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE"); + bytes32 public constant UNSAFE_SET_EXITED_VALIDATORS_ROLE = keccak256("UNSAFE_SET_EXITED_VALIDATORS_ROLE"); + bytes32 public constant REPORT_REWARDS_MINTED_ROLE = keccak256("REPORT_REWARDS_MINTED_ROLE"); + + uint256 public constant FEE_PRECISION_POINTS = 10 ** 20; // 100 * 10 ** 18 + uint64 internal constant PUBKEY_LENGTH = 48; + + IDepositContract public immutable DEPOSIT_CONTRACT; + ILido public immutable LIDO; + ILidoLocator public immutable LIDO_LOCATOR; + + /// @notice Max Effective Balance for Withdrawal Credentials types + /// @dev for Ethereum chain: 32 ether and 2048 ether + uint256 public immutable 
MAX_EFFECTIVE_BALANCE_WC_TYPE_01; + uint256 public immutable MAX_EFFECTIVE_BALANCE_WC_TYPE_02; + + /// @dev backward-compatible getter for a constant moved to a shared library + function INITIAL_DEPOSIT_SIZE() external view returns (uint256) { + return MAX_EFFECTIVE_BALANCE_WC_TYPE_01; + } + + /// @dev backward-compatible getter for a constant moved to a shared library + function TOTAL_BASIS_POINTS() external pure returns (uint256) { + return SRUtils.TOTAL_BASIS_POINTS; + } + + /// @dev backward-compatible getter for a constant moved to a shared library + function MAX_STAKING_MODULES_COUNT() external pure returns (uint256) { + return SRUtils.MAX_STAKING_MODULES_COUNT; + } + + /// @dev backward-compatible getter for a constant moved to a shared library + function MAX_STAKING_MODULE_NAME_LENGTH() external pure returns (uint256) { + return SRUtils.MAX_STAKING_MODULE_NAME_LENGTH; + } + + constructor( + address _depositContract, + address _lido, + address _lidoLocator, + uint256 _maxEBType1, + uint256 _maxEBType2 + ) { + SRUtils._requireNotZero(_depositContract); + SRUtils._requireNotZero(_lido); + SRUtils._requireNotZero(_lidoLocator); + + DEPOSIT_CONTRACT = IDepositContract(_depositContract); + LIDO = ILido(_lido); + LIDO_LOCATOR = ILidoLocator(_lidoLocator); + + SRUtils._requireNotZero(_maxEBType1); + SRUtils._requireNotZero(_maxEBType2); + MAX_EFFECTIVE_BALANCE_WC_TYPE_01 = _maxEBType1; + MAX_EFFECTIVE_BALANCE_WC_TYPE_02 = _maxEBType2; + + _disableInitializers(); + } + + /// @notice Initializes the contract. + /// @param _admin Lido DAO Aragon agent contract address. + /// @param _withdrawalCredentials 0x01 credentials to withdraw ETH on Consensus Layer side. + /// @dev Proxy initialization method. 
+ function initialize(address _admin, bytes32 _withdrawalCredentials) external reinitializer(4) { + if (_admin == address(0)) revert ZeroAddress(); + + _grantRole(DEFAULT_ADMIN_ROLE, _admin); + _setWithdrawalCredentials(_withdrawalCredentials); + } + + /// @notice A function to migrate upgrade to v4 (from v3) and use OpenZeppelin versioning. + function finalizeUpgrade_v4() external reinitializer(4) { + // migrate current modules to new storage + SRLib._migrateStorage(MAX_EFFECTIVE_BALANCE_WC_TYPE_01); + + /// @dev migrate OZ roles + /// Due to OZ 5.2 AccessControl uses ERC-7201 namespaced storage at different slots we should + /// migrate roles from old storage to new one. + /// We use only _roleMembers mapping and safely ignore the second mapping _roles, because + /// both mappings are updated atomically, so we only need one. + + // pre upgrade roles + bytes32[9] memory roles = [ + DEFAULT_ADMIN_ROLE, + MANAGE_WITHDRAWAL_CREDENTIALS_ROLE, + STAKING_MODULE_MANAGE_ROLE, + STAKING_MODULE_UNVETTING_ROLE, + REPORT_EXITED_VALIDATORS_ROLE, + REPORT_VALIDATOR_EXITING_STATUS_ROLE, + REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE, + UNSAFE_SET_EXITED_VALIDATORS_ROLE, + REPORT_REWARDS_MINTED_ROLE + ]; + + EnumerableSet.AddressSet storage members; + for (uint256 i = 0; i < roles.length; ++i) { + bytes32 role = roles[i]; + members = _getStorageRoleMembersOld()[role]; + for (uint256 j; j < members.length(); ++j) { + _grantRole(role, members.at(j)); + } + } + } + + /// @dev Helper for migration - returns OZ AccessControlEnumerable _roleMembers mapping storage reference + function _getStorageRoleMembersOld() private pure returns (mapping(bytes32 => EnumerableSet.AddressSet) storage $) { + /// @dev Old _roleMembers storage slot. + bytes32 position = keccak256("openzeppelin.AccessControlEnumerable._roleMembers"); + assembly ("memory-safe") { + $.slot := position + } + } + + /// @dev Prohibit direct transfer to contract. 
+ receive() external payable { + revert DirectETHTransfer(); + } + + /// @notice Registers a new staking module. + /// @param _name Name of staking module. + /// @param _stakingModuleAddress Address of staking module. + /// @param _stakingModuleConfig Staking module config + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function addStakingModule( + string calldata _name, + address _stakingModuleAddress, + StakingModuleConfig calldata _stakingModuleConfig + ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { + uint256 newModuleId = SRLib._addModule(_stakingModuleAddress, _name, _stakingModuleConfig); + + /// @dev Simulate last deposit state to prevent real deposits into the new ModuleState via + /// DepositSecurityModule just after the addition. + _updateModuleLastDepositState(newModuleId, 0); + } + + /// @notice Updates staking module params. + /// @param _stakingModuleId Staking module id. + // @param _stakingModuleConfig Staking module config + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function updateStakingModule( + uint256 _stakingModuleId, + uint256 _stakeShareLimit, + uint256 _priorityExitShareThreshold, + uint256 _stakingModuleFee, + uint256 _treasuryFee, + uint256 _maxDepositsPerBlock, + uint256 _minDepositBlockDistance + ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { + SRUtils._requireModuleIdExists(_stakingModuleId); + SRLib._updateModuleParams( + _stakingModuleId, + _stakeShareLimit, + _priorityExitShareThreshold, + _stakingModuleFee, + _treasuryFee, + _maxDepositsPerBlock, + _minDepositBlockDistance + ); + } + + /// @notice Updates fees for all staking modules in a single atomic operation. + /// @param _stakingModuleFees New staking module fee values in the current module iteration order (returned by `getStakingModuleIds()`). + /// @param _treasuryFees New treasury fee values in the current module iteration order. 
+ /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function updateAllStakingModulesFees(uint256[] calldata _stakingModuleFees, uint256[] calldata _treasuryFees) + external + onlyRole(STAKING_MODULE_MANAGE_ROLE) + { + SRLib._updateAllModuleFees(_stakingModuleFees, _treasuryFees); + } + + /// @notice Updates staking module share params. + /// @param _stakingModuleId Staking module id. + /// @param _stakeShareLimit New stake share limit value. + /// @param _priorityExitShareThreshold New priority exit share threshold value. + /// @dev The function is restricted to the `STAKING_MODULE_SHARE_MANAGE_ROLE` role. + function updateModuleShares(uint256 _stakingModuleId, uint16 _stakeShareLimit, uint16 _priorityExitShareThreshold) + external + onlyRole(STAKING_MODULE_SHARE_MANAGE_ROLE) + { + SRUtils._requireModuleIdExists(_stakingModuleId); + SRLib._updateModuleShares(_stakingModuleId, _stakeShareLimit, _priorityExitShareThreshold); + } + + /// @notice Updates the limit of the validators that can be used for deposit. + /// @param _stakingModuleId Id of the staking module. + /// @param _nodeOperatorId Id of the node operator. + /// @param _targetLimitMode Target limit mode. + /// @param _targetLimit Target limit of the node operator. + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function updateTargetValidatorsLimits( + uint256 _stakingModuleId, + uint256 _nodeOperatorId, + uint256 _targetLimitMode, + uint256 _targetLimit + ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { + SRUtils._requireModuleIdExists(_stakingModuleId); + _stakingModuleId.getIStakingModule() + .updateTargetValidatorsLimits(_nodeOperatorId, _targetLimitMode, _targetLimit); + } + + /// @dev See {SRLib._reportRewardsMinted}. + /// + /// @dev The function is restricted to the `REPORT_REWARDS_MINTED_ROLE` role. 
function reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares)
    external
    onlyRole(REPORT_REWARDS_MINTED_ROLE)
{
    SRLib._reportRewardsMinted(_stakingModuleIds, _totalShares);
}

/// @dev See {SRLib._updateExitedValidatorsCountByStakingModule}.
///
/// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role.
function updateExitedValidatorsCountByStakingModule(
    uint256[] calldata _stakingModuleIds,
    uint256[] calldata _exitedValidatorsCounts
) external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) returns (uint256) {
    return SRLib._updateExitedValidatorsCountByStakingModule(_stakingModuleIds, _exitedValidatorsCounts);
}

/// @dev See {SRLib._reportValidatorBalancesByStakingModule}.
///
/// @dev The function is restricted to the same role as `updateExitedValidatorsCountByStakingModule`,
/// i.e. `REPORT_EXITED_VALIDATORS_ROLE` role.
function reportValidatorBalancesByStakingModule(
    uint256[] calldata _stakingModuleIds,
    uint256[] calldata _validatorBalancesGwei
) external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) {
    SRLib._reportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei);
}

/// @notice Validates a validator balances report against the current StakingRouter module set and limits.
/// @dev View-only; reverts on an invalid report, permissionless (no role required).
function validateReportValidatorBalancesByStakingModule(
    uint256[] calldata _stakingModuleIds,
    uint256[] calldata _validatorBalancesGwei
) external view {
    SRLib._validateReportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei);
}

/// @dev See {SRLib._reportStakingModuleOperatorExitedValidators}.
///
/// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role.
function reportStakingModuleExitedValidatorsCountByNodeOperator(
    uint256 _stakingModuleId,
    bytes calldata _nodeOperatorIds,
    bytes calldata _exitedValidatorsCounts
) external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) {
    SRLib._reportStakingModuleOperatorExitedValidators(_stakingModuleId, _nodeOperatorIds, _exitedValidatorsCounts);
}

/// @dev DEPRECATED
/// @dev See {SRLib._unsafeSetExitedValidatorsCount}.
/// @dev The function is restricted to the `UNSAFE_SET_EXITED_VALIDATORS_ROLE` role.
function unsafeSetExitedValidatorsCount(
    uint256 _stakingModuleId,
    uint256 _nodeOperatorId,
    bool _triggerUpdateFinish,
    ValidatorsCountsCorrection calldata _correction
) external onlyRole(UNSAFE_SET_EXITED_VALIDATORS_ROLE) {
    SRLib._unsafeSetExitedValidatorsCount(_stakingModuleId, _nodeOperatorId, _triggerUpdateFinish, _correction);
}

/// @dev See {SRLib._onValidatorsCountsByNodeOperatorReportingFinished}.
///
/// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role.
function onValidatorsCountsByNodeOperatorReportingFinished() external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) {
    SRLib._onValidatorsCountsByNodeOperatorReportingFinished();
}

/// @dev See {SRLib._decreaseStakingModuleVettedKeysCountByNodeOperator}.
///
/// @dev The function is restricted to the `STAKING_MODULE_UNVETTING_ROLE` role.
function decreaseStakingModuleVettedKeysCountByNodeOperator(
    uint256 _stakingModuleId,
    bytes calldata _nodeOperatorIds,
    bytes calldata _vettedSigningKeysCounts
) external onlyRole(STAKING_MODULE_UNVETTING_ROLE) {
    SRLib._decreaseStakingModuleVettedKeysCountByNodeOperator(
        _stakingModuleId, _nodeOperatorIds, _vettedSigningKeysCounts
    );
}

/// @dev See {SRLib._reportValidatorExitDelay}.
///
/// @dev The function is restricted to the `REPORT_VALIDATOR_EXITING_STATUS_ROLE` role.
function reportValidatorExitDelay(
    uint256 _stakingModuleId,
    uint256 _nodeOperatorId,
    uint256 _proofSlotTimestamp,
    bytes calldata _publicKey,
    uint256 _eligibleToExitInSec
) external onlyRole(REPORT_VALIDATOR_EXITING_STATUS_ROLE) {
    SRLib._reportValidatorExitDelay(
        _stakingModuleId, _nodeOperatorId, _proofSlotTimestamp, _publicKey, _eligibleToExitInSec
    );
}

/// @dev See {SRLib._onValidatorExitTriggered}.
///
/// @dev The function is restricted to the `REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE` role.
function onValidatorExitTriggered(
    ValidatorExitData[] calldata validatorExitData,
    uint256 _withdrawalRequestPaidFee,
    uint256 _exitType
) external onlyRole(REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE) {
    SRLib._onValidatorExitTriggered(validatorExitData, _withdrawalRequestPaidFee, _exitType);
}

/// @notice Returns all registered staking modules.
/// @return moduleStates Array of staking modules.
function getStakingModules() external view returns (StakingModule[] memory) {
    uint256 modulesCount = SRStorage.getModulesCount();
    StakingModule[] memory moduleStates = new StakingModule[](modulesCount);

    for (uint256 i; i < modulesCount; ++i) {
        moduleStates[i] = _getModuleStateCompat(SRStorage.getModuleIdAt(i));
    }
    return moduleStates;
}

/// @notice Returns state for staking modules.
/// @param _stakingModuleId Id of the staking module.
/// @return stateConfig staking modules config state
function getStakingModuleStateConfig(uint256 _stakingModuleId)
    external
    view
    returns (ModuleStateConfig memory stateConfig)
{
    (, stateConfig) = _getModuleState(_stakingModuleId);
}

/// @notice Returns the deposit-tracking state for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return stateDeposits Staking module deposits state.
function getStakingModuleStateDeposits(uint256 _stakingModuleId)
    external
    view
    returns (ModuleStateDeposits memory stateDeposits)
{
    (ModuleState storage state,) = _getModuleState(_stakingModuleId);
    stateDeposits = state.deposits;
}

/// @notice Returns the accounting state for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return validatorsBalanceGwei Module validators balance, in gwei.
/// @return exitedValidatorsCount Module exited validators count.
function getStakingModuleStateAccounting(uint256 _stakingModuleId)
    external
    view
    returns (uint64 validatorsBalanceGwei, uint64 exitedValidatorsCount)
{
    (ModuleState storage state,) = _getModuleState(_stakingModuleId);
    ModuleStateAccounting memory moduleAcc = state.accounting;
    return (moduleAcc.validatorsBalanceGwei, moduleAcc.exitedValidatorsCount);
}

/// @notice Returns the ids of all registered staking modules.
/// @return stakingModuleIds Array of staking module ids.
function getStakingModuleIds() external view returns (uint256[] memory) {
    return SRStorage.getModuleIds();
}

/// @notice Returns the staking module by its id.
/// @param _stakingModuleId Id of the staking module.
/// @return moduleState Staking module data.
function getStakingModule(uint256 _stakingModuleId) external view returns (StakingModule memory) {
    SRUtils._requireModuleIdExists(_stakingModuleId);
    return _getModuleStateCompat(_stakingModuleId);
}

/// @notice Returns total number of staking modules.
/// @return Total number of staking modules.
function getStakingModulesCount() external view returns (uint256) {
    return SRStorage.getModulesCount();
}

/// @notice Returns true if staking module with the given id was registered via `addStakingModule`, false otherwise.
/// @param _stakingModuleId Id of the staking module.
/// @return True if staking module with the given id was registered, false otherwise.
function hasStakingModule(uint256 _stakingModuleId) public view returns (bool) {
    return SRStorage.isModuleExists(_stakingModuleId);
}

/// @notice Returns status of staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return Status of the staking module.
function getStakingModuleStatus(uint256 _stakingModuleId) public view returns (StakingModuleStatus) {
    SRUtils._requireModuleIdExists(_stakingModuleId);
    return _stakingModuleId.getModuleState().config.status;
}

/// @notice Returns the version of the contract set at initialization/finalization.
function getContractVersion() external view returns (uint256) {
    return _getInitializedVersion();
}

/// @notice Returns all-validators summary in the staking module.
/// @param _stakingModuleId Id of the staking module to return summary for.
/// @return summary Staking module summary.
function getStakingModuleSummary(uint256 _stakingModuleId)
    external
    view
    returns (StakingModuleSummary memory summary)
{
    SRUtils._requireModuleIdExists(_stakingModuleId);
    return _getStakingModuleSummaryStruct(_stakingModuleId);
}

/// @notice Returns node operator summary from the staking module.
/// @param _stakingModuleId Id of the staking module where node operator is onboarded.
/// @param _nodeOperatorId Id of the node operator to return summary for.
/// @return summary Node operator summary.
function getNodeOperatorSummary(uint256 _stakingModuleId, uint256 _nodeOperatorId)
    external
    view
    returns (NodeOperatorSummary memory summary)
{
    SRUtils._requireModuleIdExists(_stakingModuleId);
    return _getNodeOperatorSummary(_stakingModuleId.getIStakingModule(), _nodeOperatorId);
}

/// @notice Returns staking module digest for each staking module registered in the staking router.
/// @return Array of staking module digests.
/// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
/// for data aggregation.
function getAllStakingModuleDigests() external view returns (StakingModuleDigest[] memory) {
    return getStakingModuleDigests(SRStorage.getModuleIds());
}

/// @notice Returns staking module digest for passed staking module ids.
/// @param _stakingModuleIds Ids of the staking modules to return data for.
/// @return digests Array of staking module digests.
/// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
/// for data aggregation.
function getStakingModuleDigests(uint256[] memory _stakingModuleIds)
    public
    view
    returns (StakingModuleDigest[] memory digests)
{
    digests = new StakingModuleDigest[](_stakingModuleIds.length);

    for (uint256 i = 0; i < _stakingModuleIds.length; ++i) {
        uint256 stakingModuleId = _stakingModuleIds[i];
        SRUtils._requireModuleIdExists(stakingModuleId);
        IStakingModule stakingModule = stakingModuleId.getIStakingModule();

        digests[i].nodeOperatorsCount = _getStakingModuleNodeOperatorsCount(stakingModule);
        digests[i].activeNodeOperatorsCount = _getStakingModuleActiveNodeOperatorsCount(stakingModule);
        digests[i].state = _getModuleStateCompat(stakingModuleId);
        digests[i].summary = _getStakingModuleSummaryStruct(stakingModuleId);
    }
}

/// @notice Returns node operator digest for each node operator registered in the given staking module.
/// @param _stakingModuleId Id of the staking module to return data for.
/// @return Array of node operator digests.
/// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
/// for data aggregation.
function getAllNodeOperatorDigests(uint256 _stakingModuleId) external view returns (NodeOperatorDigest[] memory) {
    return getNodeOperatorDigests(
        _stakingModuleId, 0, _getStakingModuleNodeOperatorsCount(_stakingModuleId.getIStakingModule())
    );
}

/// @notice Returns node operator digest for passed node operator ids in the given staking module.
/// @param _stakingModuleId Id of the staking module where node operators registered.
/// @param _offset Node operators offset starting with 0.
/// @param _limit The max number of node operators to return.
/// @return Array of node operator digests.
/// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
/// for data aggregation.
function getNodeOperatorDigests(uint256 _stakingModuleId, uint256 _offset, uint256 _limit)
    public
    view
    returns (NodeOperatorDigest[] memory)
{
    return getNodeOperatorDigests(
        _stakingModuleId, _getStakingModuleNodeOperatorIds(_stakingModuleId.getIStakingModule(), _offset, _limit)
    );
}

/// @notice Returns node operator digest for a slice of node operators registered in the given
/// staking module.
/// @param _stakingModuleId Id of the staking module where node operators registered.
/// @param _nodeOperatorIds Ids of the node operators to return data for.
/// @return digests Array of node operator digests.
/// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
/// for data aggregation.
function getNodeOperatorDigests(uint256 _stakingModuleId, uint256[] memory _nodeOperatorIds)
    public
    view
    returns (NodeOperatorDigest[] memory digests)
{
    SRUtils._requireModuleIdExists(_stakingModuleId);
    /// @dev Module address lookup hoisted out of the loop — it is loop-invariant
    /// (matches the per-id resolution pattern in `getStakingModuleDigests`).
    IStakingModule stakingModule = _stakingModuleId.getIStakingModule();
    digests = new NodeOperatorDigest[](_nodeOperatorIds.length);
    for (uint256 i = 0; i < _nodeOperatorIds.length; ++i) {
        uint256 nodeOperatorId = _nodeOperatorIds[i];

        digests[i].id = nodeOperatorId;
        digests[i].isActive = _getStakingModuleNodeOperatorIsActive(stakingModule, nodeOperatorId);
        digests[i].summary = _getNodeOperatorSummary(stakingModule, nodeOperatorId);
    }
}

/// @notice Sets the staking module status flag for participation in further deposits and/or reward distribution.
/// @param _stakingModuleId Id of the staking module to be updated.
/// @param _status New status of the staking module.
/// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role.
/// @dev Reverts with `StakingModuleStatusTheSame` if the status does not change.
function setStakingModuleStatus(uint256 _stakingModuleId, StakingModuleStatus _status)
    external
    onlyRole(STAKING_MODULE_MANAGE_ROLE)
{
    SRUtils._requireModuleIdExists(_stakingModuleId);
    if (!SRLib._setModuleStatus(_stakingModuleId, _status)) revert StakingModuleStatusTheSame();
}

/// @notice Returns whether the staking module is stopped.
/// @param _stakingModuleId Id of the staking module.
/// @return True if the staking module is stopped, false otherwise.
function getStakingModuleIsStopped(uint256 _stakingModuleId) external view returns (bool) {
    return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Stopped;
}

/// @notice Returns whether the deposits are paused for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return True if the deposits are paused, false otherwise.
function getStakingModuleIsDepositsPaused(uint256 _stakingModuleId) external view returns (bool) {
    return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.DepositsPaused;
}

/// @notice Returns whether the staking module is active.
/// @param _stakingModuleId Id of the staking module.
/// @return True if the staking module is active, false otherwise.
function getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool) {
    return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Active;
}

/// @notice Returns staking module nonce.
/// @param _stakingModuleId Id of the staking module.
/// @return Staking module nonce.
function getStakingModuleNonce(uint256 _stakingModuleId) external view returns (uint256) {
    SRUtils._requireModuleIdExists(_stakingModuleId);
    return _stakingModuleId.getIStakingModule().getNonce();
}

/// @notice Returns the last deposit block for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return Last deposit block for the staking module.
function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) external view returns (uint256) {
    (ModuleState storage state,) = _getModuleState(_stakingModuleId);
    return state.deposits.lastDepositBlock;
}

/// @notice Returns the min deposit block distance for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return Min deposit block distance for the staking module.
function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256) {
    (ModuleState storage state,) = _getModuleState(_stakingModuleId);
    return state.deposits.minDepositBlockDistance;
}

/// @notice Returns the max deposits count per block for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return Max deposits count per block for the staking module.
function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256) {
    (ModuleState storage state,) = _getModuleState(_stakingModuleId);
    return state.deposits.maxDepositsPerBlock;
}

/// @notice Returns active validators count for the staking module.
/// @param _stakingModuleId Id of the staking module.
/// @return activeValidatorsCount Active validators count for the staking module.
function getStakingModuleActiveValidatorsCount(uint256 _stakingModuleId)
    external
    view
    returns (uint256 activeValidatorsCount)
{
    (ModuleState storage state,) = _getModuleState(_stakingModuleId);
    (uint256 totalExitedValidators, uint256 totalDepositedValidators,) = _getStakingModuleSummary(_stakingModuleId);

    // Use the larger of the router-side and module-side exited counters so the
    // result never overstates active validators.
    activeValidatorsCount =
        totalDepositedValidators - Math.max(state.accounting.exitedValidatorsCount, totalExitedValidators);
}

/// @notice Returns withdrawal credentials type
/// @param _stakingModuleId Id of the staking module to be deposited.
/// @return withdrawal credentials: 0x01... - for Legacy modules, 0x02... - for New modules
function getStakingModuleWithdrawalCredentials(uint256 _stakingModuleId) external view returns (bytes32) {
    (, ModuleStateConfig storage stateConfig) = _getModuleState(_stakingModuleId);
    return _getWithdrawalCredentialsWithType(stateConfig.withdrawalCredentialsType);
}

/// @notice Returns the max count of deposits which the staking module can provide data for based
/// on the passed `_maxDepositsValue` amount.
/// @param _stakingModuleId Id of the staking module to be deposited.
/// @param _maxDepositsValue Max amount of ether that might be used for deposits count calculation.
/// @return Max number of deposits might be done using the given staking module.
function getStakingModuleMaxDepositsCount(uint256 _stakingModuleId, uint256 _maxDepositsValue)
    public
    view
    returns (uint256)
{
    SRUtils._requireModuleIdExists(_stakingModuleId);
    // If the module is not active, its capacity is 0, so the allocated ETH amount will be 0.
    // Module capacity is calculated based on the depositableValidatorsCount (from getStakingModuleSummary), so
    // the allocation is already capped by the module capacity and represents the max ETH amount possible to deposit.
    return
        _getModuleDepositAllocation(_stakingModuleId, _maxDepositsValue, false) / MAX_EFFECTIVE_BALANCE_WC_TYPE_01;
}

/// @notice Returns whether the staking module exists and has the Active status.
/// @param _stakingModuleId Id of the staking module.
function canDeposit(uint256 _stakingModuleId) external view returns (bool) {
    return hasStakingModule(_stakingModuleId)
        && _stakingModuleId.getModuleState().config.status == StakingModuleStatus.Active;
}

/**
 * @notice A payable function for depositable eth acquisition. Can be called only by `Lido`
 */
function receiveDepositableEther() external payable {
    _checkAppAuth(address(LIDO));

    emit DepositableEthReceived(msg.value);
}

/// @notice Method performs top-up calls to the official Deposit contract. Determines how much Lido buffered ether can be deposited
/// to the staking module, obtains keys from the staking module with exact allocation for each key, pulls ether from Lido,
/// and performs the top-up call.
/// @param _stakingModuleId Id of the staking module to be deposited.
/// @param _keyIndices List of keys' indices
/// @param _operatorIds List of operator indices
/// @param _pubkeys List of validator public keys to top up
/// @param _topUpLimits Maximum amount (in wei) that can be deposited per key based on CL data and TopUpGateway logic
function topUp(
    uint256 _stakingModuleId,
    uint256[] calldata _keyIndices,
    uint256[] calldata _operatorIds,
    bytes[] calldata _pubkeys,
    uint256[] calldata _topUpLimits
) external {
    // Only the TopUpGateway may call this entrypoint.
    _checkAppAuth(_getTopUpGateway());
    _validateTopUpInputs(_keyIndices, _operatorIds, _topUpLimits, _pubkeys);

    (, ModuleStateConfig storage stateConfig) = _getModuleState(_stakingModuleId);

    if (stateConfig.status != StakingModuleStatus.Active) revert StakingModuleNotActive();

    /// @dev This method is only supported for new modules (0x02 withdrawal credentials)
    SRUtils._requireWCType2(stateConfig.withdrawalCredentialsType);

    // Get allocation based on target share
    uint256 depositableEther = LIDO.getDepositableEther();
    uint256 smDepositableEthAmount = _getModuleDepositAllocation(_stakingModuleId, depositableEther, true);

    // Call allocateDeposits on the staking module to determine for what amount deposit each key
    // The module verifies keys belong to it and reverts if invalid.
    // Even if smDepositableEthAmount is 0, we still call the module
    // to allow CSM queue cursor advancement.
    uint256[] memory allocations;
    // Round down to a whole number of gwei — beacon chain deposits are gwei-denominated.
    uint256 smDepositableEthAmountRounded = smDepositableEthAmount - (smDepositableEthAmount % 1 gwei);
    allocations = IStakingModuleV2(stateConfig.moduleAddress)
        .allocateDeposits(smDepositableEthAmountRounded, _pubkeys, _keyIndices, _operatorIds, _topUpLimits);

    // Calculate total amount from allocations returned by module (in wei)
    uint256 amount;
    unchecked {
        for (uint256 i; i < allocations.length; ++i) {
            if (allocations[i] % 1 gwei != 0) {
                revert AmountNotAlignedToGwei();
            }

            if (allocations[i] > _topUpLimits[i]) {
                revert AllocationExceedsLimit();
            }

            amount += allocations[i];
        }
    }

    // Verify sum of allocations does not exceed module's max deposit amount
    if (amount > smDepositableEthAmountRounded) {
        revert ModuleReturnExceedTarget();
    }

    if (amount > 0) {
        uint256 etherBalanceBeforeDeposits = address(this).balance;
        // Pull ETH from Lido
        LIDO.withdrawDepositableEther(amount, 0);

        bytes32 withdrawalCredentials = _getWithdrawalCredentialsWithType(stateConfig.withdrawalCredentialsType);
        bytes memory wcBytes = abi.encodePacked(withdrawalCredentials);

        // Make beacon chain top-up deposits
        BeaconChainDepositor.makeBeaconChainTopUp(DEPOSIT_CONTRACT, wcBytes, _pubkeys, allocations);

        uint256 etherBalanceAfterDeposits = address(this).balance;

        /// @dev All pulled ETH must be deposited
        assert(etherBalanceBeforeDeposits == etherBalanceAfterDeposits);
    }
}

/// @dev Checks that the top-up input arrays are non-empty, of equal length,
/// and that every pubkey has the expected length. Reverts otherwise.
function _validateTopUpInputs(
    uint256[] calldata _keyIndices,
    uint256[] calldata _operatorIds,
    uint256[] calldata _topUpLimits,
    bytes[] calldata _pubkeys
) internal pure {
    uint256 n = _keyIndices.length;

    if (n == 0) {
        revert EmptyKeysList();
    }

    if (_operatorIds.length != n || _topUpLimits.length != n || _pubkeys.length != n) {
        revert ArraysLengthMismatch();
    }

    for (uint256 i; i < n; ++i) {
        if (_pubkeys[i].length != PUBKEY_LENGTH) {
            revert WrongPubkeyLength();
        }
    }
}

/// @notice Returns the aggregate fee distribution proportion.
/// @return modulesFee Modules aggregate fee in base precision.
/// @return treasuryFee Treasury fee in base precision.
/// @return basePrecision Base precision: a value corresponding to the full fee.
function getStakingFeeAggregateDistribution()
    public
    view
    returns (uint96 modulesFee, uint96 treasuryFee, uint256 basePrecision)
{
    uint96[] memory moduleFees;
    uint96 totalFee;
    (,, moduleFees, totalFee, basePrecision) = getStakingRewardsDistribution();
    for (uint256 i; i < moduleFees.length; ++i) {
        modulesFee += moduleFees[i];
    }
    treasuryFee = totalFee - modulesFee;
}

/// @notice Return shares table.
/// @return recipients Rewards recipient addresses corresponding to each module.
/// @return stakingModuleIds Module IDs.
/// @return stakingModuleFees Fee of each recipient.
/// @return totalFee Total fee to mint for each staking module and treasury.
/// @return precisionPoints Base precision number, which constitutes 100% fee.
function getStakingRewardsDistribution()
    public
    view
    returns (
        address[] memory recipients,
        uint256[] memory stakingModuleIds,
        uint96[] memory stakingModuleFees,
        uint96 totalFee,
        uint256 precisionPoints
    )
{
    uint256 totalValidatorsBalance = SRUtils._getTotalModulesValidatorsBalance();
    uint256 stakingModulesCount = totalValidatorsBalance == 0 ? 0 : SRStorage.getModulesCount();

    stakingModuleIds = new uint256[](stakingModulesCount);
    recipients = new address[](stakingModulesCount);
    stakingModuleFees = new uint96[](stakingModulesCount);
    precisionPoints = FEE_PRECISION_POINTS;

    /// @dev Return empty response if there are no staking modules or active validators yet.
    if (stakingModulesCount == 0) {
        return (recipients, stakingModuleIds, stakingModuleFees, totalFee, precisionPoints);
    }

    uint256 rewardedStakingModulesCount = 0;

    for (uint256 i; i < stakingModulesCount; ++i) {
        uint256 moduleId = SRStorage.getModuleIdAt(i);
        uint256 allocation = SRUtils._getModuleValidatorsBalance(moduleId);

        /// @dev Skip staking modules which have no active balance.
        if (allocation == 0) continue;

        stakingModuleIds[rewardedStakingModulesCount] = moduleId;

        ModuleStateConfig memory stateConfig = moduleId.getModuleState().config;
        recipients[rewardedStakingModulesCount] = stateConfig.moduleAddress;

        (uint96 moduleFee, uint96 treasuryFee) = _computeModuleFee(allocation, totalValidatorsBalance, stateConfig);

        /// @dev If the staking module has the Stopped status for some reason, then
        /// the staking module's rewards go to the treasury, so that the DAO has ability
        /// to manage them (e.g. to compensate the staking module in case of an error, etc.)
        if (stateConfig.status != StakingModuleStatus.Stopped) {
            stakingModuleFees[rewardedStakingModulesCount] = moduleFee;
        }
        totalFee += treasuryFee + moduleFee;

        unchecked {
            ++rewardedStakingModulesCount;
        }
    }

    // Total fee never exceeds 100%.
    assert(totalFee <= precisionPoints);

    /// @dev Shrink arrays: truncate the memory arrays' length words in place to the
    /// number of rewarded modules actually written.
    if (rewardedStakingModulesCount < stakingModulesCount) {
        assembly ("memory-safe") {
            mstore(stakingModuleIds, rewardedStakingModulesCount)
            mstore(recipients, rewardedStakingModulesCount)
            mstore(stakingModuleFees, rewardedStakingModulesCount)
        }
    }

    return (recipients, stakingModuleIds, stakingModuleFees, totalFee, precisionPoints);
}

/// @notice Returns the validators balance tracked for the given staking module.
/// @param moduleId Id of the staking module.
function getModuleValidatorsBalance(uint256 moduleId) external view returns (uint256) {
    SRUtils._requireModuleIdExists(moduleId);
    return SRUtils._getModuleValidatorsBalance(moduleId);
}

/// @notice Returns the total validators balance across all staking modules.
function getTotalModulesValidatorsBalance() external view returns (uint256) {
    return SRUtils._getTotalModulesValidatorsBalance();
}

/// @dev Splits a module's balance-proportional share into module and treasury fee parts
/// using the module's configured fee basis points.
function _computeModuleFee(
    uint256 validatorsBalance,
    uint256 totalValidatorsBalance,
    ModuleStateConfig memory stateConfig
) internal pure returns (uint96 moduleFee, uint96 treasuryFee) {
    uint256 share =
        validatorsBalance * FEE_PRECISION_POINTS / totalValidatorsBalance;
    moduleFee = uint96(share * stateConfig.moduleFee / SRUtils.TOTAL_BASIS_POINTS);
    treasuryFee = uint96(share * stateConfig.treasuryFee / SRUtils.TOTAL_BASIS_POINTS);
}

/// @notice Returns the same as getStakingRewardsDistribution() but in reduced, 1e4 precision (DEPRECATED).
/// @dev Helper only for Lido contract. Use getStakingRewardsDistribution() instead.
/// @return totalFee Total fee to mint for each staking module and treasury in reduced, 1e4 precision.
function getTotalFeeE4Precision() external view returns (uint16 totalFee) {
    /// @dev The logic is placed here, not in the Lido contract, to save Lido bytecode.
    (,,, uint96 totalFeeInHighPrecision, uint256 precision) = getStakingRewardsDistribution();
    // Here we rely on (totalFeeInHighPrecision <= precision).
    totalFee = _toE4Precision(totalFeeInHighPrecision, precision);
}

/// @notice Returns the same as getStakingFeeAggregateDistribution() but in reduced, 1e4 precision (DEPRECATED).
/// @dev Helper only for Lido contract.
/// Use getStakingFeeAggregateDistribution() instead.
/// @return modulesFee Modules aggregate fee in reduced, 1e4 precision.
/// @return treasuryFee Treasury fee in reduced, 1e4 precision.
function getStakingFeeAggregateDistributionE4Precision()
    external
    view
    returns (uint16 modulesFee, uint16 treasuryFee)
{
    /// @dev The logic is placed here, not in the Lido contract, to save Lido bytecode.
    (uint256 modulesFeeHighPrecision, uint256 treasuryFeeHighPrecision, uint256 precision) =
        getStakingFeeAggregateDistribution();
    // Here we rely on ({modules,treasury}FeeHighPrecision <= precision).
    modulesFee = _toE4Precision(modulesFeeHighPrecision, precision);
    treasuryFee = _toE4Precision(treasuryFeeHighPrecision, precision);
}

/// @notice Returns new deposits allocation after the distribution of the `_depositAmount` deposits.
/// @param _depositAmount The maximum ETH amount of deposits to be allocated.
/// @param _isTopUp Whether the allocation is requested for top-up (true) or initial deposits (false).
/// @return totalAllocated - amount actually allocated
/// @return allocated - Array of newly allocated amounts for each module
/// @return newAllocations - Array of new allocation amounts for each module
function getDepositAllocations(uint256 _depositAmount, bool _isTopUp)
    public
    view
    returns (uint256 totalAllocated, uint256[] memory allocated, uint256[] memory newAllocations)
{
    (totalAllocated, allocated, newAllocations) =
        SRLib._getDepositAllocations(_getConfig(), _depositAmount, _isTopUp);
}

/// @notice Invokes a deposit call to the official Deposit contract.
/// @param _stakingModuleId Id of the staking module to be deposited.
/// @param _depositCalldata Staking module calldata.
/// @dev Only the DepositSecurityModule is allowed to call this method.
function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external {
    _checkAppAuth(_getDepositSecurityModule());
    (ModuleState storage state, ModuleStateConfig storage stateConfig) = _getModuleState(_stakingModuleId);

    if (stateConfig.status != StakingModuleStatus.Active) revert StakingModuleNotActive();

    bytes32 withdrawalCredentials = _getWithdrawalCredentialsWithType(stateConfig.withdrawalCredentialsType);
    address stakingModuleAddress = stateConfig.moduleAddress;

    // Get depositable ether from Lido (similar to topUp)
    uint256 depositableEther = LIDO.getDepositableEther();
    uint256 stakingModuleDepositableEthAmount =
        _getModuleDepositAllocation(_stakingModuleId, depositableEther, false);
    // Calculate max deposits count (capped by max and module capacity)
    (,, uint256 depositableValidatorsCount) = _getStakingModuleSummary(_stakingModuleId);
    uint256 maxDepositsCount = Math.min(
        Math.min(state.deposits.maxDepositsPerBlock, depositableValidatorsCount),
        stakingModuleDepositableEthAmount / MAX_EFFECTIVE_BALANCE_WC_TYPE_01 // max possible initial deposits count
    );

    if (maxDepositsCount == 0) revert ZeroDeposits();

    // Get deposit data from module first - it may return fewer keys than requested
    (bytes memory publicKeysBatch, bytes memory signaturesBatch) =
        IStakingModule(stakingModuleAddress).obtainDepositData(maxDepositsCount, _depositCalldata);

    // Calculate actual deposits count from returned keys
    if (publicKeysBatch.length % PUBKEY_LENGTH != 0) revert WrongPubkeyLength();
    uint256 actualDepositsCount = publicKeysBatch.length / PUBKEY_LENGTH;

    if (actualDepositsCount > maxDepositsCount) revert ModuleReturnExceedTarget();

    // Calculate actual deposit value based on keys returned
    uint256 depositsValue = actualDepositsCount * MAX_EFFECTIVE_BALANCE_WC_TYPE_01;

    /// @dev Update the local state of the contract to prevent a reentrancy attack
    /// even though the staking modules are trusted contracts.
    _updateModuleLastDepositState(_stakingModuleId, depositsValue);

    if (actualDepositsCount == 0) return;

    uint256 etherBalanceBeforeDeposits = address(this).balance;

    // Pull ETH from Lido based on actual keys returned
    LIDO.withdrawDepositableEther(depositsValue, actualDepositsCount);

    BeaconChainDepositor.makeBeaconChainDeposits32ETH(
        DEPOSIT_CONTRACT,
        actualDepositsCount,
        abi.encodePacked(withdrawalCredentials),
        publicKeysBatch,
        signaturesBatch
    );

    uint256 etherBalanceAfterDeposits = address(this).balance;

    /// @dev All pulled ETH must be deposited and self balance stay the same.
    assert(etherBalanceBeforeDeposits == etherBalanceAfterDeposits);
}

/// @notice Set 0x01 credentials to withdraw ETH on Consensus Layer side.
/// @param _withdrawalCredentials 0x01 withdrawal credentials field as defined in the Consensus Layer specs.
/// @dev Note that setWithdrawalCredentials discards all unused deposits data as the signatures are invalidated.
/// @dev The function is restricted to the `MANAGE_WITHDRAWAL_CREDENTIALS_ROLE` role.
function setWithdrawalCredentials(bytes32 _withdrawalCredentials)
    external
    onlyRole(MANAGE_WITHDRAWAL_CREDENTIALS_ROLE)
{
    _setWithdrawalCredentials(_withdrawalCredentials);
}

/// @notice Returns current credentials to withdraw ETH on Consensus Layer side.
/// @return Withdrawal credentials.
+ function getWithdrawalCredentials() public view returns (bytes32) { + return SRStorage.getRouterState().withdrawalCredentials; + } + + function _setWithdrawalCredentials(bytes32 wc) internal { + SRUtils._requireNotZero(WithdrawalCredentials.getAddr(wc)); + SRUtils._requireWCTypeValid(WithdrawalCredentials.getType(wc)); + SRStorage.getRouterState().withdrawalCredentials = wc; + emit WithdrawalCredentialsSet(wc, _msgSender()); + + // Notify all staking modules about the withdrawal credentials change + SRLib._notifyStakingModulesOfWithdrawalCredentialsChange(); + } + + function _getWithdrawalCredentialsWithType(uint8 withdrawalCredentialsType) internal view returns (bytes32) { + bytes32 wc = getWithdrawalCredentials(); + return wc.setType(withdrawalCredentialsType); + } + + /// @dev Save the last deposit state for the staking module and emit the event + /// @param stakingModuleId id of the staking module to be deposited + /// @param depositsValue value to deposit + function _updateModuleLastDepositState(uint256 stakingModuleId, uint256 depositsValue) internal { + SRLib._updateModuleLastDepositState(stakingModuleId); + emit StakingRouterETHDeposited(stakingModuleId, depositsValue); + } + + /// @notice Allocation for single module based on target share + /// @param moduleId Id of staking module + /// @param amountToAllocate Eth amount that can be deposited in module + /// @param isTopUp Whether the allocation is for top-up deposits + /// @return allocation Eth amount that can be deposited in module with id `moduleId` (can be less than `amountToAllocate`) + function _getModuleDepositAllocation(uint256 moduleId, uint256 amountToAllocate, bool isTopUp) + internal + view + returns (uint256 allocation) + { + return SRLib._getModuleDepositAllocation(_getConfig(), moduleId, amountToAllocate, isTopUp); + } + + /// module wrapper + function _getStakingModuleNodeOperatorsCount(IStakingModule _stakingModule) internal view returns (uint256) { + return 
_stakingModule.getNodeOperatorsCount(); + } + + function _getStakingModuleActiveNodeOperatorsCount(IStakingModule _stakingModule) internal view returns (uint256) { + return _stakingModule.getActiveNodeOperatorsCount(); + } + + function _getStakingModuleNodeOperatorIds(IStakingModule _stakingModule, uint256 _offset, uint256 _limit) + internal + view + returns (uint256[] memory) + { + return _stakingModule.getNodeOperatorIds(_offset, _limit); + } + + function _getStakingModuleNodeOperatorIsActive(IStakingModule _stakingModule, uint256 _nodeOperatorId) + internal + view + returns (bool) + { + return _stakingModule.getNodeOperatorIsActive(_nodeOperatorId); + } + + /// --- + + function _getModuleState(uint256 _moduleId) + internal + view + returns (ModuleState storage state, ModuleStateConfig storage stateConfig) + { + SRUtils._requireModuleIdExists(_moduleId); + state = _moduleId.getModuleState(); + stateConfig = state.config; + } + + function _getModuleStateCompat(uint256 _moduleId) internal view returns (StakingModule memory moduleState) { + moduleState.id = uint24(_moduleId); + + ModuleState storage state = _moduleId.getModuleState(); + moduleState.name = state.name; + + /// @dev use multiply SLOAD as this data readonly by offchain tools, so minimize bytecode size + + ModuleStateConfig storage stateConfig = state.config; + moduleState.stakingModuleAddress = stateConfig.moduleAddress; + moduleState.stakingModuleFee = stateConfig.moduleFee; + moduleState.treasuryFee = stateConfig.treasuryFee; + moduleState.stakeShareLimit = stateConfig.stakeShareLimit; + moduleState.status = uint8(stateConfig.status); + moduleState.priorityExitShareThreshold = stateConfig.priorityExitShareThreshold; + moduleState.withdrawalCredentialsType = stateConfig.withdrawalCredentialsType; + + ModuleStateDeposits storage stateDeposits = state.deposits; + moduleState.lastDepositAt = stateDeposits.lastDepositAt; + moduleState.lastDepositBlock = stateDeposits.lastDepositBlock; + 
moduleState.maxDepositsPerBlock = stateDeposits.maxDepositsPerBlock; + moduleState.minDepositBlockDistance = stateDeposits.minDepositBlockDistance; + + ModuleStateAccounting storage moduleAcc = state.accounting; + moduleState.validatorsBalanceGwei = moduleAcc.validatorsBalanceGwei; + moduleState.exitedValidatorsCount = moduleAcc.exitedValidatorsCount; + } + + /// @dev Optimizes contract deployment size by wrapping the 'stakingModule.getStakingModuleSummary' function. + function _getStakingModuleSummary(uint256 _moduleId) + internal + view + returns (uint256 totalExitedValidators, uint256 totalDepositedValidators, uint256 depositableValidatorsCount) + { + return _moduleId.getIStakingModule().getStakingModuleSummary(); + } + + function _getStakingModuleSummaryStruct(uint256 _stakingModuleId) + internal + view + returns (StakingModuleSummary memory summary) + { + (summary.totalExitedValidators, summary.totalDepositedValidators, summary.depositableValidatorsCount) = + _getStakingModuleSummary(_stakingModuleId); + } + + function _getNodeOperatorSummary(IStakingModule _stakingModule, uint256 _nodeOperatorId) + internal + view + returns (NodeOperatorSummary memory summary) + { + ( + summary.targetLimitMode, + summary.targetValidatorsCount,,,, + summary.totalExitedValidators, + summary.totalDepositedValidators, + summary.depositableValidatorsCount + ) = _stakingModule.getNodeOperatorSummary(_nodeOperatorId); + } + + function _getAccountingOracle() internal view returns (address) { + return LIDO_LOCATOR.accountingOracle(); + } + + function _getTopUpGateway() internal view returns (address) { + return LIDO_LOCATOR.topUpGateway(); + } + + function _getDepositSecurityModule() internal view returns (address) { + return LIDO_LOCATOR.depositSecurityModule(); + } + + function _checkAppAuth(address app) internal view { + if (_msgSender() != app) revert NotAuthorized(); + } + + /// @notice memory config cache + /// @dev Build once per tx, reuse across all lib calls + function 
_getConfig() private view returns (SRLib.Config memory) { + return + SRLib.Config({maxEBType1: MAX_EFFECTIVE_BALANCE_WC_TYPE_01, maxEBType2: MAX_EFFECTIVE_BALANCE_WC_TYPE_02}); + } + + function _toE4Precision(uint256 _value, uint256 _precision) internal pure returns (uint16) { + return uint16((_value * SRUtils.TOTAL_BASIS_POINTS) / _precision); + } +} diff --git a/contracts/0.8.9/Accounting.sol b/contracts/0.8.9/Accounting.sol index 975b428e0f..7dbb9dca5f 100644 --- a/contracts/0.8.9/Accounting.sol +++ b/contracts/0.8.9/Accounting.sol @@ -14,8 +14,21 @@ import {IVaultHub} from "contracts/common/interfaces/IVaultHub.sol"; import {IPostTokenRebaseReceiver} from "./interfaces/IPostTokenRebaseReceiver.sol"; import {WithdrawalQueue} from "./WithdrawalQueue.sol"; -import {StakingRouter} from "./StakingRouter.sol"; +interface IStakingRouter { + function getStakingRewardsDistribution() + external + view + returns ( + address[] memory recipients, + uint256[] memory stakingModuleIds, + uint96[] memory stakingModuleFees, + uint96 totalFee, + uint256 precisionPoints + ); + + function reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares) external; +} /// @title Lido Accounting contract /// @author folkyatina @@ -29,14 +42,15 @@ contract Accounting { IBurner burner; WithdrawalQueue withdrawalQueue; IPostTokenRebaseReceiver postTokenRebaseReceiver; - StakingRouter stakingRouter; + IStakingRouter stakingRouter; IVaultHub vaultHub; } /// @notice snapshot of the protocol state that may be changed during the report struct PreReportState { - uint256 clValidators; - uint256 clBalance; + uint256 clValidatorsBalance; + uint256 clPendingBalance; + uint256 depositedBalance; uint256 totalPooledEther; uint256 totalShares; uint256 depositedValidators; @@ -89,9 +103,6 @@ contract Accounting { uint256 treasurySharesToMint; } - /// @notice deposit size in wei (for pre-maxEB accounting) - uint256 private constant DEPOSIT_SIZE = 32 ether; - ILidoLocator public 
immutable LIDO_LOCATOR; ILido public immutable LIDO; @@ -135,7 +146,7 @@ contract Accounting { /// @dev reads the current state of the protocol to the memory function _snapshotPreReportState(Contracts memory _contracts, bool isSimulation) internal view returns (PreReportState memory pre) { - (pre.depositedValidators, pre.clValidators, pre.clBalance) = LIDO.getBeaconStat(); + (pre.clValidatorsBalance, pre.clPendingBalance,, pre.depositedBalance) = LIDO.getBalanceStats(); pre.totalPooledEther = LIDO.getTotalPooledEther(); pre.totalShares = LIDO.getTotalShares(); pre.externalShares = LIDO.getExternalShares(); @@ -165,10 +176,8 @@ contract Accounting { _report ); - // Principal CL balance is the sum of the current CL balance and - // validator deposits during this report - // TODO: to support maxEB we need to get rid of validator counting - update.principalClBalance = _pre.clBalance + (_report.clValidators - _pre.clValidators) * DEPOSIT_SIZE; + // Principal CL balance is sum of previous balances and new deposits + update.principalClBalance = _pre.clValidatorsBalance + _pre.clPendingBalance + _pre.depositedBalance; // Limit the rebase to avoid oracle frontrunning // by leaving some ether to sit in EL rewards vault or withdrawals vault @@ -182,7 +191,7 @@ contract Accounting { _pre.totalPooledEther - _pre.externalEther, // we need to change the base as shareRate is now calculated on _pre.totalShares - _pre.externalShares, // internal ether and shares, but inside it's still total update.principalClBalance, - _report.clBalance, + _report.clValidatorsBalance + _report.clPendingBalance, _report.withdrawalVaultBalance, _report.elRewardsVaultBalance, _report.sharesRequestedToBurn, @@ -190,13 +199,13 @@ contract Accounting { update.sharesToFinalizeWQ ); - uint256 postInternalSharesBeforeFees = - _pre.totalShares - _pre.externalShares // internal shares before - - update.totalSharesToBurn; // shares to be burned for withdrawals and cover + uint256 postInternalSharesBeforeFees = 
_pre.totalShares - + _pre.externalShares - // internal shares before + update.totalSharesToBurn; // shares to be burned for withdrawals and cover update.postInternalEther = _pre.totalPooledEther - _pre.externalEther // internal ether before - + _report.clBalance + update.withdrawalsVaultTransfer - update.principalClBalance + + _report.clValidatorsBalance + _report.clPendingBalance + update.withdrawalsVaultTransfer - update.principalClBalance + update.elRewardsVaultTransfer - update.etherToFinalizeWQ; @@ -208,7 +217,10 @@ contract Accounting { postInternalSharesBeforeFees ); - update.postInternalShares = postInternalSharesBeforeFees + update.sharesToMintAsFees + _pre.badDebtToInternalize; + update.postInternalShares = + postInternalSharesBeforeFees + + update.sharesToMintAsFees + + _pre.badDebtToInternalize; uint256 postExternalShares = _pre.externalShares - _pre.badDebtToInternalize; // can't underflow by design update.postTotalShares = update.postInternalShares + postExternalShares; @@ -232,7 +244,7 @@ contract Accounting { /// @return sharesToMintAsFees total number of shares to be minted as Lido Core fee /// @return feeDistribution the number of shares that is minted to each module or treasury function _calculateProtocolFees( - StakingRouter _stakingRouter, + IStakingRouter _stakingRouter, ReportValues calldata _report, CalculatedValues memory _update, uint256 _internalSharesBeforeFees @@ -283,7 +295,7 @@ contract Accounting { // but with fees taken as ether deduction instead of minting shares // to learn the amount of shares we need to mint to compensate for this fee - uint256 unifiedClBalance = _report.clBalance + _update.withdrawalsVaultTransfer; + uint256 unifiedClBalance = _report.clValidatorsBalance + _report.clPendingBalance + _update.withdrawalsVaultTransfer; // Don't mint/distribute any protocol fee on the non-profitable Lido oracle report // (when consensus layer balance delta is zero or negative). 
// See LIP-12 for details: @@ -313,7 +325,7 @@ contract Accounting { uint256 totalModuleFeeShares = 0; - for (uint256 i; i < stakingModuleFees.length; ++i) { + for (uint256 i; i < length; ++i) { uint256 moduleFee = stakingModuleFees[i]; if (moduleFee > 0) { uint256 moduleFeeShares = (_totalSharesToMintAsFees * moduleFee) / _totalFee; @@ -343,7 +355,11 @@ contract Accounting { ]; } - LIDO.processClStateUpdate(_report.timestamp, _pre.clValidators, _report.clValidators, _report.clBalance); + LIDO.processClStateUpdate( + _report.timestamp, + _report.clValidatorsBalance, + _report.clPendingBalance + ); if (_pre.badDebtToInternalize > 0) { _contracts.vaultHub.decreaseInternalizedBadDebt(_pre.badDebtToInternalize); @@ -356,7 +372,7 @@ contract Accounting { LIDO.collectRewardsAndProcessWithdrawals( _report.timestamp, - _report.clBalance, + _report.clValidatorsBalance + _report.clPendingBalance, _update.principalClBalance, _update.withdrawalsVaultTransfer, _update.elRewardsVaultTransfer, @@ -401,9 +417,7 @@ contract Accounting { CalculatedValues memory _update ) internal { if (_report.timestamp >= block.timestamp) revert IncorrectReportTimestamp(_report.timestamp, block.timestamp); - if (_report.clValidators < _pre.clValidators || _report.clValidators > _pre.depositedValidators) { - revert IncorrectReportValidators(_report.clValidators, _pre.clValidators, _pre.depositedValidators); - } + // Validator count validation removed for MaxEB support - now using balance-based accounting // Oracle should consider this limitation: // During the AO report the ether to finalize the WQ cannot be greater or equal to `simulatedPostInternalEther` @@ -411,13 +425,15 @@ contract Accounting { _contracts.oracleReportSanityChecker.checkAccountingOracleReport( _report.timeElapsed, - _update.principalClBalance, - _report.clBalance, + _pre.clValidatorsBalance, + _pre.clPendingBalance, + _report.clValidatorsBalance, + _report.clPendingBalance, _report.withdrawalVaultBalance, 
_report.elRewardsVaultBalance, _report.sharesRequestedToBurn, - _pre.clValidators, - _report.clValidators + _pre.depositedBalance, + _update.withdrawalsVaultTransfer ); if (_report.withdrawalFinalizationBatches.length > 0) { @@ -493,13 +509,12 @@ contract Accounting { IBurner(burner), WithdrawalQueue(withdrawalQueue), IPostTokenRebaseReceiver(postTokenRebaseReceiver), - StakingRouter(payable(stakingRouter)), + IStakingRouter(stakingRouter), IVaultHub(vaultHub) ); } error NotAuthorized(string operation, address addr); error IncorrectReportTimestamp(uint256 reportTimestamp, uint256 upperBoundTimestamp); - error IncorrectReportValidators(uint256 reportValidators, uint256 minValidators, uint256 maxValidators); error InternalSharesCantBeZero(); } diff --git a/contracts/0.8.9/BeaconChainDepositor.sol b/contracts/0.8.9/BeaconChainDepositor.sol deleted file mode 100644 index 4bcd2f5f37..0000000000 --- a/contracts/0.8.9/BeaconChainDepositor.sol +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-FileCopyrightText: 2023 Lido -// SPDX-License-Identifier: GPL-3.0 - -// See contracts/COMPILERS.md -pragma solidity 0.8.9; - -import {MemUtils} from "../common/lib/MemUtils.sol"; - -interface IDepositContract { - function get_deposit_root() external view returns (bytes32 rootHash); - - function deposit( - bytes calldata pubkey, // 48 bytes - bytes calldata withdrawal_credentials, // 32 bytes - bytes calldata signature, // 96 bytes - bytes32 deposit_data_root - ) external payable; -} - -contract BeaconChainDepositor { - uint256 internal constant PUBLIC_KEY_LENGTH = 48; - uint256 internal constant SIGNATURE_LENGTH = 96; - uint256 internal constant DEPOSIT_SIZE = 32 ether; - - /// @dev deposit amount 32eth in gweis converted to little endian uint64 - /// DEPOSIT_SIZE_IN_GWEI_LE64 = toLittleEndian64(32 ether / 1 gwei) - uint64 internal constant DEPOSIT_SIZE_IN_GWEI_LE64 = 0x0040597307000000; - - IDepositContract public immutable DEPOSIT_CONTRACT; - - constructor(address _depositContract) { - if 
(_depositContract == address(0)) revert DepositContractZeroAddress(); - DEPOSIT_CONTRACT = IDepositContract(_depositContract); - } - - /// @dev Invokes a deposit call to the official Beacon Deposit contract - /// @param _keysCount amount of keys to deposit - /// @param _withdrawalCredentials Commitment to a public key for withdrawals - /// @param _publicKeysBatch A BLS12-381 public keys batch - /// @param _signaturesBatch A BLS12-381 signatures batch - function _makeBeaconChainDeposits32ETH( - uint256 _keysCount, - bytes memory _withdrawalCredentials, - bytes memory _publicKeysBatch, - bytes memory _signaturesBatch - ) internal { - if (_publicKeysBatch.length != PUBLIC_KEY_LENGTH * _keysCount) { - revert InvalidPublicKeysBatchLength(_publicKeysBatch.length, PUBLIC_KEY_LENGTH * _keysCount); - } - if (_signaturesBatch.length != SIGNATURE_LENGTH * _keysCount) { - revert InvalidSignaturesBatchLength(_signaturesBatch.length, SIGNATURE_LENGTH * _keysCount); - } - - bytes memory publicKey = MemUtils.unsafeAllocateBytes(PUBLIC_KEY_LENGTH); - bytes memory signature = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH); - - for (uint256 i; i < _keysCount;) { - MemUtils.copyBytes(_publicKeysBatch, publicKey, i * PUBLIC_KEY_LENGTH, 0, PUBLIC_KEY_LENGTH); - MemUtils.copyBytes(_signaturesBatch, signature, i * SIGNATURE_LENGTH, 0, SIGNATURE_LENGTH); - - DEPOSIT_CONTRACT.deposit{value: DEPOSIT_SIZE}( - publicKey, _withdrawalCredentials, signature, _computeDepositDataRoot(_withdrawalCredentials, publicKey, signature) - ); - - unchecked { - ++i; - } - } - } - - /// @dev computes the deposit_root_hash required by official Beacon Deposit contract - /// @param _publicKey A BLS12-381 public key. 
- /// @param _signature A BLS12-381 signature - function _computeDepositDataRoot(bytes memory _withdrawalCredentials, bytes memory _publicKey, bytes memory _signature) - private - pure - returns (bytes32) - { - // Compute deposit data root (`DepositData` hash tree root) according to deposit_contract.sol - bytes memory sigPart1 = MemUtils.unsafeAllocateBytes(64); - bytes memory sigPart2 = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH - 64); - MemUtils.copyBytes(_signature, sigPart1, 0, 0, 64); - MemUtils.copyBytes(_signature, sigPart2, 64, 0, SIGNATURE_LENGTH - 64); - - bytes32 publicKeyRoot = sha256(abi.encodePacked(_publicKey, bytes16(0))); - bytes32 signatureRoot = sha256(abi.encodePacked(sha256(abi.encodePacked(sigPart1)), sha256(abi.encodePacked(sigPart2, bytes32(0))))); - - return sha256( - abi.encodePacked( - sha256(abi.encodePacked(publicKeyRoot, _withdrawalCredentials)), - sha256(abi.encodePacked(DEPOSIT_SIZE_IN_GWEI_LE64, bytes24(0), signatureRoot)) - ) - ); - } - - error DepositContractZeroAddress(); - error InvalidPublicKeysBatchLength(uint256 actual, uint256 expected); - error InvalidSignaturesBatchLength(uint256 actual, uint256 expected); -} diff --git a/contracts/0.8.9/DepositSecurityModule.sol b/contracts/0.8.9/DepositSecurityModule.sol index b39ef28bb0..3edd27be95 100644 --- a/contracts/0.8.9/DepositSecurityModule.sol +++ b/contracts/0.8.9/DepositSecurityModule.sol @@ -7,7 +7,6 @@ pragma solidity 0.8.9; import {ECDSA} from "../common/lib/ECDSA.sol"; interface ILido { - function deposit(uint256 _maxDepositsCount, uint256 _stakingModuleId, bytes calldata _depositCalldata) external; function canDeposit() external view returns (bool); } @@ -17,16 +16,15 @@ interface IDepositContract { interface IStakingRouter { function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256); - function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256); - function 
getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool); function getStakingModuleNonce(uint256 _stakingModuleId) external view returns (uint256); function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) external view returns (uint256); - function hasStakingModule(uint256 _stakingModuleId) external view returns (bool); + function canDeposit(uint256 _stakingModuleId) external view returns (bool); function decreaseStakingModuleVettedKeysCountByNodeOperator( uint256 _stakingModuleId, bytes calldata _nodeOperatorIds, bytes calldata _vettedSigningKeysCounts ) external; + function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external; } /** @@ -308,7 +306,7 @@ contract DepositSecurityModule { * Reverts if any of the addresses is already a guardian or is zero. */ function addGuardians(address[] memory addresses, uint256 newQuorum) external onlyOwner { - for (uint256 i = 0; i < addresses.length; ) { + for (uint256 i = 0; i < addresses.length;) { _addGuardian(addresses[i]); unchecked { @@ -415,22 +413,16 @@ contract DepositSecurityModule { * - the staking module is active; * - the guardian quorum is not set to zero; * - the deposit distance is greater than the minimum required; - * - LIDO.canDeposit() returns true. + * - LIDO.canDeposit() returns true; + * - STAKING_ROUTER.canDeposit returns true. 
*/ function canDeposit(uint256 stakingModuleId) external view returns (bool) { - if (!STAKING_ROUTER.hasStakingModule(stakingModuleId)) return false; + if (!STAKING_ROUTER.canDeposit(stakingModuleId)) return false; - bool isModuleActive = STAKING_ROUTER.getStakingModuleIsActive(stakingModuleId); bool isDepositDistancePassed = _isMinDepositDistancePassed(stakingModuleId); bool isLidoCanDeposit = LIDO.canDeposit(); - return ( - !isDepositsPaused - && isModuleActive - && quorum > 0 - && isDepositDistancePassed - && isLidoCanDeposit - ); + return (!isDepositsPaused && quorum > 0 && isDepositDistancePassed && isLidoCanDeposit); } /** @@ -462,12 +454,13 @@ contract DepositSecurityModule { /// guardian to react and pause deposits to all modules. uint256 lastDepositToModuleBlock = STAKING_ROUTER.getStakingModuleLastDepositBlock(stakingModuleId); uint256 minDepositBlockDistance = STAKING_ROUTER.getStakingModuleMinDepositBlockDistance(stakingModuleId); - uint256 maxLastDepositBlock = lastDepositToModuleBlock >= lastDepositBlock ? lastDepositToModuleBlock : lastDepositBlock; + uint256 maxLastDepositBlock = + lastDepositToModuleBlock >= lastDepositBlock ? lastDepositToModuleBlock : lastDepositBlock; return block.number - maxLastDepositBlock >= minDepositBlockDistance; } /** - * @notice Calls LIDO.deposit(maxDepositsPerBlock, stakingModuleId, depositCalldata). + * @notice Calls STAKING_ROUTER.deposit(stakingModuleId, depositCalldata). * @param blockNumber The block number at which the deposit intent was created. * @param blockHash The block hash at which the deposit intent was created. * @param depositRoot The deposit root hash. 
@@ -509,15 +502,15 @@ contract DepositSecurityModule { if (nonce != onchainNonce) revert ModuleNonceChanged(); if (quorum == 0 || sortedGuardianSignatures.length < quorum) revert DepositNoQuorum(); - if (!STAKING_ROUTER.getStakingModuleIsActive(stakingModuleId)) revert DepositInactiveModule(); + if (!STAKING_ROUTER.canDeposit(stakingModuleId)) revert DepositInactiveModule(); if (!_isMinDepositDistancePassed(stakingModuleId)) revert DepositTooFrequent(); if (blockHash == bytes32(0) || blockhash(blockNumber) != blockHash) revert DepositUnexpectedBlockHash(); if (isDepositsPaused) revert DepositsArePaused(); _verifyAttestSignatures(depositRoot, blockNumber, blockHash, stakingModuleId, nonce, sortedGuardianSignatures); - uint256 maxDepositsPerBlock = STAKING_ROUTER.getStakingModuleMaxDepositsPerBlock(stakingModuleId); - LIDO.deposit(maxDepositsPerBlock, stakingModuleId, depositCalldata); + // Call StakingRouter instead of Lido - SR will pull ETH from Lido + STAKING_ROUTER.deposit(stakingModuleId, depositCalldata); _setLastDepositBlock(block.number); } @@ -537,7 +530,7 @@ contract DepositSecurityModule { address prevSignerAddr; address signerAddr; - for (uint256 i = 0; i < sigs.length; ) { + for (uint256 i = 0; i < sigs.length;) { signerAddr = ECDSA.recover(msgHash, sigs[i].r, sigs[i].vs); if (!_isGuardian(signerAddr)) revert InvalidSignature(); if (signerAddr <= prevSignerAddr) revert SignaturesNotSorted(); @@ -620,9 +613,7 @@ contract DepositSecurityModule { if (blockHash == bytes32(0) || blockhash(blockNumber) != blockHash) revert UnvetUnexpectedBlockHash(); STAKING_ROUTER.decreaseStakingModuleVettedKeysCountByNodeOperator( - stakingModuleId, - nodeOperatorIds, - vettedSigningKeysCounts + stakingModuleId, nodeOperatorIds, vettedSigningKeysCounts ); } } diff --git a/contracts/0.8.9/LidoLocator.sol b/contracts/0.8.9/LidoLocator.sol index f60d04029f..d0fe0dd515 100644 --- a/contracts/0.8.9/LidoLocator.sol +++ b/contracts/0.8.9/LidoLocator.sol @@ -29,6 +29,7 @@ contract 
LidoLocator is ILidoLocator { address oracleDaemonConfig; address validatorExitDelayVerifier; address triggerableWithdrawalsGateway; + address consolidationGateway; address accounting; address predepositGuarantee; address wstETH; @@ -36,6 +37,7 @@ contract LidoLocator is ILidoLocator { address vaultFactory; address lazyOracle; address operatorGrid; + address topUpGateway; } error ZeroAddress(); @@ -56,6 +58,7 @@ contract LidoLocator is ILidoLocator { address public immutable oracleDaemonConfig; address public immutable validatorExitDelayVerifier; address public immutable triggerableWithdrawalsGateway; + address public immutable consolidationGateway; address public immutable accounting; address public immutable predepositGuarantee; address public immutable wstETH; @@ -63,6 +66,7 @@ contract LidoLocator is ILidoLocator { address public immutable vaultFactory; address public immutable lazyOracle; address public immutable operatorGrid; + address public immutable topUpGateway; //solhint-enable immutable-vars-naming /** @@ -86,6 +90,7 @@ contract LidoLocator is ILidoLocator { oracleDaemonConfig = _assertNonZero(_config.oracleDaemonConfig); validatorExitDelayVerifier = _assertNonZero(_config.validatorExitDelayVerifier); triggerableWithdrawalsGateway = _assertNonZero(_config.triggerableWithdrawalsGateway); + consolidationGateway = _assertNonZero(_config.consolidationGateway); accounting = _assertNonZero(_config.accounting); predepositGuarantee = _assertNonZero(_config.predepositGuarantee); wstETH = _assertNonZero(_config.wstETH); @@ -93,6 +98,7 @@ contract LidoLocator is ILidoLocator { vaultFactory = _assertNonZero(_config.vaultFactory); lazyOracle = _assertNonZero(_config.lazyOracle); operatorGrid = _assertNonZero(_config.operatorGrid); + topUpGateway = _assertNonZero(_config.topUpGateway); } function coreComponents() external view returns ( diff --git a/contracts/0.8.9/StakingRouter.sol b/contracts/0.8.9/StakingRouter.sol deleted file mode 100644 index 
8643d1d425..0000000000 --- a/contracts/0.8.9/StakingRouter.sol +++ /dev/null @@ -1,1505 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: GPL-3.0 - -/* See contracts/COMPILERS.md */ -pragma solidity 0.8.9; - -import {MinFirstAllocationStrategy} from "contracts/common/lib/MinFirstAllocationStrategy.sol"; -import {Math256} from "contracts/common/lib/Math256.sol"; -import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; - -import {AccessControlEnumerable} from "./utils/access/AccessControlEnumerable.sol"; -import {UnstructuredStorage} from "./lib/UnstructuredStorage.sol"; -import {Versioned} from "./utils/Versioned.sol"; -import {BeaconChainDepositor} from "./BeaconChainDepositor.sol"; - -contract StakingRouter is AccessControlEnumerable, BeaconChainDepositor, Versioned { - using UnstructuredStorage for bytes32; - - /// @dev Events - event StakingModuleAdded(uint256 indexed stakingModuleId, address stakingModule, string name, address createdBy); - event StakingModuleShareLimitSet(uint256 indexed stakingModuleId, uint256 stakeShareLimit, uint256 priorityExitShareThreshold, address setBy); - event StakingModuleFeesSet(uint256 indexed stakingModuleId, uint256 stakingModuleFee, uint256 treasuryFee, address setBy); - event StakingModuleStatusSet(uint256 indexed stakingModuleId, StakingModuleStatus status, address setBy); - event StakingModuleExitedValidatorsIncompleteReporting(uint256 indexed stakingModuleId, uint256 unreportedExitedValidatorsCount); - event StakingModuleMaxDepositsPerBlockSet( - uint256 indexed stakingModuleId, uint256 maxDepositsPerBlock, address setBy - ); - event StakingModuleMinDepositBlockDistanceSet( - uint256 indexed stakingModuleId, uint256 minDepositBlockDistance, address setBy - ); - event WithdrawalCredentialsSet(bytes32 withdrawalCredentials, address setBy); - event WithdrawalsCredentialsChangeFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); - event 
ExitedAndStuckValidatorsCountsUpdateFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); - event RewardsMintedReportFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); - - /// Emitted when the StakingRouter received ETH - event StakingRouterETHDeposited(uint256 indexed stakingModuleId, uint256 amount); - - event StakingModuleExitNotificationFailed( - uint256 indexed stakingModuleId, - uint256 indexed nodeOperatorId, - bytes _publicKey - ); - - /// @dev Errors - error ZeroAddressLido(); - error ZeroAddressAdmin(); - error ZeroAddressStakingModule(); - error InvalidStakeShareLimit(); - error InvalidFeeSum(); - error StakingModuleNotActive(); - error EmptyWithdrawalsCredentials(); - error DirectETHTransfer(); - error InvalidReportData(uint256 code); - error ExitedValidatorsCountCannotDecrease(); - error ReportedExitedValidatorsExceedDeposited( - uint256 reportedExitedValidatorsCount, - uint256 depositedValidatorsCount - ); - error StakingModulesLimitExceeded(); - error StakingModuleUnregistered(); - error AppAuthLidoFailed(); - error StakingModuleStatusTheSame(); - error StakingModuleWrongName(); - error UnexpectedCurrentValidatorsCount( - uint256 currentModuleExitedValidatorsCount, - uint256 currentNodeOpExitedValidatorsCount - ); - error UnexpectedFinalExitedValidatorsCount ( - uint256 newModuleTotalExitedValidatorsCount, - uint256 newModuleTotalExitedValidatorsCountInStakingRouter - ); - error InvalidDepositsValue(uint256 etherValue, uint256 depositsCount); - error StakingModuleAddressExists(); - error ArraysLengthMismatch(uint256 firstArrayLength, uint256 secondArrayLength); - error UnrecoverableModuleError(); - error InvalidPriorityExitShareThreshold(); - error InvalidMinDepositBlockDistance(); - error InvalidMaxDepositPerBlockValue(); - - enum StakingModuleStatus { - Active, // deposits and rewards allowed - DepositsPaused, // deposits NOT allowed, rewards allowed - Stopped // deposits and rewards NOT allowed - } - - struct 
StakingModule { - /// @notice Unique id of the staking module. - uint24 id; - /// @notice Address of the staking module. - address stakingModuleAddress; - /// @notice Part of the fee taken from staking rewards that goes to the staking module. - uint16 stakingModuleFee; - /// @notice Part of the fee taken from staking rewards that goes to the treasury. - uint16 treasuryFee; - /// @notice Maximum stake share that can be allocated to a module, in BP. - /// @dev Formerly known as `targetShare`. - uint16 stakeShareLimit; - /// @notice Staking module status if staking module can not accept the deposits or can - /// participate in further reward distribution. - uint8 status; - /// @notice Name of the staking module. - string name; - /// @notice block.timestamp of the last deposit of the staking module. - /// @dev NB: lastDepositAt gets updated even if the deposit value was 0 and no actual deposit happened. - uint64 lastDepositAt; - /// @notice block.number of the last deposit of the staking module. - /// @dev NB: lastDepositBlock gets updated even if the deposit value was 0 and no actual deposit happened. - uint256 lastDepositBlock; - /// @notice Number of exited validators. - uint256 exitedValidatorsCount; - /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. - uint16 priorityExitShareThreshold; - /// @notice The maximum number of validators that can be deposited in a single block. - /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. - /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. - uint64 maxDepositsPerBlock; - /// @notice The minimum distance between deposits in blocks. - /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. - /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function). 
- uint64 minDepositBlockDistance; - } - - struct StakingModuleCache { - address stakingModuleAddress; - uint24 stakingModuleId; - uint16 stakingModuleFee; - uint16 treasuryFee; - uint16 stakeShareLimit; - StakingModuleStatus status; - uint256 activeValidatorsCount; - uint256 availableValidatorsCount; - } - - struct ValidatorExitData { - uint256 stakingModuleId; - uint256 nodeOperatorId; - bytes pubkey; - } - - bytes32 public constant MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = keccak256("MANAGE_WITHDRAWAL_CREDENTIALS_ROLE"); - bytes32 public constant STAKING_MODULE_MANAGE_ROLE = keccak256("STAKING_MODULE_MANAGE_ROLE"); - bytes32 public constant STAKING_MODULE_UNVETTING_ROLE = keccak256("STAKING_MODULE_UNVETTING_ROLE"); - bytes32 public constant REPORT_EXITED_VALIDATORS_ROLE = keccak256("REPORT_EXITED_VALIDATORS_ROLE"); - bytes32 public constant REPORT_VALIDATOR_EXITING_STATUS_ROLE = keccak256("REPORT_VALIDATOR_EXITING_STATUS_ROLE"); - bytes32 public constant REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = keccak256("REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE"); - bytes32 public constant UNSAFE_SET_EXITED_VALIDATORS_ROLE = keccak256("UNSAFE_SET_EXITED_VALIDATORS_ROLE"); - bytes32 public constant REPORT_REWARDS_MINTED_ROLE = keccak256("REPORT_REWARDS_MINTED_ROLE"); - - bytes32 internal constant LIDO_POSITION = keccak256("lido.StakingRouter.lido"); - - /// @dev Credentials to withdraw ETH on Consensus Layer side. - bytes32 internal constant WITHDRAWAL_CREDENTIALS_POSITION = keccak256("lido.StakingRouter.withdrawalCredentials"); - - /// @dev Total count of staking modules. - bytes32 internal constant STAKING_MODULES_COUNT_POSITION = keccak256("lido.StakingRouter.stakingModulesCount"); - /// @dev Id of the last added staking module. This counter grow on staking modules adding. - bytes32 internal constant LAST_STAKING_MODULE_ID_POSITION = keccak256("lido.StakingRouter.lastStakingModuleId"); - /// @dev Mapping is used instead of array to allow to extend the StakingModule. 
- bytes32 internal constant STAKING_MODULES_MAPPING_POSITION = keccak256("lido.StakingRouter.stakingModules"); - /// @dev Position of the staking modules in the `_stakingModules` map, plus 1 because - /// index 0 means a value is not in the set. - bytes32 internal constant STAKING_MODULE_INDICES_MAPPING_POSITION = keccak256("lido.StakingRouter.stakingModuleIndicesOneBased"); - - uint256 public constant FEE_PRECISION_POINTS = 10 ** 20; // 100 * 10 ** 18 - uint256 public constant TOTAL_BASIS_POINTS = 10000; - uint256 public constant MAX_STAKING_MODULES_COUNT = 32; - /// @dev Restrict the name size with 31 bytes to storage in a single slot. - uint256 public constant MAX_STAKING_MODULE_NAME_LENGTH = 31; - - constructor(address _depositContract) BeaconChainDepositor(_depositContract) {} - - /// @notice Initializes the contract. - /// @param _admin Lido DAO Aragon agent contract address. - /// @param _lido Lido address. - /// @param _withdrawalCredentials Credentials to withdraw ETH on Consensus Layer side. - /// @dev Proxy initialization method. - function initialize(address _admin, address _lido, bytes32 _withdrawalCredentials) external { - if (_admin == address(0)) revert ZeroAddressAdmin(); - if (_lido == address(0)) revert ZeroAddressLido(); - - _initializeContractVersionTo(3); - - _setupRole(DEFAULT_ADMIN_ROLE, _admin); - - LIDO_POSITION.setStorageAddress(_lido); - WITHDRAWAL_CREDENTIALS_POSITION.setStorageBytes32(_withdrawalCredentials); - emit WithdrawalCredentialsSet(_withdrawalCredentials, msg.sender); - } - - /// @dev Prohibit direct transfer to contract. - receive() external payable { - revert DirectETHTransfer(); - } - - /// @notice A function to finalize upgrade to v2 (from v1). Removed and no longer used. 
- /// @dev https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-10.md - /// See historical usage in commit: https://github.com/lidofinance/core/blob/c19480aa3366b26aa6eac17f85a6efae8b9f4f72/contracts/0.8.9/StakingRouter.sol#L190 - // function finalizeUpgrade_v2( - // uint256[] memory _priorityExitShareThresholds, - // uint256[] memory _maxDepositsPerBlock, - // uint256[] memory _minDepositBlockDistances - // ) external - - /// @notice Finalizes upgrade to v3 (from v2). Can be called only once. - function finalizeUpgrade_v3() external { - _checkContractVersion(2); - _updateContractVersion(3); - } - - /// @notice Returns Lido contract address. - /// @return Lido contract address. - function getLido() public view returns (address) { - return LIDO_POSITION.getStorageAddress(); - } - - /// @notice Registers a new staking module. - /// @param _name Name of staking module. - /// @param _stakingModuleAddress Address of staking module. - /// @param _stakeShareLimit Maximum share that can be allocated to a module. - /// @param _priorityExitShareThreshold Module's priority exit share threshold. - /// @param _stakingModuleFee Fee of the staking module taken from the staking rewards. - /// @param _treasuryFee Treasury fee. - /// @param _maxDepositsPerBlock The maximum number of validators that can be deposited in a single block. - /// @param _minDepositBlockDistance The minimum distance between deposits in blocks. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
- function addStakingModule( - string calldata _name, - address _stakingModuleAddress, - uint256 _stakeShareLimit, - uint256 _priorityExitShareThreshold, - uint256 _stakingModuleFee, - uint256 _treasuryFee, - uint256 _maxDepositsPerBlock, - uint256 _minDepositBlockDistance - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - if (_stakingModuleAddress == address(0)) revert ZeroAddressStakingModule(); - if (bytes(_name).length == 0 || bytes(_name).length > MAX_STAKING_MODULE_NAME_LENGTH) revert StakingModuleWrongName(); - - uint256 newStakingModuleIndex = getStakingModulesCount(); - - if (newStakingModuleIndex >= MAX_STAKING_MODULES_COUNT) - revert StakingModulesLimitExceeded(); - - for (uint256 i; i < newStakingModuleIndex; ) { - if (_stakingModuleAddress == _getStakingModuleByIndex(i).stakingModuleAddress) - revert StakingModuleAddressExists(); - - unchecked { - ++i; - } - } - - StakingModule storage newStakingModule = _getStakingModuleByIndex(newStakingModuleIndex); - uint24 newStakingModuleId = uint24(LAST_STAKING_MODULE_ID_POSITION.getStorageUint256()) + 1; - - newStakingModule.id = newStakingModuleId; - newStakingModule.name = _name; - newStakingModule.stakingModuleAddress = _stakingModuleAddress; - /// @dev Since `enum` is `uint8` by nature, so the `status` is stored as `uint8` to avoid - /// possible problems when upgrading. But for human readability, we use `enum` as - /// function parameter type. More about conversion in the docs: - /// https://docs.soliditylang.org/en/v0.8.17/types.html#enums - newStakingModule.status = uint8(StakingModuleStatus.Active); - - /// @dev Simulate zero value deposit to prevent real deposits into the new StakingModule via - /// DepositSecurityModule just after the addition. 
- _updateModuleLastDepositState(newStakingModule, newStakingModuleId, 0); - - _setStakingModuleIndexById(newStakingModuleId, newStakingModuleIndex); - LAST_STAKING_MODULE_ID_POSITION.setStorageUint256(newStakingModuleId); - STAKING_MODULES_COUNT_POSITION.setStorageUint256(newStakingModuleIndex + 1); - - emit StakingModuleAdded(newStakingModuleId, _stakingModuleAddress, _name, msg.sender); - _updateStakingModule( - newStakingModule, - newStakingModuleId, - _stakeShareLimit, - _priorityExitShareThreshold, - _stakingModuleFee, - _treasuryFee, - _maxDepositsPerBlock, - _minDepositBlockDistance - ); - } - - /// @notice Updates staking module params. - /// @param _stakingModuleId Staking module id. - /// @param _stakeShareLimit Target total stake share. - /// @param _priorityExitShareThreshold Module's priority exit share threshold. - /// @param _stakingModuleFee Fee of the staking module taken from the staking rewards. - /// @param _treasuryFee Treasury fee. - /// @param _maxDepositsPerBlock The maximum number of validators that can be deposited in a single block. - /// @param _minDepositBlockDistance The minimum distance between deposits in blocks. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
- function updateStakingModule( - uint256 _stakingModuleId, - uint256 _stakeShareLimit, - uint256 _priorityExitShareThreshold, - uint256 _stakingModuleFee, - uint256 _treasuryFee, - uint256 _maxDepositsPerBlock, - uint256 _minDepositBlockDistance - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - _updateStakingModule( - stakingModule, - _stakingModuleId, - _stakeShareLimit, - _priorityExitShareThreshold, - _stakingModuleFee, - _treasuryFee, - _maxDepositsPerBlock, - _minDepositBlockDistance - ); - } - - function _updateStakingModule( - StakingModule storage stakingModule, - uint256 _stakingModuleId, - uint256 _stakeShareLimit, - uint256 _priorityExitShareThreshold, - uint256 _stakingModuleFee, - uint256 _treasuryFee, - uint256 _maxDepositsPerBlock, - uint256 _minDepositBlockDistance - ) internal { - if (_stakeShareLimit > TOTAL_BASIS_POINTS) revert InvalidStakeShareLimit(); - if (_priorityExitShareThreshold > TOTAL_BASIS_POINTS) revert InvalidPriorityExitShareThreshold(); - if (_stakeShareLimit > _priorityExitShareThreshold) revert InvalidPriorityExitShareThreshold(); - if (_stakingModuleFee + _treasuryFee > TOTAL_BASIS_POINTS) revert InvalidFeeSum(); - if (_minDepositBlockDistance == 0 || _minDepositBlockDistance > type(uint64).max) revert InvalidMinDepositBlockDistance(); - if (_maxDepositsPerBlock > type(uint64).max) revert InvalidMaxDepositPerBlockValue(); - - stakingModule.stakeShareLimit = uint16(_stakeShareLimit); - stakingModule.priorityExitShareThreshold = uint16(_priorityExitShareThreshold); - stakingModule.treasuryFee = uint16(_treasuryFee); - stakingModule.stakingModuleFee = uint16(_stakingModuleFee); - stakingModule.maxDepositsPerBlock = uint64(_maxDepositsPerBlock); - stakingModule.minDepositBlockDistance = uint64(_minDepositBlockDistance); - - emit StakingModuleShareLimitSet(_stakingModuleId, _stakeShareLimit, 
_priorityExitShareThreshold, msg.sender); - emit StakingModuleFeesSet(_stakingModuleId, _stakingModuleFee, _treasuryFee, msg.sender); - emit StakingModuleMaxDepositsPerBlockSet(_stakingModuleId, _maxDepositsPerBlock, msg.sender); - emit StakingModuleMinDepositBlockDistanceSet(_stakingModuleId, _minDepositBlockDistance, msg.sender); - } - - /// @notice Updates the limit of the validators that can be used for deposit. - /// @param _stakingModuleId Id of the staking module. - /// @param _nodeOperatorId Id of the node operator. - /// @param _targetLimitMode Target limit mode. - /// @param _targetLimit Target limit of the node operator. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. - function updateTargetValidatorsLimits( - uint256 _stakingModuleId, - uint256 _nodeOperatorId, - uint256 _targetLimitMode, - uint256 _targetLimit - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - _getIStakingModuleById(_stakingModuleId).updateTargetValidatorsLimits( - _nodeOperatorId, _targetLimitMode, _targetLimit - ); - } - - /// @notice Reports the minted rewards to the staking modules with the specified ids. - /// @param _stakingModuleIds Ids of the staking modules. - /// @param _totalShares Total shares minted for the staking modules. - /// @dev The function is restricted to the `REPORT_REWARDS_MINTED_ROLE` role. - function reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares) - external - onlyRole(REPORT_REWARDS_MINTED_ROLE) - { - _validateEqualArrayLengths(_stakingModuleIds.length, _totalShares.length); - - for (uint256 i = 0; i < _stakingModuleIds.length; ) { - if (_totalShares[i] > 0) { - try _getIStakingModuleById(_stakingModuleIds[i]).onRewardsMinted(_totalShares[i]) {} - catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. 
- /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onRewardsMinted() reverts because of the - /// "out of gas" error. Here we assume that the onRewardsMinted() method doesn't - /// have reverts with empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - emit RewardsMintedReportFailed( - _stakingModuleIds[i], - lowLevelRevertData - ); - } - } - - unchecked { - ++i; - } - } - } - - /// @notice Updates total numbers of exited validators for staking modules with the specified module ids. - /// @param _stakingModuleIds Ids of the staking modules to be updated. - /// @param _exitedValidatorsCounts New counts of exited validators for the specified staking modules. - /// @return The total increase in the aggregate number of exited validators across all updated modules. - /// - /// @dev The total numbers are stored in the staking router and can differ from the totals obtained by calling - /// `IStakingModule.getStakingModuleSummary()`. The overall process of updating validator counts is the following: - /// - /// 1. In the first data submission phase, the oracle calls `updateExitedValidatorsCountByStakingModule` on the - /// staking router, passing the totals by module. The staking router stores these totals and uses them to - /// distribute new stake and staking fees between the modules. There can only be single call of this function - /// per oracle reporting frame. - /// - /// 2. In the second part of the second data submission phase, the oracle calls - /// `StakingRouter.reportStakingModuleExitedValidatorsCountByNodeOperator` on the staking router which passes - /// the counts by node operator to the staking module by calling `IStakingModule.updateExitedValidatorsCount`. - /// This can be done multiple times for the same module, passing data for different subsets of node - /// operators. - /// - /// 3. 
At the end of the second data submission phase, it's expected for the aggregate exited validators count - /// across all module's node operators (stored in the module) to match the total count for this module - /// (stored in the staking router). However, it might happen that the second phase of data submission doesn't - /// finish until the new oracle reporting frame is started, in which case staking router will emit a warning - /// event `StakingModuleExitedValidatorsIncompleteReporting` when the first data submission phase is performed - /// for a new reporting frame. This condition will result in the staking module having an incomplete data about - /// the exited validator counts during the whole reporting frame. Handling this condition is - /// the responsibility of each staking module. - /// - /// 4. When the second reporting phase is finished, i.e. when the oracle submitted the complete data on the exited - /// validator counts per node operator for the current reporting frame, the oracle calls - /// `StakingRouter.onValidatorsCountsByNodeOperatorReportingFinished` which, in turn, calls - /// `IStakingModule.onExitedAndStuckValidatorsCountsUpdated` on all modules. - /// - /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. 
- function updateExitedValidatorsCountByStakingModule( - uint256[] calldata _stakingModuleIds, - uint256[] calldata _exitedValidatorsCounts - ) - external - onlyRole(REPORT_EXITED_VALIDATORS_ROLE) - returns (uint256) - { - _validateEqualArrayLengths(_stakingModuleIds.length, _exitedValidatorsCounts.length); - - uint256 newlyExitedValidatorsCount; - - for (uint256 i = 0; i < _stakingModuleIds.length; ) { - uint256 stakingModuleId = _stakingModuleIds[i]; - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(stakingModuleId)); - - uint256 prevReportedExitedValidatorsCount = stakingModule.exitedValidatorsCount; - if (_exitedValidatorsCounts[i] < prevReportedExitedValidatorsCount) { - revert ExitedValidatorsCountCannotDecrease(); - } - - ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - /* uint256 depositableValidatorsCount */ - ) = _getStakingModuleSummary(IStakingModule(stakingModule.stakingModuleAddress)); - - if (_exitedValidatorsCounts[i] > totalDepositedValidators) { - revert ReportedExitedValidatorsExceedDeposited( - _exitedValidatorsCounts[i], - totalDepositedValidators - ); - } - - newlyExitedValidatorsCount += _exitedValidatorsCounts[i] - prevReportedExitedValidatorsCount; - - if (totalExitedValidators < prevReportedExitedValidatorsCount) { - // not all of the exited validators were async reported to the module - emit StakingModuleExitedValidatorsIncompleteReporting( - stakingModuleId, - prevReportedExitedValidatorsCount - totalExitedValidators - ); - } - - stakingModule.exitedValidatorsCount = _exitedValidatorsCounts[i]; - - unchecked { - ++i; - } - } - - return newlyExitedValidatorsCount; - } - - /// @notice Updates exited validators counts per node operator for the staking module with - /// the specified id. See the docs for `updateExitedValidatorsCountByStakingModule` for the - /// description of the overall update process. 
- /// - /// @param _stakingModuleId The id of the staking modules to be updated. - /// @param _nodeOperatorIds Ids of the node operators to be updated. - /// @param _exitedValidatorsCounts New counts of exited validators for the specified node operators. - /// - /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. - function reportStakingModuleExitedValidatorsCountByNodeOperator( - uint256 _stakingModuleId, - bytes calldata _nodeOperatorIds, - bytes calldata _exitedValidatorsCounts - ) - external - onlyRole(REPORT_EXITED_VALIDATORS_ROLE) - { - _checkValidatorsByNodeOperatorReportData(_nodeOperatorIds, _exitedValidatorsCounts); - _getIStakingModuleById(_stakingModuleId).updateExitedValidatorsCount(_nodeOperatorIds, _exitedValidatorsCounts); - } - - struct ValidatorsCountsCorrection { - /// @notice The expected current number of exited validators of the module that is - /// being corrected. - uint256 currentModuleExitedValidatorsCount; - /// @notice The expected current number of exited validators of the node operator - /// that is being corrected. - uint256 currentNodeOperatorExitedValidatorsCount; - /// @notice The corrected number of exited validators of the module. - uint256 newModuleExitedValidatorsCount; - /// @notice The corrected number of exited validators of the node operator. - uint256 newNodeOperatorExitedValidatorsCount; - } - - /// @notice Sets exited validators count for the given module and given node operator in that module - /// without performing critical safety checks, e.g. that exited validators count cannot decrease. - /// - /// Should only be used by the DAO in extreme cases and with sufficient precautions to correct invalid - /// data reported by the oracle committee due to a bug in the oracle daemon. - /// - /// @param _stakingModuleId Id of the staking module. - /// @param _nodeOperatorId Id of the node operator. 
- /// @param _triggerUpdateFinish Whether to call `onExitedAndStuckValidatorsCountsUpdated` on the module - /// after applying the corrections. - /// @param _correction See the docs for the `ValidatorsCountsCorrection` struct. - /// - /// @dev Reverts if the current numbers of exited validators of the module and node operator - /// don't match the supplied expected current values. - /// - /// @dev The function is restricted to the `UNSAFE_SET_EXITED_VALIDATORS_ROLE` role. - function unsafeSetExitedValidatorsCount( - uint256 _stakingModuleId, - uint256 _nodeOperatorId, - bool _triggerUpdateFinish, - ValidatorsCountsCorrection memory _correction - ) - external - onlyRole(UNSAFE_SET_EXITED_VALIDATORS_ROLE) - { - StakingModule storage stakingModuleState = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - IStakingModule stakingModule = IStakingModule(stakingModuleState.stakingModuleAddress); - - ( - /* uint256 targetLimitMode */, - /* uint256 targetValidatorsCount */, - /* uint256 stuckValidatorsCount, */, - /* uint256 refundedValidatorsCount */, - /* uint256 stuckPenaltyEndTimestamp */, - uint256 totalExitedValidators, - /* uint256 totalDepositedValidators */, - /* uint256 depositableValidatorsCount */ - ) = stakingModule.getNodeOperatorSummary(_nodeOperatorId); - - if (_correction.currentModuleExitedValidatorsCount != stakingModuleState.exitedValidatorsCount || - _correction.currentNodeOperatorExitedValidatorsCount != totalExitedValidators - ) { - revert UnexpectedCurrentValidatorsCount( - stakingModuleState.exitedValidatorsCount, - totalExitedValidators - ); - } - - stakingModuleState.exitedValidatorsCount = _correction.newModuleExitedValidatorsCount; - - stakingModule.unsafeUpdateValidatorsCount( - _nodeOperatorId, - _correction.newNodeOperatorExitedValidatorsCount - ); - - ( - uint256 moduleTotalExitedValidators, - uint256 moduleTotalDepositedValidators, - ) = _getStakingModuleSummary(stakingModule); - - if 
(_correction.newModuleExitedValidatorsCount > moduleTotalDepositedValidators) { - revert ReportedExitedValidatorsExceedDeposited( - _correction.newModuleExitedValidatorsCount, - moduleTotalDepositedValidators - ); - } - - if (_triggerUpdateFinish) { - if (moduleTotalExitedValidators != _correction.newModuleExitedValidatorsCount) { - revert UnexpectedFinalExitedValidatorsCount( - moduleTotalExitedValidators, - _correction.newModuleExitedValidatorsCount - ); - } - - stakingModule.onExitedAndStuckValidatorsCountsUpdated(); - } - } - - /// @notice Finalizes the reporting of the exited validators counts for the current - /// reporting frame. - /// - /// @dev Called by the oracle when the second phase of data reporting finishes, i.e. when the - /// oracle submitted the complete data on the exited validator counts per node operator - /// for the current reporting frame. See the docs for `updateExitedValidatorsCountByStakingModule` - /// for the description of the overall update process. - /// - /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. - function onValidatorsCountsByNodeOperatorReportingFinished() - external - onlyRole(REPORT_EXITED_VALIDATORS_ROLE) - { - uint256 stakingModulesCount = getStakingModulesCount(); - StakingModule storage stakingModule; - IStakingModule moduleContract; - - for (uint256 i; i < stakingModulesCount; ) { - stakingModule = _getStakingModuleByIndex(i); - moduleContract = IStakingModule(stakingModule.stakingModuleAddress); - - (uint256 exitedValidatorsCount, , ) = _getStakingModuleSummary(moduleContract); - if (exitedValidatorsCount == stakingModule.exitedValidatorsCount) { - // oracle finished updating exited validators for all node ops - try moduleContract.onExitedAndStuckValidatorsCountsUpdated() {} - catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. 
- /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onExitedAndStuckValidatorsCountsUpdated() - /// reverts because of the "out of gas" error. Here we assume that the - /// onExitedAndStuckValidatorsCountsUpdated() method doesn't have reverts with - /// empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - emit ExitedAndStuckValidatorsCountsUpdateFailed( - stakingModule.id, - lowLevelRevertData - ); - } - } - - unchecked { - ++i; - } - } - } - - /// @notice Decreases vetted signing keys counts per node operator for the staking module with - /// the specified id. - /// @param _stakingModuleId The id of the staking module to be updated. - /// @param _nodeOperatorIds Ids of the node operators to be updated. - /// @param _vettedSigningKeysCounts New counts of vetted signing keys for the specified node operators. - /// @dev The function is restricted to the `STAKING_MODULE_UNVETTING_ROLE` role. - function decreaseStakingModuleVettedKeysCountByNodeOperator( - uint256 _stakingModuleId, - bytes calldata _nodeOperatorIds, - bytes calldata _vettedSigningKeysCounts - ) external onlyRole(STAKING_MODULE_UNVETTING_ROLE) { - _checkValidatorsByNodeOperatorReportData(_nodeOperatorIds, _vettedSigningKeysCounts); - _getIStakingModuleById(_stakingModuleId).decreaseVettedSigningKeysCount(_nodeOperatorIds, _vettedSigningKeysCounts); - } - - /// @notice Returns all registered staking modules. - /// @return res Array of staking modules. - function getStakingModules() external view returns (StakingModule[] memory res) { - uint256 stakingModulesCount = getStakingModulesCount(); - res = new StakingModule[](stakingModulesCount); - for (uint256 i; i < stakingModulesCount; ) { - res[i] = _getStakingModuleByIndex(i); - - unchecked { - ++i; - } - } - } - - /// @notice Returns the ids of all registered staking modules. 
- /// @return stakingModuleIds Array of staking module ids. - function getStakingModuleIds() public view returns (uint256[] memory stakingModuleIds) { - uint256 stakingModulesCount = getStakingModulesCount(); - stakingModuleIds = new uint256[](stakingModulesCount); - for (uint256 i; i < stakingModulesCount; ) { - stakingModuleIds[i] = _getStakingModuleByIndex(i).id; - - unchecked { - ++i; - } - } - } - - /// @notice Returns the staking module by its id. - /// @param _stakingModuleId Id of the staking module. - /// @return Staking module data. - function getStakingModule(uint256 _stakingModuleId) - public - view - returns (StakingModule memory) - { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - } - - /// @notice Returns total number of staking modules. - /// @return Total number of staking modules. - function getStakingModulesCount() public view returns (uint256) { - return STAKING_MODULES_COUNT_POSITION.getStorageUint256(); - } - - /// @notice Returns true if staking module with the given id was registered via `addStakingModule`, false otherwise. - /// @param _stakingModuleId Id of the staking module. - /// @return True if staking module with the given id was registered, false otherwise. - function hasStakingModule(uint256 _stakingModuleId) external view returns (bool) { - return _getStorageStakingIndicesMapping()[_stakingModuleId] != 0; - } - - /// @notice Returns status of staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return Status of the staking module. - function getStakingModuleStatus(uint256 _stakingModuleId) - public - view - returns (StakingModuleStatus) - { - return StakingModuleStatus(_getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).status); - } - - /// @notice A summary of the staking module's validators. - struct StakingModuleSummary { - /// @notice The total number of validators in the EXITED state on the Consensus Layer. 
- /// @dev This value can't decrease in normal conditions. - uint256 totalExitedValidators; - - /// @notice The total number of validators deposited via the official Deposit Contract. - /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this - /// counter is not decreasing. - uint256 totalDepositedValidators; - - /// @notice The number of validators in the set available for deposit - uint256 depositableValidatorsCount; - } - - /// @notice A summary of node operator and its validators. - struct NodeOperatorSummary { - /// @notice Shows whether the current target limit applied to the node operator. - uint256 targetLimitMode; - - /// @notice Relative target active validators limit for operator. - uint256 targetValidatorsCount; - - /// @notice The number of validators with an expired request to exit time. - /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. - uint256 stuckValidatorsCount; - - /// @notice The number of validators that can't be withdrawn, but deposit costs were - /// compensated to the Lido by the node operator. - /// @dev [deprecated] Refunded validators processing has been removed, this field is no longer used. - uint256 refundedValidatorsCount; - - /// @notice A time when the penalty for stuck validators stops applying to node operator rewards. - /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. - uint256 stuckPenaltyEndTimestamp; - - /// @notice The total number of validators in the EXITED state on the Consensus Layer. - /// @dev This value can't decrease in normal conditions. - uint256 totalExitedValidators; - - /// @notice The total number of validators deposited via the official Deposit Contract. - /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this - /// counter is not decreasing. 
- uint256 totalDepositedValidators; - - /// @notice The number of validators in the set available for deposit. - uint256 depositableValidatorsCount; - } - - /// @notice Returns all-validators summary in the staking module. - /// @param _stakingModuleId Id of the staking module to return summary for. - /// @return summary Staking module summary. - function getStakingModuleSummary(uint256 _stakingModuleId) - public - view - returns (StakingModuleSummary memory summary) - { - IStakingModule stakingModule = IStakingModule(getStakingModule(_stakingModuleId).stakingModuleAddress); - ( - summary.totalExitedValidators, - summary.totalDepositedValidators, - summary.depositableValidatorsCount - ) = _getStakingModuleSummary(stakingModule); - } - - - /// @notice Returns node operator summary from the staking module. - /// @param _stakingModuleId Id of the staking module where node operator is onboarded. - /// @param _nodeOperatorId Id of the node operator to return summary for. - /// @return summary Node operator summary. 
- function getNodeOperatorSummary(uint256 _stakingModuleId, uint256 _nodeOperatorId) - public - view - returns (NodeOperatorSummary memory summary) - { - IStakingModule stakingModule = IStakingModule(getStakingModule(_stakingModuleId).stakingModuleAddress); - /// @dev using intermediate variables below due to "Stack too deep" error in case of - /// assigning directly into the NodeOperatorSummary struct - ( - uint256 targetLimitMode, - uint256 targetValidatorsCount, - /* uint256 stuckValidatorsCount */, - /* uint256 refundedValidatorsCount */, - /* uint256 stuckPenaltyEndTimestamp */, - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ) = stakingModule.getNodeOperatorSummary(_nodeOperatorId); - summary.targetLimitMode = targetLimitMode; - summary.targetValidatorsCount = targetValidatorsCount; - summary.totalExitedValidators = totalExitedValidators; - summary.totalDepositedValidators = totalDepositedValidators; - summary.depositableValidatorsCount = depositableValidatorsCount; - } - - /// @notice A collection of the staking module data stored across the StakingRouter and the - /// staking module contract. - /// - /// @dev This data, first of all, is designed for off-chain usage and might be redundant for - /// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. - struct StakingModuleDigest { - /// @notice The number of node operators registered in the staking module. - uint256 nodeOperatorsCount; - /// @notice The number of node operators registered in the staking module in active state. - uint256 activeNodeOperatorsCount; - /// @notice The current state of the staking module taken from the StakingRouter. - StakingModule state; - /// @notice A summary of the staking module's validators. - StakingModuleSummary summary; - } - - /// @notice A collection of the node operator data stored in the staking module. 
- /// @dev This data, first of all, is designed for off-chain usage and might be redundant for - /// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. - struct NodeOperatorDigest { - /// @notice Id of the node operator. - uint256 id; - /// @notice Shows whether the node operator is active or not. - bool isActive; - /// @notice A summary of node operator and its validators. - NodeOperatorSummary summary; - } - - /// @notice Returns staking module digest for each staking module registered in the staking router. - /// @return Array of staking module digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. - function getAllStakingModuleDigests() external view returns (StakingModuleDigest[] memory) { - return getStakingModuleDigests(getStakingModuleIds()); - } - - /// @notice Returns staking module digest for passed staking module ids. - /// @param _stakingModuleIds Ids of the staking modules to return data for. - /// @return digests Array of staking module digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. 
- function getStakingModuleDigests(uint256[] memory _stakingModuleIds) - public - view - returns (StakingModuleDigest[] memory digests) - { - digests = new StakingModuleDigest[](_stakingModuleIds.length); - for (uint256 i = 0; i < _stakingModuleIds.length; ) { - StakingModule memory stakingModuleState = getStakingModule(_stakingModuleIds[i]); - IStakingModule stakingModule = IStakingModule(stakingModuleState.stakingModuleAddress); - digests[i] = StakingModuleDigest({ - nodeOperatorsCount: stakingModule.getNodeOperatorsCount(), - activeNodeOperatorsCount: stakingModule.getActiveNodeOperatorsCount(), - state: stakingModuleState, - summary: getStakingModuleSummary(_stakingModuleIds[i]) - }); - - unchecked { - ++i; - } - } - } - - /// @notice Returns node operator digest for each node operator registered in the given staking module. - /// @param _stakingModuleId Id of the staking module to return data for. - /// @return Array of node operator digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. - function getAllNodeOperatorDigests(uint256 _stakingModuleId) external view returns (NodeOperatorDigest[] memory) { - return getNodeOperatorDigests( - _stakingModuleId, 0, _getIStakingModuleById(_stakingModuleId).getNodeOperatorsCount() - ); - } - - /// @notice Returns node operator digest for passed node operator ids in the given staking module. - /// @param _stakingModuleId Id of the staking module where node operators registered. - /// @param _offset Node operators offset starting with 0. - /// @param _limit The max number of node operators to return. - /// @return Array of node operator digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. 
- function getNodeOperatorDigests( - uint256 _stakingModuleId, - uint256 _offset, - uint256 _limit - ) public view returns (NodeOperatorDigest[] memory) { - return getNodeOperatorDigests( - _stakingModuleId, _getIStakingModuleById(_stakingModuleId).getNodeOperatorIds(_offset, _limit) - ); - } - - /// @notice Returns node operator digest for a slice of node operators registered in the given - /// staking module. - /// @param _stakingModuleId Id of the staking module where node operators registered. - /// @param _nodeOperatorIds Ids of the node operators to return data for. - /// @return digests Array of node operator digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. - function getNodeOperatorDigests(uint256 _stakingModuleId, uint256[] memory _nodeOperatorIds) - public - view - returns (NodeOperatorDigest[] memory digests) - { - IStakingModule stakingModule = _getIStakingModuleById(_stakingModuleId); - digests = new NodeOperatorDigest[](_nodeOperatorIds.length); - for (uint256 i = 0; i < _nodeOperatorIds.length; ) { - digests[i] = NodeOperatorDigest({ - id: _nodeOperatorIds[i], - isActive: stakingModule.getNodeOperatorIsActive(_nodeOperatorIds[i]), - summary: getNodeOperatorSummary(_stakingModuleId, _nodeOperatorIds[i]) - }); - - unchecked { - ++i; - } - } - } - - /// @notice Sets the staking module status flag for participation in further deposits and/or reward distribution. - /// @param _stakingModuleId Id of the staking module to be updated. - /// @param _status New status of the staking module. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
- function setStakingModuleStatus( - uint256 _stakingModuleId, - StakingModuleStatus _status - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - if (StakingModuleStatus(stakingModule.status) == _status) revert StakingModuleStatusTheSame(); - _setStakingModuleStatus(stakingModule, _status); - } - - /// @notice Returns whether the staking module is stopped. - /// @param _stakingModuleId Id of the staking module. - /// @return True if the staking module is stopped, false otherwise. - function getStakingModuleIsStopped(uint256 _stakingModuleId) external view returns (bool) - { - return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Stopped; - } - - /// @notice Returns whether the deposits are paused for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return True if the deposits are paused, false otherwise. - function getStakingModuleIsDepositsPaused(uint256 _stakingModuleId) - external - view - returns (bool) - { - return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.DepositsPaused; - } - - /// @notice Returns whether the staking module is active. - /// @param _stakingModuleId Id of the staking module. - /// @return True if the staking module is active, false otherwise. - function getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool) { - return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Active; - } - - /// @notice Returns staking module nonce. - /// @param _stakingModuleId Id of the staking module. - /// @return Staking module nonce. - function getStakingModuleNonce(uint256 _stakingModuleId) external view returns (uint256) { - return _getIStakingModuleById(_stakingModuleId).getNonce(); - } - - /// @notice Returns the last deposit block for the staking module. - /// @param _stakingModuleId Id of the staking module. 
- /// @return Last deposit block for the staking module. - function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) - external - view - returns (uint256) - { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).lastDepositBlock; - } - - /// @notice Returns the min deposit block distance for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return Min deposit block distance for the staking module. - function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256) { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).minDepositBlockDistance; - } - - /// @notice Returns the max deposits count per block for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return Max deposits count per block for the staking module. - function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256) { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).maxDepositsPerBlock; - } - - /// @notice Returns active validators count for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return activeValidatorsCount Active validators count for the staking module. 
- function getStakingModuleActiveValidatorsCount(uint256 _stakingModuleId) - external - view - returns (uint256 activeValidatorsCount) - { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - /* uint256 depositableValidatorsCount */ - ) = _getStakingModuleSummary(IStakingModule(stakingModule.stakingModuleAddress)); - - activeValidatorsCount = totalDepositedValidators - Math256.max( - stakingModule.exitedValidatorsCount, totalExitedValidators - ); - } - - /// @notice Returns the max count of deposits which the staking module can provide data for based - /// on the passed `_maxDepositsValue` amount. - /// @param _stakingModuleId Id of the staking module to be deposited. - /// @param _maxDepositsValue Max amount of ether that might be used for deposits count calculation. - /// @return Max number of deposits might be done using the given staking module. - function getStakingModuleMaxDepositsCount(uint256 _stakingModuleId, uint256 _maxDepositsValue) - public - view - returns (uint256) - { - ( - /* uint256 allocated */, - uint256[] memory newDepositsAllocation, - StakingModuleCache[] memory stakingModulesCache - ) = _getDepositsAllocation(_maxDepositsValue / DEPOSIT_SIZE); - uint256 stakingModuleIndex = _getStakingModuleIndexById(_stakingModuleId); - return - newDepositsAllocation[stakingModuleIndex] - stakingModulesCache[stakingModuleIndex].activeValidatorsCount; - } - - /// @notice Returns the aggregate fee distribution proportion. - /// @return modulesFee Modules aggregate fee in base precision. - /// @return treasuryFee Treasury fee in base precision. - /// @return basePrecision Base precision: a value corresponding to the full fee. 
- function getStakingFeeAggregateDistribution() public view returns ( - uint96 modulesFee, - uint96 treasuryFee, - uint256 basePrecision - ) { - uint96[] memory moduleFees; - uint96 totalFee; - (, , moduleFees, totalFee, basePrecision) = getStakingRewardsDistribution(); - for (uint256 i; i < moduleFees.length; ) { - modulesFee += moduleFees[i]; - - unchecked { - ++i; - } - } - treasuryFee = totalFee - modulesFee; - } - - /// @notice Return shares table. - /// @return recipients Rewards recipient addresses corresponding to each module. - /// @return stakingModuleIds Module IDs. - /// @return stakingModuleFees Fee of each recipient. - /// @return totalFee Total fee to mint for each staking module and treasury. - /// @return precisionPoints Base precision number, which constitutes 100% fee. - function getStakingRewardsDistribution() - public - view - returns ( - address[] memory recipients, - uint256[] memory stakingModuleIds, - uint96[] memory stakingModuleFees, - uint96 totalFee, - uint256 precisionPoints - ) - { - (uint256 totalActiveValidators, StakingModuleCache[] memory stakingModulesCache) = _loadStakingModulesCache(); - uint256 stakingModulesCount = stakingModulesCache.length; - - /// @dev Return empty response if there are no staking modules or active validators yet. - if (stakingModulesCount == 0 || totalActiveValidators == 0) { - return (new address[](0), new uint256[](0), new uint96[](0), 0, FEE_PRECISION_POINTS); - } - - precisionPoints = FEE_PRECISION_POINTS; - stakingModuleIds = new uint256[](stakingModulesCount); - recipients = new address[](stakingModulesCount); - stakingModuleFees = new uint96[](stakingModulesCount); - - uint256 rewardedStakingModulesCount = 0; - uint256 stakingModuleValidatorsShare; - uint96 stakingModuleFee; - - for (uint256 i; i < stakingModulesCount; ) { - /// @dev Skip staking modules which have no active validators. 
- if (stakingModulesCache[i].activeValidatorsCount > 0) { - stakingModuleIds[rewardedStakingModulesCount] = stakingModulesCache[i].stakingModuleId; - stakingModuleValidatorsShare = ((stakingModulesCache[i].activeValidatorsCount * precisionPoints) / totalActiveValidators); - - recipients[rewardedStakingModulesCount] = address(stakingModulesCache[i].stakingModuleAddress); - stakingModuleFee = uint96((stakingModuleValidatorsShare * stakingModulesCache[i].stakingModuleFee) / TOTAL_BASIS_POINTS); - /// @dev If the staking module has the `Stopped` status for some reason, then - /// the staking module's rewards go to the treasury, so that the DAO has ability - /// to manage them (e.g. to compensate the staking module in case of an error, etc.) - if (stakingModulesCache[i].status != StakingModuleStatus.Stopped) { - stakingModuleFees[rewardedStakingModulesCount] = stakingModuleFee; - } - // Else keep stakingModuleFees[rewardedStakingModulesCount] = 0, but increase totalFee. - - totalFee += (uint96((stakingModuleValidatorsShare * stakingModulesCache[i].treasuryFee) / TOTAL_BASIS_POINTS) + stakingModuleFee); - - unchecked { - rewardedStakingModulesCount++; - } - } - - unchecked { - ++i; - } - } - - // Total fee never exceeds 100%. - assert(totalFee <= precisionPoints); - - /// @dev Shrink arrays. - if (rewardedStakingModulesCount < stakingModulesCount) { - assembly { - mstore(stakingModuleIds, rewardedStakingModulesCount) - mstore(recipients, rewardedStakingModulesCount) - mstore(stakingModuleFees, rewardedStakingModulesCount) - } - } - } - - /// @notice Returns the same as getStakingRewardsDistribution() but in reduced, 1e4 precision (DEPRECATED). - /// @dev Helper only for Lido contract. Use getStakingRewardsDistribution() instead. - /// @return totalFee Total fee to mint for each staking module and treasury in reduced, 1e4 precision. 
- function getTotalFeeE4Precision() external view returns (uint16 totalFee) { - /// @dev The logic is placed here but in Lido contract to save Lido bytecode. - (, , , uint96 totalFeeInHighPrecision, uint256 precision) = getStakingRewardsDistribution(); - // Here we rely on (totalFeeInHighPrecision <= precision). - totalFee = _toE4Precision(totalFeeInHighPrecision, precision); - } - - /// @notice Returns the same as getStakingFeeAggregateDistribution() but in reduced, 1e4 precision (DEPRECATED). - /// @dev Helper only for Lido contract. Use getStakingFeeAggregateDistribution() instead. - /// @return modulesFee Modules aggregate fee in reduced, 1e4 precision. - /// @return treasuryFee Treasury fee in reduced, 1e4 precision. - function getStakingFeeAggregateDistributionE4Precision() - external view - returns (uint16 modulesFee, uint16 treasuryFee) - { - /// @dev The logic is placed here but in Lido contract to save Lido bytecode. - ( - uint256 modulesFeeHighPrecision, - uint256 treasuryFeeHighPrecision, - uint256 precision - ) = getStakingFeeAggregateDistribution(); - // Here we rely on ({modules,treasury}FeeHighPrecision <= precision). - modulesFee = _toE4Precision(modulesFeeHighPrecision, precision); - treasuryFee = _toE4Precision(treasuryFeeHighPrecision, precision); - } - - /// @notice Returns new deposits allocation after the distribution of the `_depositsCount` deposits. - /// @param _depositsCount The maximum number of deposits to be allocated. - /// @return allocated Number of deposits allocated to the staking modules. - /// @return allocations Array of new deposits allocation to the staking modules. - function getDepositsAllocation(uint256 _depositsCount) external view returns (uint256 allocated, uint256[] memory allocations) { - (allocated, allocations, ) = _getDepositsAllocation(_depositsCount); - } - - /// @notice Invokes a deposit call to the official Deposit contract. - /// @param _depositsCount Number of deposits to make. 
- /// @param _stakingModuleId Id of the staking module to be deposited. - /// @param _depositCalldata Staking module calldata. - /// @dev Only the Lido contract is allowed to call this method. - function deposit( - uint256 _depositsCount, - uint256 _stakingModuleId, - bytes calldata _depositCalldata - ) external payable { - if (msg.sender != LIDO_POSITION.getStorageAddress()) revert AppAuthLidoFailed(); - - bytes32 withdrawalCredentials = getWithdrawalCredentials(); - if (withdrawalCredentials == 0) revert EmptyWithdrawalsCredentials(); - - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - if (StakingModuleStatus(stakingModule.status) != StakingModuleStatus.Active) - revert StakingModuleNotActive(); - - /// @dev Firstly update the local state of the contract to prevent a reentrancy attack - /// even though the staking modules are trusted contracts. - uint256 depositsValue = msg.value; - if (depositsValue != _depositsCount * DEPOSIT_SIZE) revert InvalidDepositsValue(depositsValue, _depositsCount); - - _updateModuleLastDepositState(stakingModule, _stakingModuleId, depositsValue); - - if (_depositsCount > 0) { - (bytes memory publicKeysBatch, bytes memory signaturesBatch) = - IStakingModule(stakingModule.stakingModuleAddress) - .obtainDepositData(_depositsCount, _depositCalldata); - - uint256 etherBalanceBeforeDeposits = address(this).balance; - _makeBeaconChainDeposits32ETH( - _depositsCount, - abi.encodePacked(withdrawalCredentials), - publicKeysBatch, - signaturesBatch - ); - uint256 etherBalanceAfterDeposits = address(this).balance; - - /// @dev All sent ETH must be deposited and self balance stay the same. - assert(etherBalanceBeforeDeposits - etherBalanceAfterDeposits == depositsValue); - } - } - - /// @notice Set credentials to withdraw ETH on Consensus Layer side. - /// @param _withdrawalCredentials withdrawal credentials field as defined in the Consensus Layer specs. 
- /// @dev Note that setWithdrawalCredentials discards all unused deposits data as the signatures are invalidated. - /// @dev The function is restricted to the `MANAGE_WITHDRAWAL_CREDENTIALS_ROLE` role. - function setWithdrawalCredentials(bytes32 _withdrawalCredentials) external onlyRole(MANAGE_WITHDRAWAL_CREDENTIALS_ROLE) { - WITHDRAWAL_CREDENTIALS_POSITION.setStorageBytes32(_withdrawalCredentials); - - uint256 stakingModulesCount = getStakingModulesCount(); - for (uint256 i; i < stakingModulesCount; ) { - StakingModule storage stakingModule = _getStakingModuleByIndex(i); - - unchecked { - ++i; - } - - try IStakingModule(stakingModule.stakingModuleAddress) - .onWithdrawalCredentialsChanged() {} - catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. - /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onWithdrawalCredentialsChanged() - /// reverts because of the "out of gas" error. Here we assume that the - /// onWithdrawalCredentialsChanged() method doesn't have reverts with - /// empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - _setStakingModuleStatus(stakingModule, StakingModuleStatus.DepositsPaused); - emit WithdrawalsCredentialsChangeFailed(stakingModule.id, lowLevelRevertData); - } - } - - emit WithdrawalCredentialsSet(_withdrawalCredentials, msg.sender); - } - - /// @notice Returns current credentials to withdraw ETH on Consensus Layer side. - /// @return Withdrawal credentials. 
- function getWithdrawalCredentials() public view returns (bytes32) { - return WITHDRAWAL_CREDENTIALS_POSITION.getStorageBytes32(); - } - - function _checkValidatorsByNodeOperatorReportData( - bytes calldata _nodeOperatorIds, - bytes calldata _validatorsCounts - ) internal pure { - if (_nodeOperatorIds.length % 8 != 0 || _validatorsCounts.length % 16 != 0) { - revert InvalidReportData(3); - } - uint256 nodeOperatorsCount = _nodeOperatorIds.length / 8; - if (_validatorsCounts.length / 16 != nodeOperatorsCount) { - revert InvalidReportData(2); - } - if (nodeOperatorsCount == 0) { - revert InvalidReportData(1); - } - } - - /// @dev Save the last deposit state for the staking module and emit the event - /// @param stakingModule staking module storage ref - /// @param stakingModuleId id of the staking module to be deposited - /// @param depositsValue value to deposit - function _updateModuleLastDepositState( - StakingModule storage stakingModule, - uint256 stakingModuleId, - uint256 depositsValue - ) internal { - stakingModule.lastDepositAt = uint64(block.timestamp); - stakingModule.lastDepositBlock = block.number; - emit StakingRouterETHDeposited(stakingModuleId, depositsValue); - } - - - /// @dev Loads modules into a memory cache. - /// @return totalActiveValidators Total active validators across all modules. - /// @return stakingModulesCache Array of StakingModuleCache structs. 
- function _loadStakingModulesCache() internal view returns ( - uint256 totalActiveValidators, - StakingModuleCache[] memory stakingModulesCache - ) { - uint256 stakingModulesCount = getStakingModulesCount(); - stakingModulesCache = new StakingModuleCache[](stakingModulesCount); - for (uint256 i; i < stakingModulesCount; ) { - stakingModulesCache[i] = _loadStakingModulesCacheItem(i); - totalActiveValidators += stakingModulesCache[i].activeValidatorsCount; - - unchecked { - ++i; - } - } - } - - function _loadStakingModulesCacheItem(uint256 _stakingModuleIndex) - internal - view - returns (StakingModuleCache memory cacheItem) - { - StakingModule storage stakingModuleData = _getStakingModuleByIndex(_stakingModuleIndex); - - cacheItem.stakingModuleAddress = stakingModuleData.stakingModuleAddress; - cacheItem.stakingModuleId = stakingModuleData.id; - cacheItem.stakingModuleFee = stakingModuleData.stakingModuleFee; - cacheItem.treasuryFee = stakingModuleData.treasuryFee; - cacheItem.stakeShareLimit = stakingModuleData.stakeShareLimit; - cacheItem.status = StakingModuleStatus(stakingModuleData.status); - - ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ) = _getStakingModuleSummary(IStakingModule(cacheItem.stakingModuleAddress)); - - cacheItem.availableValidatorsCount = cacheItem.status == StakingModuleStatus.Active - ? depositableValidatorsCount - : 0; - - // The module might not receive all exited validators data yet => we need to replacing - // the exitedValidatorsCount with the one that the staking router is aware of. 
- cacheItem.activeValidatorsCount = - totalDepositedValidators - - Math256.max(totalExitedValidators, stakingModuleData.exitedValidatorsCount); - } - - function _setStakingModuleStatus(StakingModule storage _stakingModule, StakingModuleStatus _status) internal { - StakingModuleStatus prevStatus = StakingModuleStatus(_stakingModule.status); - if (prevStatus != _status) { - _stakingModule.status = uint8(_status); - emit StakingModuleStatusSet(_stakingModule.id, _status, msg.sender); - } - } - - function _getDepositsAllocation( - uint256 _depositsToAllocate - ) internal view returns (uint256 allocated, uint256[] memory allocations, StakingModuleCache[] memory stakingModulesCache) { - // Calculate total used validators for operators. - uint256 totalActiveValidators; - - (totalActiveValidators, stakingModulesCache) = _loadStakingModulesCache(); - - uint256 stakingModulesCount = stakingModulesCache.length; - allocations = new uint256[](stakingModulesCount); - if (stakingModulesCount > 0) { - /// @dev New estimated active validators count. 
- totalActiveValidators += _depositsToAllocate; - uint256[] memory capacities = new uint256[](stakingModulesCount); - uint256 targetValidators; - - for (uint256 i; i < stakingModulesCount; ) { - allocations[i] = stakingModulesCache[i].activeValidatorsCount; - targetValidators = (stakingModulesCache[i].stakeShareLimit * totalActiveValidators) / TOTAL_BASIS_POINTS; - capacities[i] = Math256.min(targetValidators, stakingModulesCache[i].activeValidatorsCount + stakingModulesCache[i].availableValidatorsCount); - - unchecked { - ++i; - } - } - - (allocated, allocations) = MinFirstAllocationStrategy.allocate(allocations, capacities, _depositsToAllocate); - } - } - - function _getStakingModuleIndexById(uint256 _stakingModuleId) internal view returns (uint256) { - mapping(uint256 => uint256) storage _stakingModuleIndicesOneBased = _getStorageStakingIndicesMapping(); - uint256 indexOneBased = _stakingModuleIndicesOneBased[_stakingModuleId]; - if (indexOneBased == 0) revert StakingModuleUnregistered(); - return indexOneBased - 1; - } - - function _setStakingModuleIndexById(uint256 _stakingModuleId, uint256 _stakingModuleIndex) internal { - mapping(uint256 => uint256) storage _stakingModuleIndicesOneBased = _getStorageStakingIndicesMapping(); - _stakingModuleIndicesOneBased[_stakingModuleId] = _stakingModuleIndex + 1; - } - - function _getIStakingModuleById(uint256 _stakingModuleId) internal view returns (IStakingModule) { - return IStakingModule(_getStakingModuleAddressById(_stakingModuleId)); - } - - function _getStakingModuleByIndex(uint256 _stakingModuleIndex) internal view returns (StakingModule storage) { - mapping(uint256 => StakingModule) storage _stakingModules = _getStorageStakingModulesMapping(); - return _stakingModules[_stakingModuleIndex]; - } - - function _getStakingModuleAddressById(uint256 _stakingModuleId) internal view returns (address) { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).stakingModuleAddress; - } - - function 
_getStorageStakingModulesMapping() internal pure returns (mapping(uint256 => StakingModule) storage result) { - bytes32 position = STAKING_MODULES_MAPPING_POSITION; - assembly { - result.slot := position - } - } - - function _getStorageStakingIndicesMapping() internal pure returns (mapping(uint256 => uint256) storage result) { - bytes32 position = STAKING_MODULE_INDICES_MAPPING_POSITION; - assembly { - result.slot := position - } - } - - function _toE4Precision(uint256 _value, uint256 _precision) internal pure returns (uint16) { - return uint16((_value * TOTAL_BASIS_POINTS) / _precision); - } - - function _validateEqualArrayLengths(uint256 firstArrayLength, uint256 secondArrayLength) internal pure { - if (firstArrayLength != secondArrayLength) { - revert ArraysLengthMismatch(firstArrayLength, secondArrayLength); - } - } - - /// @dev Optimizes contract deployment size by wrapping the 'stakingModule.getStakingModuleSummary' function. - function _getStakingModuleSummary(IStakingModule stakingModule) internal view returns (uint256, uint256, uint256) { - return stakingModule.getStakingModuleSummary(); - } - - /// @notice Handles tracking and penalization logic for a node operator who failed to exit their validator within the defined exit window. - /// @dev This function is called to report the current exit-related status of a validator belonging to a specific node operator. - /// It accepts a validator's public key, associated with the duration (in seconds) it was eligible to exit but has not exited. - /// This data could be used to trigger penalties for the node operator if the validator has been non-exiting for too long. - /// @param _stakingModuleId The ID of the staking module. - /// @param _nodeOperatorId The ID of the node operator whose validator status is being delivered. - /// @param _proofSlotTimestamp The timestamp (slot time) when the validator was last known to be in an active ongoing state. 
- /// @param _publicKey The public key of the validator being reported. - /// @param _eligibleToExitInSec The duration (in seconds) indicating how long the validator has been eligible to exit after request but has not exited. - function reportValidatorExitDelay( - uint256 _stakingModuleId, - uint256 _nodeOperatorId, - uint256 _proofSlotTimestamp, - bytes calldata _publicKey, - uint256 _eligibleToExitInSec - ) - external - onlyRole(REPORT_VALIDATOR_EXITING_STATUS_ROLE) - { - _getIStakingModuleById(_stakingModuleId).reportValidatorExitDelay( - _nodeOperatorId, - _proofSlotTimestamp, - _publicKey, - _eligibleToExitInSec - ); - } - - /// @notice Handles the triggerable exit event for a set of validators. - /// @dev This function is called when validators are exited using triggerable exit requests on the Execution Layer. - /// @param validatorExitData An array of `ValidatorExitData` structs, each representing a validator - /// for which a triggerable exit was requested. Each entry includes: - /// - `stakingModuleId`: ID of the staking module. - /// - `nodeOperatorId`: ID of the node operator. - /// - `pubkey`: Validator public key, 48 bytes length. - /// @param _withdrawalRequestPaidFee Fee amount paid to send a withdrawal request on the Execution Layer (EL). - /// @param _exitType The type of exit being performed. - /// This parameter may be interpreted differently across various staking modules depending on their specific implementation. 
- function onValidatorExitTriggered( - ValidatorExitData[] calldata validatorExitData, - uint256 _withdrawalRequestPaidFee, - uint256 _exitType - ) - external - onlyRole(REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE) - { - ValidatorExitData calldata data; - for (uint256 i = 0; i < validatorExitData.length; ++i) { - data = validatorExitData[i]; - - try _getIStakingModuleById(data.stakingModuleId).onValidatorExitTriggered( - data.nodeOperatorId, - data.pubkey, - _withdrawalRequestPaidFee, - _exitType - ) - {} catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. - /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onValidatorExitTriggered() - /// reverts because of the "out of gas" error. Here we assume that the - /// onValidatorExitTriggered() method doesn't have reverts with - /// empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - emit StakingModuleExitNotificationFailed(data.stakingModuleId, data.nodeOperatorId, data.pubkey); - } - } - } -} diff --git a/contracts/0.8.9/WithdrawalVault.sol b/contracts/0.8.9/WithdrawalVault.sol index 9964bea5e4..80517409b1 100644 --- a/contracts/0.8.9/WithdrawalVault.sol +++ b/contracts/0.8.9/WithdrawalVault.sol @@ -8,7 +8,7 @@ import {IERC20} from "@openzeppelin/contracts-v4.4/token/ERC20/IERC20.sol"; import {IERC721} from "@openzeppelin/contracts-v4.4/token/ERC721/IERC721.sol"; import {SafeERC20} from "@openzeppelin/contracts-v4.4/token/ERC20/utils/SafeERC20.sol"; import {Versioned} from "./utils/Versioned.sol"; -import {WithdrawalVaultEIP7002} from "./WithdrawalVaultEIP7002.sol"; +import {WithdrawalVaultEIP7685} from "./WithdrawalVaultEIP7685.sol"; interface ILido { /** @@ -22,12 +22,13 @@ interface ILido { /** * @title A vault for temporary storage of withdrawals */ -contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { +contract 
WithdrawalVault is Versioned, WithdrawalVaultEIP7685 { using SafeERC20 for IERC20; ILido public immutable LIDO; address public immutable TREASURY; address public immutable TRIGGERABLE_WITHDRAWALS_GATEWAY; + address public immutable CONSOLIDATION_GATEWAY; // Events /** @@ -43,9 +44,9 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { event ERC721Recovered(address indexed requestedBy, address indexed token, uint256 tokenId); // Errors - error ZeroAddress(); error NotLido(); error NotTriggerableWithdrawalsGateway(); + error NotConsolidationGateway(); error NotEnoughEther(uint256 requested, uint256 balance); error ZeroAmount(); @@ -53,14 +54,23 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { * @param _lido the Lido token (stETH) address * @param _treasury the Lido treasury address (see ERC20/ERC721-recovery interfaces) */ - constructor(address _lido, address _treasury, address _triggerableWithdrawalsGateway) { + constructor( + address _lido, + address _treasury, + address _triggerableWithdrawalsGateway, + address _consolidationGateway, + address _withdrawalRequest, + address _consolidationRequest + ) WithdrawalVaultEIP7685(_withdrawalRequest, _consolidationRequest) { _onlyNonZeroAddress(_lido); _onlyNonZeroAddress(_treasury); _onlyNonZeroAddress(_triggerableWithdrawalsGateway); + _onlyNonZeroAddress(_consolidationGateway); LIDO = ILido(_lido); TREASURY = _treasury; TRIGGERABLE_WITHDRAWALS_GATEWAY = _triggerableWithdrawalsGateway; + CONSOLIDATION_GATEWAY = _consolidationGateway; } /// @dev Ensures the contract’s ETH balance is unchanged. @@ -75,14 +85,14 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { function initialize() external { // Initializations for v0 --> v2 _checkContractVersion(0); - _initializeContractVersionTo(2); + _initializeContractVersionTo(3); } - /// @notice Finalizes upgrade to v2 (from v1). Can be called only once. 
- function finalizeUpgrade_v2() external { - // Finalization for v1 --> v2 - _checkContractVersion(1); - _updateContractVersion(2); + /// @notice Finalizes upgrade to v3 (from v2). Can be called only once. + function finalizeUpgrade_v3() external { + // Finalization for v2 --> v3 + _checkContractVersion(2); + _updateContractVersion(3); } /** @@ -136,10 +146,6 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { _token.transferFrom(address(this), TREASURY, _tokenId); } - function _onlyNonZeroAddress(address _address) internal pure { - if (_address == address(0)) revert ZeroAddress(); - } - /** * @dev Submits EIP-7002 full or partial withdrawal requests for the specified public keys. * Each full withdrawal request instructs a validator to fully withdraw its stake and exit its duties as a validator. @@ -171,6 +177,32 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { _addWithdrawalRequests(pubkeys, amounts); } + /** + * @dev Submits EIP-7251 consolidation requests, one per (source, target) pair. + * Each request instructs a validator to consolidate its stake to the target validator. + * + * @param sourcePubkeys An array of 48-byte public keys corresponding to validators requesting the consolidation. + * + * @param targetPubkeys An array of 48-byte public keys corresponding to validators receiving the consolidation. + * + * @notice Reverts if: + * - The caller is not ConsolidationsGateway. + * - The provided public key array is empty. + * - The provided public key array malformed. + * - The provided source public key and target public key arrays are not of equal length. + * - The provided total withdrawal fee value is invalid. 
+ */ + function addConsolidationRequests( + bytes[] calldata sourcePubkeys, + bytes[] calldata targetPubkeys + ) external payable preservesEthBalance { + if (msg.sender != CONSOLIDATION_GATEWAY) { + revert NotConsolidationGateway(); + } + + _addConsolidationRequests(sourcePubkeys, targetPubkeys); + } + /** * @dev Retrieves the current EIP-7002 withdrawal fee. * @return The minimum fee required per withdrawal request. @@ -178,4 +210,12 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { function getWithdrawalRequestFee() public view returns (uint256) { return _getWithdrawalRequestFee(); } + + /** + * @dev Retrieves the current EIP-7251 consolidation fee. + * @return The minimum fee required per consolidation request. + */ + function getConsolidationRequestFee() external view returns (uint256) { + return _getConsolidationRequestFee(); + } } diff --git a/contracts/0.8.9/WithdrawalVaultEIP7002.sol b/contracts/0.8.9/WithdrawalVaultEIP7002.sol deleted file mode 100644 index d4449939eb..0000000000 --- a/contracts/0.8.9/WithdrawalVaultEIP7002.sol +++ /dev/null @@ -1,66 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: GPL-3.0 - -/* See contracts/COMPILERS.md */ -pragma solidity 0.8.9; - -/** - * @title A base contract for a withdrawal vault, enables to submit EIP-7002 withdrawal requests. 
- */ -abstract contract WithdrawalVaultEIP7002 { - address public constant WITHDRAWAL_REQUEST = 0x00000961Ef480Eb55e80D19ad83579A64c007002; - - event WithdrawalRequestAdded(bytes request); - - error ZeroArgument(string name); - error ArraysLengthMismatch(uint256 firstArrayLength, uint256 secondArrayLength); - error FeeReadFailed(); - error FeeInvalidData(); - error IncorrectFee(uint256 requiredFee, uint256 providedFee); - error RequestAdditionFailed(bytes callData); - - function _addWithdrawalRequests(bytes[] calldata pubkeys, uint64[] calldata amounts) internal { - uint256 requestsCount = pubkeys.length; - if (requestsCount == 0) revert ZeroArgument("pubkeys"); - if (requestsCount != amounts.length) revert ArraysLengthMismatch(requestsCount, amounts.length); - - uint256 fee = _getWithdrawalRequestFee(); - _checkFee(requestsCount * fee); - - for (uint256 i = 0; i < requestsCount; ++i) { - _callAddWithdrawalRequest(pubkeys[i], amounts[i], fee); - } - } - - function _getWithdrawalRequestFee() internal view returns (uint256) { - (bool success, bytes memory feeData) = WITHDRAWAL_REQUEST.staticcall(""); - - if (!success) { - revert FeeReadFailed(); - } - - if (feeData.length != 32) { - revert FeeInvalidData(); - } - - return abi.decode(feeData, (uint256)); - } - - function _callAddWithdrawalRequest(bytes calldata pubkey, uint64 amount, uint256 fee) internal { - assert(pubkey.length == 48); - - bytes memory request = abi.encodePacked(pubkey, amount); - (bool success,) = WITHDRAWAL_REQUEST.call{value: fee}(request); - if (!success) { - revert RequestAdditionFailed(request); - } - - emit WithdrawalRequestAdded(request); - } - - function _checkFee(uint256 fee) internal view { - if (msg.value != fee) { - revert IncorrectFee(fee, msg.value); - } - } -} diff --git a/contracts/0.8.9/WithdrawalVaultEIP7685.sol b/contracts/0.8.9/WithdrawalVaultEIP7685.sol new file mode 100644 index 0000000000..5155b4302a --- /dev/null +++ b/contracts/0.8.9/WithdrawalVaultEIP7685.sol @@ -0,0 
+1,132 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.9; + + +/** + * @title Withdrawal Vault EIP-7685 Support + * @notice Abstract contract providing base functionality for + * general-purpose Execution Layer requests. + * @dev Implements support for the following request types: + * - EIP-7002: Withdrawal requests + * - EIP-7251: Consolidation requests + */ +abstract contract WithdrawalVaultEIP7685 { + address public immutable WITHDRAWAL_REQUEST; + address public immutable CONSOLIDATION_REQUEST; + + uint256 internal constant PUBLIC_KEY_LENGTH = 48; + + event WithdrawalRequestAdded(bytes request); + event ConsolidationRequestAdded(bytes request); + + error ZeroAddress(); + error ZeroArgument(string name); + error ArraysLengthMismatch(uint256 firstArrayLength, uint256 secondArrayLength); + error FeeReadFailed(); + error FeeInvalidData(); + error IncorrectFee(uint256 requiredFee, uint256 providedFee); + error RequestAdditionFailed(bytes callData); + error InvalidPublicKeyLength(bytes pubkey); + + constructor(address _withdrawalRequest, address _consolidationRequest) { + _onlyNonZeroAddress(_withdrawalRequest); + _onlyNonZeroAddress(_consolidationRequest); + + WITHDRAWAL_REQUEST = _withdrawalRequest; + CONSOLIDATION_REQUEST = _consolidationRequest; + } + + function _addWithdrawalRequests(bytes[] calldata pubkeys, uint64[] calldata amounts) internal { + uint256 requestsCount = pubkeys.length; + if (requestsCount == 0) revert ZeroArgument("pubkeys"); + if (requestsCount != amounts.length) revert ArraysLengthMismatch(requestsCount, amounts.length); + + uint256 fee = _getWithdrawalRequestFee(); + _requireExactFee(requestsCount * fee); + + for (uint256 i = 0; i < requestsCount; ++i) { + _validatePublicKey(pubkeys[i]); + _callAddWithdrawalRequest(pubkeys[i], amounts[i], fee); + } + } + + function _addConsolidationRequests( + bytes[] calldata sourcePubkeys, + bytes[] calldata 
targetPubkeys + ) internal { + uint256 requestsCount = sourcePubkeys.length; + if (requestsCount == 0) revert ZeroArgument("sourcePubkeys"); + if (requestsCount != targetPubkeys.length) + revert ArraysLengthMismatch(requestsCount, targetPubkeys.length); + + uint256 fee = _getConsolidationRequestFee(); + _requireExactFee(requestsCount * fee); + + for (uint256 i = 0; i < requestsCount; ++i) { + _validatePublicKey(sourcePubkeys[i]); + _validatePublicKey(targetPubkeys[i]); + _callAddConsolidationRequest(sourcePubkeys[i], targetPubkeys[i], fee); + } + } + + function _getWithdrawalRequestFee() internal view returns (uint256) { + return _getFeeFromContract(WITHDRAWAL_REQUEST); + } + + function _getConsolidationRequestFee() internal view returns (uint256) { + return _getFeeFromContract(CONSOLIDATION_REQUEST); + } + + function _getFeeFromContract(address contractAddress) internal view returns (uint256) { + (bool success, bytes memory feeData) = contractAddress.staticcall(""); + + if (!success) { + revert FeeReadFailed(); + } + + if (feeData.length != 32) { + revert FeeInvalidData(); + } + + return abi.decode(feeData, (uint256)); + } + + function _validatePublicKey(bytes calldata pubkey) internal pure { + if (pubkey.length != PUBLIC_KEY_LENGTH) { + revert InvalidPublicKeyLength(pubkey); + } + } + + function _callAddWithdrawalRequest(bytes calldata pubkey, uint64 amount, uint256 fee) internal { + bytes memory request = abi.encodePacked(pubkey, amount); + (bool success,) = WITHDRAWAL_REQUEST.call{value: fee}(request); + if (!success) { + revert RequestAdditionFailed(request); + } + + emit WithdrawalRequestAdded(request); + } + + function _callAddConsolidationRequest(bytes calldata sourcePubkey, bytes calldata targetPubkey, uint256 fee) internal { + bytes memory request = abi.encodePacked(sourcePubkey, targetPubkey); + (bool success,) = CONSOLIDATION_REQUEST.call{value: fee}(request); + if (!success) { + revert RequestAdditionFailed(request); + } + + emit 
ConsolidationRequestAdded(request); + } + + function _requireExactFee(uint256 requiredFee) internal view { + if (requiredFee != msg.value) { + revert IncorrectFee(requiredFee, msg.value); + } + } + + function _onlyNonZeroAddress(address _address) internal pure { + if (_address == address(0)) revert ZeroAddress(); + } +} diff --git a/contracts/0.8.9/oracle/AccountingOracle.sol b/contracts/0.8.9/oracle/AccountingOracle.sol index 779361f193..1735d42ed0 100644 --- a/contracts/0.8.9/oracle/AccountingOracle.sol +++ b/contracts/0.8.9/oracle/AccountingOracle.sol @@ -6,21 +6,32 @@ pragma solidity 0.8.9; import {SafeCast} from "@openzeppelin/contracts-v4.4/utils/math/SafeCast.sol"; +import {ILido} from "contracts/common/interfaces/ILido.sol"; import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; import {ReportValues} from "contracts/common/interfaces/ReportValues.sol"; import {ILazyOracle} from "contracts/common/interfaces/ILazyOracle.sol"; - import {UnstructuredStorage} from "../lib/UnstructuredStorage.sol"; - import {BaseOracle} from "./BaseOracle.sol"; - interface IReportReceiver { function handleOracleReport(ReportValues memory values) external; } interface IOracleReportSanityChecker { - function checkExitedValidatorsRatePerDay(uint256 _exitedValidatorsCount) external view; + function checkExitedEthAmountPerDay( + uint256 _newlyExitedValidatorsCount, + uint256 _timeElapsed + ) external view; + function checkModuleAndCLBalancesChangeRates( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _preCLValidatorsBalanceWei, + uint256 _preCLPendingBalanceWei, + uint256 _postCLValidatorsBalanceWei, + uint256 _postCLPendingBalanceWei, + uint256 _depositsWei, + uint256 _timeElapsed + ) external view; function checkExtraDataItemsCountPerTransaction(uint256 _extraDataListItemsCount) external view; function checkNodeOperatorsPerExtraDataItemCount(uint256 _itemIndex, uint256 
_nodeOperatorsCount) external view; @@ -32,6 +43,16 @@ interface IStakingRouter { uint256[] calldata _exitedValidatorsCounts ) external returns (uint256); + function validateReportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external view; + + function reportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external; + function reportStakingModuleExitedValidatorsCountByNodeOperator( uint256 _stakingModuleId, bytes calldata _nodeOperatorIds, @@ -51,10 +72,9 @@ contract AccountingOracle is BaseOracle { error LidoLocatorCannotBeZero(); error AdminCannotBeZero(); - error LidoCannotBeZero(); - error IncorrectOracleMigration(uint256 code); error SenderNotAllowed(); error InvalidExitedValidatorsData(); + error InvalidClBalancesData(); error UnsupportedExtraDataFormat(uint256 format); error UnsupportedExtraDataType(uint256 itemIndex, uint256 dataType); error DeprecatedExtraDataType(uint256 itemIndex, uint256 dataType); @@ -68,7 +88,6 @@ contract AccountingOracle is BaseOracle { error UnexpectedExtraDataIndex(uint256 expectedIndex, uint256 receivedIndex); error InvalidExtraDataItem(uint256 itemIndex); error InvalidExtraDataSortOrder(uint256 itemIndex); - event ExtraDataSubmitted(uint256 indexed refSlot, uint256 itemsProcessed, uint256 itemsCount); event WarnExtraDataIncompleteProcessing(uint256 indexed refSlot, uint256 processedItemsCount, uint256 itemsCount); @@ -119,10 +138,11 @@ contract AccountingOracle is BaseOracle { _updateContractVersion(2); _updateContractVersion(3); _updateContractVersion(4); + _updateContractVersion(5); } - function finalizeUpgrade_v4(uint256 consensusVersion) external { - _updateContractVersion(4); + function finalizeUpgrade_v5(uint256 consensusVersion) external { + _updateContractVersion(5); _setConsensusVersion(consensusVersion); } @@ -147,12 +167,12 @@ contract AccountingOracle is BaseOracle { 
/// CL values /// - /// @dev The number of validators on consensus layer that were ever deposited - /// via Lido as observed at the reference slot. - uint256 numValidators; - /// @dev Cumulative balance of all Lido validators on the consensus layer + /// @dev Sum of consensus-layer validator balances (`validator.balance`), + /// excluding pending deposits, as observed at the reference slot. + uint256 clValidatorsBalanceGwei; + /// @dev Pending deposits balance on the consensus layer /// as observed at the reference slot. - uint256 clBalanceGwei; + uint256 clPendingBalanceGwei; /// @dev Ids of staking modules that have more exited validators than the number /// stored in the respective staking module contract as observed at the reference /// slot. @@ -161,7 +181,13 @@ contract AccountingOracle is BaseOracle { /// the stakingModuleIdsWithNewlyExitedValidators array as observed at the /// reference slot. uint256[] numExitedValidatorsByStakingModule; - /// + /// @dev Ids of staking modules that have effective balances changed compared to the number + /// stored in the respective staking module contract as observed at the reference slot. + uint256[] stakingModuleIdsWithUpdatedBalance; + /// @dev Sum of consensus-layer validator balances (`validator.balance`) + /// for each staking module in `stakingModuleIdsWithUpdatedBalance`, + /// excluding pending deposits, as observed at the reference slot. 
+ uint256[] validatorBalancesGweiByStakingModule; /// EL values /// @@ -411,6 +437,11 @@ contract AccountingOracle is BaseOracle { result.extraDataItemsSubmitted = extraState.itemsProcessed; } + function getCurrentFrame() external view returns (uint256 refSlot, uint256 refSlotTimestamp) { + refSlot = _getCurrentRefSlot(); + refSlotTimestamp = _getSlotTimestamp(refSlot); + } + /// /// Implementation & helpers /// @@ -465,29 +496,43 @@ contract AccountingOracle is BaseOracle { } uint256 slotsElapsed = data.refSlot - prevRefSlot; + uint256 timeElapsed = slotsElapsed * SECONDS_PER_SLOT; IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); IWithdrawalQueue withdrawalQueue = IWithdrawalQueue(LOCATOR.withdrawalQueue()); + IOracleReportSanityChecker sanityChecker = IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()); + _checkStakingRouterModuleBalances(sanityChecker, data, timeElapsed); _processStakingRouterExitedValidatorsByModule( stakingRouter, + sanityChecker, data.stakingModuleIdsWithNewlyExitedValidators, data.numExitedValidatorsByStakingModule, - slotsElapsed + timeElapsed + ); + + /// @notice update CL balances in StakingRouter + /// @dev we need to update balances before rewards and fee calculation + /// Note, deposit trackers not changed at this moment, they are bumped + /// in StakingRouter.onAccountingReport during `handleAccountingReport` + _processStakingRouterValidatorBalancesByModule( + stakingRouter, + data.stakingModuleIdsWithUpdatedBalance, + data.validatorBalancesGweiByStakingModule ); withdrawalQueue.onOracleReport( data.isBunkerMode, - GENESIS_TIME + prevRefSlot * SECONDS_PER_SLOT, - GENESIS_TIME + data.refSlot * SECONDS_PER_SLOT + _getSlotTimestamp(prevRefSlot), + _getSlotTimestamp(data.refSlot) ); IReportReceiver(LOCATOR.accounting()).handleOracleReport( ReportValues( - GENESIS_TIME + data.refSlot * SECONDS_PER_SLOT, - slotsElapsed * SECONDS_PER_SLOT, - data.numValidators, - data.clBalanceGwei * 1e9, + 
_getSlotTimestamp(data.refSlot), + timeElapsed, + data.clValidatorsBalanceGwei * 1e9, + data.clPendingBalanceGwei * 1e9, data.withdrawalVaultBalance, data.elRewardsVaultBalance, data.sharesRequestedToBurn, @@ -497,7 +542,7 @@ contract AccountingOracle is BaseOracle { ); ILazyOracle(LOCATOR.lazyOracle()).updateReportData( - GENESIS_TIME + data.refSlot * SECONDS_PER_SLOT, + _getSlotTimestamp(data.refSlot), data.refSlot, data.vaultsDataTreeRoot, data.vaultsDataTreeCid @@ -514,11 +559,16 @@ contract AccountingOracle is BaseOracle { }); } + function _getSlotTimestamp(uint256 slot) internal view returns (uint256) { + return GENESIS_TIME + (slot * SECONDS_PER_SLOT); + } + function _processStakingRouterExitedValidatorsByModule( IStakingRouter stakingRouter, + IOracleReportSanityChecker sanityChecker, uint256[] calldata stakingModuleIds, uint256[] calldata numExitedValidatorsByStakingModule, - uint256 slotsElapsed + uint256 timeElapsed ) internal { if (stakingModuleIds.length != numExitedValidatorsByStakingModule.length) { revert InvalidExitedValidatorsData(); @@ -528,7 +578,7 @@ contract AccountingOracle is BaseOracle { return; } - for (uint256 i = 1; i < stakingModuleIds.length; ) { + for (uint256 i = 1; i < stakingModuleIds.length;) { if (stakingModuleIds[i] <= stakingModuleIds[i - 1]) { revert InvalidExitedValidatorsData(); } @@ -537,7 +587,7 @@ contract AccountingOracle is BaseOracle { } } - for (uint256 i = 0; i < stakingModuleIds.length; ) { + for (uint256 i = 0; i < stakingModuleIds.length;) { if (numExitedValidatorsByStakingModule[i] == 0) { revert InvalidExitedValidatorsData(); } @@ -551,14 +601,24 @@ contract AccountingOracle is BaseOracle { numExitedValidatorsByStakingModule ); - uint256 exitedValidatorsRatePerDay = (newlyExitedValidatorsCount * (1 days)) / - (SECONDS_PER_SLOT * slotsElapsed); - - IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()).checkExitedValidatorsRatePerDay( - exitedValidatorsRatePerDay + 
sanityChecker.checkExitedEthAmountPerDay( + newlyExitedValidatorsCount, + timeElapsed ); } + function _processStakingRouterValidatorBalancesByModule( + IStakingRouter stakingRouter, + uint256[] calldata stakingModuleIds, + uint256[] calldata validatorBalancesGwei + ) internal { + if (stakingModuleIds.length == 0) { + return; + } + + stakingRouter.reportValidatorBalancesByStakingModule(stakingModuleIds, validatorBalancesGwei); + } + function _submitReportExtraDataEmpty() internal { ExtraDataProcessingState memory procState = _storageExtraDataProcessingState().value; _checkCanSubmitExtraData(procState, EXTRA_DATA_FORMAT_EMPTY); @@ -643,12 +703,56 @@ contract AccountingOracle is BaseOracle { procState.dataHash = dataHash; procState.itemsProcessed = uint64(itemsProcessed); procState.lastSortingKey = iter.lastSortingKey; - _storageExtraDataProcessingState().value = procState; + _storageExtraDataProcessingState().value = procState; } emit ExtraDataSubmitted(procState.refSlot, procState.itemsProcessed, procState.itemsCount); } + function _checkStakingRouterModuleBalances( + IOracleReportSanityChecker sanityChecker, + ReportData calldata data, + uint256 timeElapsed + ) internal view { + // This check must run before `reportValidatorBalancesByStakingModule(...)` mutates the router state, + // because it compares the report against the previous per-module validators balances in StakingRouter + // and the pre-report protocol pending/deposits snapshot in Lido. 
+ IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); + stakingRouter.validateReportValidatorBalancesByStakingModule( + data.stakingModuleIdsWithUpdatedBalance, + data.validatorBalancesGweiByStakingModule + ); + + uint256[] memory validatorBalancesWeiByStakingModule = + _normalizeStakingRouterValidatorBalancesToWei(data.validatorBalancesGweiByStakingModule); + + (uint256 preCLValidatorsBalanceWei, uint256 preCLPendingBalanceWei,, uint256 depositsWei) = + ILido(LOCATOR.lido()).getBalanceStats(); + sanityChecker.checkModuleAndCLBalancesChangeRates( + data.stakingModuleIdsWithUpdatedBalance, + validatorBalancesWeiByStakingModule, + preCLValidatorsBalanceWei, + preCLPendingBalanceWei, + data.clValidatorsBalanceGwei * 1 gwei, + data.clPendingBalanceGwei * 1 gwei, + depositsWei, + timeElapsed + ); + } + + function _normalizeStakingRouterValidatorBalancesToWei( + uint256[] calldata validatorBalancesGwei + ) internal pure returns (uint256[] memory validatorBalancesWeiByStakingModule) { + uint256 modulesCount = validatorBalancesGwei.length; + validatorBalancesWeiByStakingModule = new uint256[](modulesCount); + for (uint256 i = 0; i < modulesCount; ) { + validatorBalancesWeiByStakingModule[i] = validatorBalancesGwei[i] * 1 gwei; + unchecked { + ++i; + } + } + } + function _processExtraDataItems(bytes calldata data, ExtraDataIterState memory iter) internal { uint256 dataOffset = iter.dataOffset; uint256 maxNodeOperatorsPerItem = 0; @@ -687,12 +791,14 @@ contract AccountingOracle is BaseOracle { revert DeprecatedExtraDataType(index, itemType); } - if (itemType != EXTRA_DATA_TYPE_EXITED_VALIDATORS) { + uint256 nodeOpsProcessed; + + if (itemType == EXTRA_DATA_TYPE_EXITED_VALIDATORS) { + nodeOpsProcessed = _processExtraDataItem(data, iter); + } else { revert UnsupportedExtraDataType(index, itemType); } - uint256 nodeOpsProcessed = _processExtraDataItem(data, iter); - if (nodeOpsProcessed > maxNodeOperatorsPerItem) { maxNodeOperatorsPerItem = nodeOpsProcessed; 
maxNodeOperatorItemIndex = index; @@ -723,7 +829,7 @@ contract AccountingOracle is BaseOracle { uint256 nodeOpsCount; uint256 nodeOpId; bytes calldata nodeOpIds; - bytes calldata valuesCounts; + bytes calldata payload; if (dataOffset + 35 > data.length) { // has to fit at least moduleId (3 bytes), nodeOpsCount (8 bytes), @@ -735,7 +841,7 @@ contract AccountingOracle is BaseOracle { assembly { // layout at the dataOffset: // | 3 bytes | 8 bytes | nodeOpsCount * 8 bytes | nodeOpsCount * 16 bytes | - // | moduleId | nodeOpsCount | nodeOperatorIds | validatorsCounts | + // | moduleId | nodeOpsCount | nodeOperatorIds | payload | let header := calldataload(add(data.offset, dataOffset)) moduleId := shr(232, header) nodeOpsCount := and(shr(168, header), 0xffffffffffffffff) @@ -743,9 +849,9 @@ contract AccountingOracle is BaseOracle { nodeOpIds.length := mul(nodeOpsCount, 8) // read the 1st node operator id for checking the sorting order later nodeOpId := shr(192, calldataload(nodeOpIds.offset)) - valuesCounts.offset := add(nodeOpIds.offset, nodeOpIds.length) - valuesCounts.length := mul(nodeOpsCount, 16) - dataOffset := sub(add(valuesCounts.offset, valuesCounts.length), data.offset) + payload.offset := add(nodeOpIds.offset, nodeOpIds.length) + payload.length := mul(nodeOpsCount, 16) + dataOffset := sub(add(payload.offset, payload.length), data.offset) } if (moduleId == 0) { @@ -784,8 +890,11 @@ contract AccountingOracle is BaseOracle { revert InvalidExtraDataItem(iter.index); } - IStakingRouter(iter.stakingRouter) - .reportStakingModuleExitedValidatorsCountByNodeOperator(moduleId, nodeOpIds, valuesCounts); + // Route to appropriate StakingRouter function based on item type + if (iter.itemType == EXTRA_DATA_TYPE_EXITED_VALIDATORS) { + IStakingRouter(iter.stakingRouter) + .reportStakingModuleExitedValidatorsCountByNodeOperator(moduleId, nodeOpIds, payload); + } iter.dataOffset = dataOffset; return nodeOpsCount; diff --git a/contracts/0.8.9/oracle/ValidatorsExitBus.sol 
b/contracts/0.8.9/oracle/ValidatorsExitBus.sol index 07eddf9d20..0074c6310c 100644 --- a/contracts/0.8.9/oracle/ValidatorsExitBus.sol +++ b/contracts/0.8.9/oracle/ValidatorsExitBus.sol @@ -2,11 +2,14 @@ // SPDX-License-Identifier: GPL-3.0 pragma solidity 0.8.9; +import {SafeCast} from "@openzeppelin/contracts-v4.4/utils/math/SafeCast.sol"; + import {AccessControlEnumerable} from "../utils/access/AccessControlEnumerable.sol"; import {UnstructuredStorage} from "../lib/UnstructuredStorage.sol"; import {Versioned} from "../utils/Versioned.sol"; -import {ExitRequestLimitData, ExitLimitUtilsStorage, ExitLimitUtils} from "../lib/ExitLimitUtils.sol"; +import {LimitData, RateLimitStorage, RateLimit} from "../../common/lib/RateLimit.sol"; import {PausableUntil} from "../utils/PausableUntil.sol"; +import {WithdrawalCredentials} from "../../common/lib/WithdrawalCredentials.sol"; interface ITriggerableWithdrawalsGateway { struct ValidatorData { @@ -22,10 +25,47 @@ interface ITriggerableWithdrawalsGateway { ) external payable; } +/// @notice New interface for staking modules (CSM, CuratedV2) +/// @dev Returns only pubkeys +interface IUnifiedStakingModule { + /// @dev It also works for legacy staking modules (NOR, SDVT) where `getSigningKeys` returns different + /// tuple `(bytes memory pubkeys, bytes memory signatures, bool[] memory used)`. + /// The trick: `abi.decode(returndata, (bytes))` will decode only the first tuple element. + /// This is safe as long as the first returned value really is `bytes pubkeys` in that position. 
+ function getSigningKeys( + uint256 nodeOperatorId, + uint256 startIndex, + uint256 keysCount + ) external view returns (bytes memory); +} + +interface IStakingRouter { + struct ModuleStateConfig { + address moduleAddress; + uint16 moduleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint16 priorityExitShareThreshold; + uint8 status; + uint8 withdrawalCredentialsType; + } + + function getStakingModuleStateConfig(uint256 _stakingModuleId) + external + view + returns (ModuleStateConfig memory stateConfig); +} + interface ILidoLocator { function validatorExitDelayVerifier() external view returns (address); function triggerableWithdrawalsGateway() external view returns (address); - function oracleReportSanityChecker() external view returns(address); + function oracleReportSanityChecker() external view returns (address); + function stakingRouter() external view returns (address); +} + +interface IOracleReportSanityCheckerForExitBus { + function getMaxEffectiveBalanceWeightWCType01() external view returns (uint256); + function getMaxEffectiveBalanceWeightWCType02() external view returns (uint256); } /** @@ -35,8 +75,7 @@ interface ILidoLocator { */ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, Versioned { using UnstructuredStorage for bytes32; - using ExitLimitUtilsStorage for bytes32; - using ExitLimitUtils for ExitRequestLimitData; + using SafeCast for uint256; /** * @notice Thrown when an invalid zero value is passed @@ -65,6 +104,17 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V */ error InvalidRequestsDataSortOrder(); + /** + * @notice Thrown when provided public key does not match the registered signing key + * @param index Index of the validator in the exit request list + */ + error InvalidPublicKey(uint256 index); + + /** + * @notice Thrown when retrieved pubkey length is invalid + */ + error InvalidRetrievedKeyLength(); + /** * Thrown when there are attempt to send exit events for 
request that was not submitted earlier by trusted entities */ @@ -93,11 +143,11 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V error InvalidExitDataIndexSortOrder(); /** - * @notice Thrown when remaining exit requests limit is not enough to cover sender requests - * @param requestsCount Amount of requests that were sent for processing - * @param remainingLimit Amount of requests that still can be processed at current day + * @notice Thrown when remaining exit balance limit is not enough to cover the exit requests + * @param balanceEth Total balance being requested for exit in ETH + * @param remainingLimitEth Remaining balance limit in ETH that can still be processed */ - error ExitRequestsLimitExceeded(uint256 requestsCount, uint256 remainingLimit); + error ExitRequestsLimitExceeded(uint256 balanceEth, uint256 remainingLimitEth); /** * @notice Thrown when submitting was not started for request @@ -110,6 +160,9 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V */ error TooManyExitRequestsInReport(uint256 requestsCount, uint256 maxRequestsPerReport); + error InvalidMaxEBWeight(); + error UnexpectedWCType(); + /** * @notice Emitted when an entity with the SUBMIT_REPORT_HASH_ROLE role submits a hash of the exit requests data. * @param exitRequestsHash keccak256 hash of the encoded validators list @@ -134,11 +187,11 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /** * @notice Emitted when limits configs are set. - * @param maxExitRequestsLimit The maximum number of exit requests. - * @param exitsPerFrame The number of exits that can be restored per frame. - * @param frameDurationInSec The duration of each frame, in seconds, after which `exitsPerFrame` exits can be restored. + * @param maxExitBalanceEth The maximum exit balance limit in ETH. + * @param balancePerFrameEth The exit balance in ETH that can be restored per frame. 
+ * @param frameDurationInSec The duration of each frame, in seconds, after which `balancePerFrameEth` can be restored. */ - event ExitRequestsLimitSet(uint256 maxExitRequestsLimit, uint256 exitsPerFrame, uint256 frameDurationInSec); + event ExitBalanceLimitSet(uint256 maxExitBalanceEth, uint256 balancePerFrameEth, uint256 frameDurationInSec); /** * @notice Emitted when exit requests were delivered @@ -148,7 +201,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /** * @notice Emitted when max validators per report value is set. - * @param maxValidatorsPerReport The number of valdiators allowed per report. + * @param maxValidatorsPerReport The number of validators allowed per report. */ event SetMaxValidatorsPerReport(uint256 maxValidatorsPerReport); @@ -161,6 +214,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V uint256 nodeOpId; uint256 moduleId; uint256 valIndex; + uint256 keyIndex; // - will be max uint256 for format 1, actual value for format 2 bytes pubkey; } @@ -181,6 +235,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /// Length in bytes of packed request uint256 internal constant PACKED_REQUEST_LENGTH = 64; + uint256 internal constant PACKED_REQUEST_LENGTH_V2 = 72; uint256 internal constant PUBLIC_KEY_LENGTH = 48; @@ -202,13 +257,25 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /// uint256 public constant DATA_FORMAT_LIST = 1; + /// @notice The extended list format that includes a key index for each validator. + /// + /// Each validator exit request is 72 bytes: + /// + /// MSB <-------------------------------------------------------------------- LSB + /// | 3 bytes | 5 bytes | 8 bytes | 8 bytes | 48 bytes | + /// | moduleId | nodeOpId | validatorIndex | keyIndex | validatorPubkey | + /// + /// Encoding and sorting rules are identical to `DATA_FORMAT_LIST`. 
+ /// `keyIndex` is used to validate the pubkey against on-chain registered keys. + uint256 public constant DATA_FORMAT_LIST_WITH_KEY_INDEX = 2; + ILidoLocator internal immutable LOCATOR; /// @dev Storage slot: uint256 totalRequestsProcessed bytes32 internal constant TOTAL_REQUESTS_PROCESSED_POSITION = keccak256("lido.ValidatorsExitBusOracle.totalRequestsProcessed"); - // Storage slot for exit request limit configuration and current quota tracking - bytes32 internal constant EXIT_REQUEST_LIMIT_POSITION = keccak256("lido.ValidatorsExitBus.maxExitRequestLimit"); + // Storage slot for exit balance limit configuration and current quota tracking (in ETH, not Gwei) + bytes32 internal constant EXIT_BALANCE_LIMIT_POSITION = keccak256("lido.ValidatorsExitBus.exitBalanceLimitEth"); // Storage slot for the maximum number of validator exit requests allowed per processing report bytes32 internal constant MAX_VALIDATORS_PER_REPORT_POSITION = keccak256("lido.ValidatorsExitBus.maxValidatorsPerReport"); @@ -218,7 +285,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V uint256 public constant EXIT_TYPE = 2; - /// @dev Ensures the contract’s ETH balance is unchanged. + /// @dev Ensures the contract's ETH balance is unchanged. modifier preservesEthBalance() { uint256 balanceBeforeCall = address(this).balance - msg.value; _; @@ -229,6 +296,16 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V LOCATOR = ILidoLocator(lidoLocator); } + function MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01() public view returns (uint16) { + (uint16 maxEBWeightType1, ) = _getMaxEffectiveBalanceWeights(); + return maxEBWeightType1; + } + + function MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02() public view returns (uint16) { + (, uint16 maxEBWeightType2) = _getMaxEffectiveBalanceWeights(); + return maxEBWeightType2; + } + /** * @notice Submit a hash of the exit requests data. 
* @@ -272,16 +349,17 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V _checkExitRequestData(request.data, request.dataFormat); _checkContractVersion(requestStatus.contractVersion); - uint256 requestsCount = request.data.length / PACKED_REQUEST_LENGTH; + uint256 requestsCount = request.data.length / _getPackedRequestLength(request.dataFormat); uint256 maxRequestsPerReport = _getMaxValidatorsPerReport(); if (requestsCount > maxRequestsPerReport) { revert TooManyExitRequestsInReport(requestsCount, maxRequestsPerReport); } - _consumeLimit(requestsCount); + uint256 totalBalanceEth = _calculateTotalExitBalanceEth(request.data, request.dataFormat); + _consumeLimit(totalBalanceEth); - _processExitRequestsList(request.data); + _processExitRequestsList(request.data, request.dataFormat); TOTAL_REQUESTS_PROCESSED_POSITION.setStorageUint256( TOTAL_REQUESTS_PROCESSED_POSITION.getStorageUint256() + requestsCount @@ -332,7 +410,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V memory triggerableExitData = new ITriggerableWithdrawalsGateway.ValidatorData[](exitDataIndexes.length); uint256 lastExitDataIndex = type(uint256).max; - uint256 requestsCount = exitsData.data.length / PACKED_REQUEST_LENGTH; + uint256 requestsCount = exitsData.data.length / _getPackedRequestLength(exitsData.dataFormat); for (uint256 i = 0; i < exitDataIndexes.length; i++) { if (exitDataIndexes[i] >= requestsCount) { @@ -345,7 +423,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V lastExitDataIndex = exitDataIndexes[i]; - ValidatorData memory validatorData = _getValidatorData(exitsData.data, exitDataIndexes[i]); + ValidatorData memory validatorData = _getValidatorData(exitsData.data, exitsData.dataFormat, exitDataIndexes[i]); if (validatorData.moduleId == 0) revert InvalidModuleId(); @@ -363,45 +441,45 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /** * @notice Sets 
the limits config - * @param maxExitRequestsLimit The maximum number of exit requests. - * @param exitsPerFrame The number of exits that can be restored per frame. - * @param frameDurationInSec The duration of each frame, in seconds, after which `exitsPerFrame` exits can be restored. + * @param maxExitBalanceEth The maximum exit balance limit in ETH. + * @param balancePerFrameEth The exit balance in ETH that can be restored per frame. + * @param frameDurationInSec The duration of each frame, in seconds, after which `balancePerFrameEth` can be restored. */ function setExitRequestLimit( - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec ) external onlyRole(EXIT_REQUEST_LIMIT_MANAGER_ROLE) { - _setExitRequestLimit(maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + _setExitRequestLimit(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } /** * @notice Returns information about current limits data - * @return maxExitRequestsLimit Maximum exit requests limit - * @return exitsPerFrame The number of exits that can be restored per frame. - * @return frameDurationInSec The duration of each frame, in seconds, after which `exitsPerFrame` exits can be restored. 
- * @return prevExitRequestsLimit Limit left after previous requests - * @return currentExitRequestsLimit Current exit requests limit + * @return maxExitBalanceEth Maximum exit balance limit in ETH + * @return balancePerFrameEth The exit balance in ETH that can be restored per frame + * @return frameDurationInSec The duration of each frame, in seconds, after which `balancePerFrameEth` can be restored + * @return prevExitBalanceEth Balance limit in ETH left after previous requests + * @return currentExitBalanceEth Current exit balance limit in ETH */ function getExitRequestLimitFullInfo() external view returns ( - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec, - uint256 prevExitRequestsLimit, - uint256 currentExitRequestsLimit + uint256 prevExitBalanceEth, + uint256 currentExitBalanceEth ) { - ExitRequestLimitData memory exitRequestLimitData = EXIT_REQUEST_LIMIT_POSITION.getStorageExitRequestLimit(); - maxExitRequestsLimit = exitRequestLimitData.maxExitRequestsLimit; - exitsPerFrame = exitRequestLimitData.exitsPerFrame; - frameDurationInSec = exitRequestLimitData.frameDurationInSec; - prevExitRequestsLimit = exitRequestLimitData.prevExitRequestsLimit; - - currentExitRequestsLimit = exitRequestLimitData.isExitLimitSet() - ? exitRequestLimitData.calculateCurrentExitLimit(_getTimestamp()) + LimitData memory limitData = RateLimitStorage.getStorageLimit(EXIT_BALANCE_LIMIT_POSITION); + maxExitBalanceEth = limitData.maxLimit; + balancePerFrameEth = limitData.itemsPerFrame; + frameDurationInSec = limitData.frameDurationInSec; + prevExitBalanceEth = limitData.prevLimit; + + currentExitBalanceEth = RateLimit.isLimitSet(limitData) + ? 
RateLimit.calculateCurrentLimit(limitData, _getTimestamp()) : type(uint256).max; } @@ -457,11 +535,12 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V ) external pure returns (bytes memory pubkey, uint256 nodeOpId, uint256 moduleId, uint256 valIndex) { _checkExitRequestData(exitRequests, dataFormat); - if (index >= exitRequests.length / PACKED_REQUEST_LENGTH) { - revert ExitDataIndexOutOfRange(index, exitRequests.length / PACKED_REQUEST_LENGTH); + uint256 requestsCount = exitRequests.length / _getPackedRequestLength(dataFormat); + if (index >= requestsCount) { + revert ExitDataIndexOutOfRange(index, requestsCount); } - ValidatorData memory validatorData = _getValidatorData(exitRequests, index); + ValidatorData memory validatorData = _getValidatorData(exitRequests, dataFormat, index); valIndex = validatorData.valIndex; nodeOpId = validatorData.nodeOpId; @@ -507,14 +586,23 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V return TOTAL_REQUESTS_PROCESSED_POSITION.getStorageUint256(); } + /// @dev Returns the packed request length for a given data format + function _getPackedRequestLength(uint256 dataFormat) internal pure returns (uint256) { + if (dataFormat == DATA_FORMAT_LIST) { + return PACKED_REQUEST_LENGTH; // 64 + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + return PACKED_REQUEST_LENGTH_V2; // 72 + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + } + /// Internal functions function _checkExitRequestData(bytes calldata requests, uint256 dataFormat) internal pure { - if (dataFormat != DATA_FORMAT_LIST) { - revert UnsupportedRequestsDataFormat(dataFormat); - } + uint256 packedLength = _getPackedRequestLength(dataFormat); // validates format - if (requests.length == 0 || requests.length % PACKED_REQUEST_LENGTH != 0) { + if (requests.length == 0 || requests.length % packedLength != 0) { revert InvalidRequestsDataLength(); } } @@ -554,38 +642,40 @@ abstract contract 
ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V } function _setExitRequestLimit( - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec ) internal { uint256 timestamp = _getTimestamp(); - EXIT_REQUEST_LIMIT_POSITION.setStorageExitRequestLimit( - EXIT_REQUEST_LIMIT_POSITION.getStorageExitRequestLimit().setExitLimits( - maxExitRequestsLimit, - exitsPerFrame, - frameDurationInSec, - timestamp - ) + LimitData memory limitData = RateLimitStorage.getStorageLimit(EXIT_BALANCE_LIMIT_POSITION); + limitData = RateLimit.setLimits( + limitData, + maxExitBalanceEth, + balancePerFrameEth, + frameDurationInSec, + timestamp ); + RateLimitStorage.setStorageLimit(EXIT_BALANCE_LIMIT_POSITION, limitData); - emit ExitRequestsLimitSet(maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + emit ExitBalanceLimitSet(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } - function _consumeLimit(uint256 requestsCount) internal { - ExitRequestLimitData memory exitRequestLimitData = EXIT_REQUEST_LIMIT_POSITION.getStorageExitRequestLimit(); - if (!exitRequestLimitData.isExitLimitSet()) { + function _consumeLimit(uint256 balanceEth) internal { + LimitData memory limitData = RateLimitStorage.getStorageLimit(EXIT_BALANCE_LIMIT_POSITION); + if (!RateLimit.isLimitSet(limitData)) { return; } - uint256 limit = exitRequestLimitData.calculateCurrentExitLimit(_getTimestamp()); + uint256 limitEth = RateLimit.calculateCurrentLimit(limitData, _getTimestamp()); - if (requestsCount > limit) { - revert ExitRequestsLimitExceeded(requestsCount, limit); + if (balanceEth > limitEth) { + revert ExitRequestsLimitExceeded(balanceEth, limitEth); } - EXIT_REQUEST_LIMIT_POSITION.setStorageExitRequestLimit( - exitRequestLimitData.updatePrevExitLimit(limit - requestsCount, _getTimestamp()) + RateLimitStorage.setStorageLimit( + EXIT_BALANCE_LIMIT_POSITION, + RateLimit.updatePrevLimit(limitData, limitEth - 
balanceEth, _getTimestamp()) ); } @@ -634,13 +724,35 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /// Methods for reading data from tightly packed validator exit requests /// Format DATA_FORMAT_LIST = 1; - /** - * @notice Method for reading node operator id, module id and validator index from validator exit request data - * @param exitRequestData Validator exit requests data. DATA_FORMAT = 1 - * @param index index of request in array above - * @return validatorData Validator data including node operator id, module id, validator index - */ + /** + * @notice Method for reading node operator id, module id, validator index, and optionally key index + * from validator exit request data + * @param exitRequestData Validator exit requests data + * @param dataFormat Format of the data (1 or 2) + * @param index index of request in array above + * @return validatorData Validator data including node operator id, module id, validator index, and key index + */ function _getValidatorData( + bytes calldata exitRequestData, + uint256 dataFormat, + uint256 index + ) internal pure returns (ValidatorData memory validatorData) { + if (dataFormat == DATA_FORMAT_LIST) { + return _getValidatorDataV1(exitRequestData, index); + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + return _getValidatorDataV2(exitRequestData, index); + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + } + + /** + * @notice Extracts validator data from format 1 (64 bytes per request, no keyIndex) + * @param exitRequestData Validator exit requests data + * @param index index of request in array + * @return validatorData Validator data with keyIndex = type(uint256).max + */ + function _getValidatorDataV1( bytes calldata exitRequestData, uint256 index ) internal pure returns (ValidatorData memory validatorData) { @@ -663,6 +775,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V validatorData.valIndex = 
uint64(dataWithoutPubkey); validatorData.nodeOpId = uint40(dataWithoutPubkey >> 64); validatorData.moduleId = uint24(dataWithoutPubkey >> (64 + 40)); + validatorData.keyIndex = type(uint256).max; // Format 1 always uses keyIndex set to max uint256 to indicate unused bytes memory pubkey = new bytes(PUBLIC_KEY_LENGTH); assembly { @@ -676,10 +789,207 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V } /** - * This method read report data (DATA_FORMAT=1) within a range - * Check dataWithoutPubkey <= lastDataWithoutPubkey needs to prevent duplicates + * @notice Extracts validator data from format 2 (72 bytes per request, includes keyIndex) + * @param exitRequestData Validator exit requests data + * @param index index of request in array + * @return validatorData Validator data with extracted keyIndex + */ + function _getValidatorDataV2( + bytes calldata exitRequestData, + uint256 index + ) internal pure returns (ValidatorData memory validatorData) { + uint256 itemOffset; + uint256 dataWithoutPubkey; + + assembly { + // Compute the start of this packed request (item) + itemOffset := add(exitRequestData.offset, mul(PACKED_REQUEST_LENGTH_V2, index)) + + // Load the first 24 bytes which contain moduleId (24 bits), + // nodeOpId (40 bits), valIndex (64 bits), and keyIndex (64 bits). 
+ dataWithoutPubkey := shr(64, calldataload(itemOffset)) + } + + // dataWithoutPubkey format (192 bits total): + // MSB <--------------------------------- 192 bits ----------------------------------> LSB + // | 64 bits: zeros | 24 bits: moduleId | 40 bits: nodeOpId | 64 bits: valIndex | 64 bits: keyIndex | + + validatorData.keyIndex = uint64(dataWithoutPubkey); + validatorData.valIndex = uint64(dataWithoutPubkey >> 64); + validatorData.nodeOpId = uint40(dataWithoutPubkey >> (64 + 64)); + validatorData.moduleId = uint24(dataWithoutPubkey >> (64 + 64 + 40)); + + bytes memory pubkey = new bytes(PUBLIC_KEY_LENGTH); + assembly { + itemOffset := add(exitRequestData.offset, mul(PACKED_REQUEST_LENGTH_V2, index)) + let pubkeyCalldataOffset := add(itemOffset, 24) + let pubkeyMemPtr := add(pubkey, 32) + calldatacopy(pubkeyMemPtr, pubkeyCalldataOffset, PUBLIC_KEY_LENGTH) + } + + validatorData.pubkey = pubkey; + } + + /** + * @notice Calculates the total balance in ETH for all validators in the exit requests + * @dev This function determines the max effective balance based on the module's withdrawal credentials type: + * - Legacy modules (0x01 withdrawal credentials): 32 ETH per validator + * - Compounding modules (0x02 withdrawal credentials): 2048 ETH per validator (post-MaxEB/EIP-7251) + * + * The withdrawal credentials type is queried from the Staking Router for each module, + * eliminating the need for hardcoded module IDs. + * + * For gas efficiency, module types are cached during iteration to avoid repeated external calls + * for the same module. 
+ * + * @param data Packed exit requests data + * @param dataFormat Format of the data (1 or 2) + * @return totalBalanceEth Total balance of all validators being exited in ETH + */ + function _calculateTotalExitBalanceEth(bytes calldata data, uint256 dataFormat) + internal + view + returns (uint256 totalBalanceEth) + { + (uint16 maxEBWeightType1, uint16 maxEBWeightType2) = _getMaxEffectiveBalanceWeights(); + uint256 packedLength; + uint256 dataShift; + uint256 moduleShift; + + if (dataFormat == DATA_FORMAT_LIST) { + packedLength = PACKED_REQUEST_LENGTH; + dataShift = 128; + moduleShift = 104; + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + packedLength = PACKED_REQUEST_LENGTH_V2; + dataShift = 64; + moduleShift = 168; + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + + uint256 baseOffset; + assembly { + baseOffset := data.offset + } + + uint256 requestsCount = data.length / packedLength; + uint256 cachedModuleId = 0; + uint256 cachedModuleMaxEBWeightEth = 0; + + for (uint256 i = 0; i < requestsCount; ++i) { + uint256 moduleId; + uint256 itemOffset; + + assembly { + itemOffset := add(baseOffset, mul(packedLength, i)) + let dataWithoutPubkey := shr(dataShift, calldataload(itemOffset)) + moduleId := shr(moduleShift, dataWithoutPubkey) // Extract top 24 bits + } + + if (moduleId != cachedModuleId) { + cachedModuleId = moduleId; + cachedModuleMaxEBWeightEth = _getModuleMaxEBWeight(moduleId, maxEBWeightType1, maxEBWeightType2); + } + totalBalanceEth += cachedModuleMaxEBWeightEth; + } + } + + function _getMaxEffectiveBalanceWeights() internal view returns (uint16 maxEBWeightType1, uint16 maxEBWeightType2) { + IOracleReportSanityCheckerForExitBus sanityChecker = + IOracleReportSanityCheckerForExitBus(LOCATOR.oracleReportSanityChecker()); + + uint256 maxEBWeightType1Raw = sanityChecker.getMaxEffectiveBalanceWeightWCType01(); + uint256 maxEBWeightType2Raw = sanityChecker.getMaxEffectiveBalanceWeightWCType02(); + if ( + maxEBWeightType1Raw 
== 0 || maxEBWeightType2Raw == 0 || maxEBWeightType1Raw > type(uint16).max + || maxEBWeightType2Raw > type(uint16).max + ) { + revert InvalidMaxEBWeight(); + } + + maxEBWeightType1 = maxEBWeightType1Raw.toUint16(); + maxEBWeightType2 = maxEBWeightType2Raw.toUint16(); + } + + function _getModuleMaxEBWeight( + uint256 moduleId, + uint16 maxEBWeightType1, + uint16 maxEBWeightType2 + ) internal view returns (uint16) { + uint256 wcType = + IStakingRouter(LOCATOR.stakingRouter()).getStakingModuleStateConfig(moduleId).withdrawalCredentialsType; + if (WithdrawalCredentials.isType1(wcType)) { + return maxEBWeightType1; + } else if (WithdrawalCredentials.isType2(wcType)) { + return maxEBWeightType2; + } + revert UnexpectedWCType(); + } + + /** + * @notice Verify that a pubkey belongs to the specified module and node operator + * @param moduleId Staking module ID + * @param nodeOpId Node operator ID + * @param keyIndex Index of the key in the module + * @param pubkey Public key to verify (48 bytes) + * @param requestIndex Index of the request in the batch (for error reporting) + * @param cachedModuleId Previously cached module ID (type(uint256).max if none) + * @param cachedModuleAddress Previously cached module address + * @return newModuleAddress Updated module address (same if module unchanged) */ - function _processExitRequestsList(bytes calldata data) internal { + function _verifyKey( + uint256 moduleId, + uint256 nodeOpId, + uint256 keyIndex, + bytes calldata pubkey, + uint256 requestIndex, + uint256 cachedModuleId, + address cachedModuleAddress + ) internal view returns (address newModuleAddress) { + if (moduleId == cachedModuleId) { + newModuleAddress = cachedModuleAddress; + } else { + newModuleAddress = IStakingRouter(LOCATOR.stakingRouter()).getStakingModuleStateConfig(moduleId).moduleAddress; + } + + bytes memory retrievedKeys = IUnifiedStakingModule(newModuleAddress) + .getSigningKeys( + nodeOpId, + keyIndex, // startIndex + 1 // keysCount: get only 1 key + ); + 
+ if (retrievedKeys.length != 48) { + revert InvalidRetrievedKeyLength(); + } + + if (keccak256(retrievedKeys) != keccak256(pubkey)) { + revert InvalidPublicKey(requestIndex); + } + } + + /** + * @notice Dispatcher that processes exit requests based on data format + * @param data Packed exit requests data + * @param dataFormat Format of the data (1 or 2) + */ + function _processExitRequestsList(bytes calldata data, uint256 dataFormat) internal { + if (dataFormat == DATA_FORMAT_LIST) { + _processExitRequestsListV1(data); + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + _processExitRequestsListV2(data); + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + } + + /** + * @notice Process exit requests for format 1 (64 bytes per request, no keyIndex) + * @dev Check dataWithoutPubkey <= lastDataWithoutPubkey prevents duplicates and ensures sorting + * @param data Packed exit requests data (DATA_FORMAT=1) + */ + function _processExitRequestsListV1(bytes calldata data) internal { uint256 offset; uint256 offsetPastEnd; uint256 lastDataWithoutPubkey = 0; @@ -731,6 +1041,91 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V } } + /** + * @notice Process exit requests for format 2 (72 bytes per request, includes keyIndex) + * @dev Uniqueness and sort check uses (moduleId, nodeOpId, valIndex) only — keyIndex is excluded + * so that the same validator cannot appear twice with different keyIndex (double-counted). 
+ * @param data Packed exit requests data (DATA_FORMAT=2) + */ + function _processExitRequestsListV2(bytes calldata data) internal { + uint256 offset; + uint256 offsetPastEnd; + uint256 lastValidatorData = 0; // (moduleId, nodeOpId, valIndex) — 128 bits, no keyIndex + uint256 timestamp = _getTimestamp(); + uint256 index = 0; + + assembly { + offset := data.offset + offsetPastEnd := add(offset, data.length) + } + + bytes calldata pubkey; + uint256 dataWithoutPubkey; + uint256 validatorData; // 128 bits: moduleId | nodeOpId | valIndex (for sort/duplicate check) + uint256 moduleId; + + // Cache module data to avoid repeated external calls for the same module + uint256 cachedModuleId = 0; + address cachedModuleAddress; + + assembly { + pubkey.length := 48 + } + + while (offset < offsetPastEnd) { + assembly { + // 24 most significant bytes are taken by module id, node op id, val index, and key index + dataWithoutPubkey := shr(64, calldataload(offset)) + // the next 48 bytes are taken by the pubkey + pubkey.offset := add(offset, 24) + // totalling to 72 bytes + offset := add(offset, 72) + } + + // For sort/duplicate check use only (moduleId, nodeOpId, valIndex) — drop keyIndex (low 64 bits) + validatorData = dataWithoutPubkey >> 64; + + moduleId = uint24(dataWithoutPubkey >> (64 + 64 + 40)); + + if (moduleId == 0) { + revert InvalidModuleId(); + } + + // Uniqueness and sort by validator identity only: (moduleId, nodeOpId, valIndex) + // dataWithoutPubkey (192b): ... | valIndex | keyIndex | -> validatorData (128b): ... 
| valIndex + if (validatorData <= lastValidatorData) { + revert InvalidRequestsDataSortOrder(); + } + + // Verify that the pubkey belongs to the module and node operator + // Cache is updated if module changed + cachedModuleAddress = _verifyKey( + moduleId, + uint40(dataWithoutPubkey >> (64 + 64)), // nodeOpId + uint64(dataWithoutPubkey), // keyIndex + pubkey, + index, + cachedModuleId, + cachedModuleAddress + ); + + cachedModuleId = moduleId; + lastValidatorData = validatorData; + + emit ValidatorExitRequest( + moduleId, + uint40(dataWithoutPubkey >> (64 + 64)), // nodeOpId + uint64(dataWithoutPubkey >> 64), // valIndex + pubkey, + timestamp + ); + + unchecked { + ++index; + } + } + } + /// Storage helpers function _storageRequestStatus() internal pure returns (mapping(bytes32 => RequestStatus) storage r) { diff --git a/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol b/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol index b3062b241b..31505ff001 100644 --- a/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol +++ b/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol @@ -10,7 +10,7 @@ import {BaseOracle} from "./BaseOracle.sol"; import {ValidatorsExitBus} from "./ValidatorsExitBus.sol"; interface IOracleReportSanityChecker { - function checkExitBusOracleReport(uint256 _exitRequestsCount) external view; + function checkExitBusOracleReport(uint256 _maxBalanceExitRequestedPerReportInEth) external view; } contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { @@ -49,16 +49,19 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { uint256 secondsPerSlot, uint256 genesisTime, address lidoLocator - ) BaseOracle(secondsPerSlot, genesisTime) ValidatorsExitBus(lidoLocator) {} + ) + BaseOracle(secondsPerSlot, genesisTime) + ValidatorsExitBus(lidoLocator) + {} function initialize( address admin, address consensusContract, uint256 consensusVersion, uint256 lastProcessingRefSlot, - uint256 maxValidatorsPerRequest, - uint256 maxExitRequestsLimit, - 
uint256 exitsPerFrame, + uint256 maxValidatorsPerReport, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec ) external { if (admin == address(0)) revert AdminCannotBeZero(); @@ -66,33 +69,30 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { _pauseFor(PAUSE_INFINITELY); _initialize(consensusContract, consensusVersion, lastProcessingRefSlot); + _updateContractVersion(2); + _updateContractVersion(3); - _initialize_v2(maxValidatorsPerRequest, maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + _setMaxValidatorsPerReport(maxValidatorsPerReport); + _setExitRequestLimit(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } /** - * @notice A function to finalize upgrade to v2 (from v1). Can be called only once + * @notice A function to finalize upgrade to v3 (from v1). Can be called only once * * For more details see https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-10.md */ - function finalizeUpgrade_v2( + function finalizeUpgrade_v3( uint256 maxValidatorsPerReport, - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, - uint256 frameDurationInSec + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, + uint256 frameDurationInSec, + uint256 consensusVersion ) external { - _initialize_v2(maxValidatorsPerReport, maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); - } + _updateContractVersion(3); + _setConsensusVersion(consensusVersion); - function _initialize_v2( - uint256 maxValidatorsPerReport, - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, - uint256 frameDurationInSec - ) internal { - _updateContractVersion(2); _setMaxValidatorsPerReport(maxValidatorsPerReport); - _setExitRequestLimit(maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + _setExitRequestLimit(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } /// @@ -119,8 +119,8 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { /// @dev Total number of 
validator exit requests in this report. Must not be greater /// than limit checked in OracleReportSanityChecker.checkExitBusOracleReport. uint256 requestsCount; - /// @dev Format of the validator exit requests data. Currently, only the - /// DATA_FORMAT_LIST=1 is supported. + /// @dev Format of the validator exit requests data. Currently, only the extended + /// DATA_FORMAT_LIST_WITH_KEY_INDEX=2 is supported. uint256 dataFormat; /// @dev Validator exit requests data. Can differ based on the data format, /// see the constant defining a specific data format below for more info. @@ -226,27 +226,33 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { } function _handleConsensusReportData(ReportData calldata data) internal { - if (data.dataFormat != DATA_FORMAT_LIST) { + if (data.dataFormat != DATA_FORMAT_LIST_WITH_KEY_INDEX) { revert UnsupportedRequestsDataFormat(data.dataFormat); } - if (data.data.length % PACKED_REQUEST_LENGTH != 0) { + uint256 packedLength = _getPackedRequestLength(data.dataFormat); + if (data.data.length % packedLength != 0) { revert InvalidRequestsDataLength(); } - if (data.data.length / PACKED_REQUEST_LENGTH != data.requestsCount) { + if (data.data.length / packedLength != data.requestsCount) { revert UnexpectedRequestsDataLength(); } - IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()).checkExitBusOracleReport(data.requestsCount); + // Calculate total balance of validators being exited in ETH (uint256) + // Module 1 (curated) uses 32 ETH, other modules use 2048 ETH per validator + uint256 totalExitBalanceEth = _calculateTotalExitBalanceEth(data.data, data.dataFormat); + IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()).checkExitBusOracleReport( + totalExitBalanceEth + ); - _processExitRequestsList(data.data); + _processExitRequestsList(data.data, data.dataFormat); _storageDataProcessingState().value = DataProcessingState({ refSlot: data.refSlot.toUint64(), requestsCount: data.requestsCount.toUint64(), 
requestsProcessed: data.requestsCount.toUint64(), - dataFormat: uint16(DATA_FORMAT_LIST) + dataFormat: uint16(data.dataFormat) }); if (data.requestsCount == 0) { diff --git a/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol b/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol index bd1c1ab03c..a929bcbd79 100644 --- a/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol +++ b/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol @@ -12,8 +12,8 @@ import {AccessControlEnumerable} from "../utils/access/AccessControlEnumerable.s import {PositiveTokenRebaseLimiter, TokenRebaseLimiterData} from "../lib/PositiveTokenRebaseLimiter.sol"; import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; import {IBurner} from "contracts/common/interfaces/IBurner.sol"; - -import {StakingRouter} from "../StakingRouter.sol"; +import {ILido} from "contracts/common/interfaces/ILido.sol"; +import {IVersioned} from "contracts/common/interfaces/IVersioned.sol"; import {ISecondOpinionOracle} from "../interfaces/ISecondOpinionOracle.sol"; interface IWithdrawalQueue { @@ -32,32 +32,36 @@ interface IWithdrawalQueue { bool isClaimed; } - function getWithdrawalStatus(uint256[] calldata _requestIds) - external - view - returns (WithdrawalRequestStatus[] memory statuses); + function getWithdrawalStatus( + uint256[] calldata _requestIds + ) external view returns (WithdrawalRequestStatus[] memory statuses); } interface IBaseOracle { - function SECONDS_PER_SLOT() external view returns (uint256); - function GENESIS_TIME() external view returns (uint256); function getLastProcessingRefSlot() external view returns (uint256); } +interface IStakingRouter { + function getStakingModuleStateAccounting(uint256 _stakingModuleId) + external + view + returns ( + uint64 validatorsBalanceGwei, + uint64 exitedValidatorsCount + ); +} + /// @notice The set of restrictions used in the sanity checks of the oracle report /// @dev struct is loaded from the storage and stored 
in memory during the tx running struct LimitsList { - /// @notice The max possible number of validators that might be reported as `exited` - /// per single day, depends on the Consensus Layer churn limit - /// @dev Must fit into uint16 (<= 65_535) - uint256 exitedValidatorsPerDayLimit; - - /// @notice The max possible number of validators that might be reported as `appeared` - /// per single day, limited by the max daily deposits via DepositSecurityModule in practice - /// isn't limited by a consensus layer (because `appeared` includes `pending`, i.e., not `activated` yet) - /// @dev Must fit into uint16 (<= 65_535) - uint256 appearedValidatorsPerDayLimit; - + /// @notice The max possible exited ETH amount that might be reported + /// per single day. + /// @dev Must fit into uint32 (<= 4_294_967_295) + uint256 exitedEthAmountPerDayLimit; + /// @notice The max possible appeared ETH amount that might be reported + /// per single day. + /// @dev Must fit into uint32 (<= 4_294_967_295) + uint256 appearedEthAmountPerDayLimit; /// @notice The max annual increase of the total validators' balances on the Consensus Layer /// since the previous oracle report /// (the increase that is limited does not include fresh deposits to the Beacon Chain as well as withdrawn ether) @@ -70,85 +74,136 @@ struct LimitsList { /// @dev Represented in the Basis Points (100% == 10_000) uint256 simulatedShareRateDeviationBPLimit; - /// @notice The max number of exit requests allowed in report to ValidatorsExitBusOracle - uint256 maxValidatorExitRequestsPerReport; + /// @notice The max requested to exit balance in ETH + /// @dev Sum of all max effective balances of all requested validators should be equal or lower in one report + uint256 maxBalanceExitRequestedPerReportInEth; + /// @notice WC 0x01 max effective balance equivalent weight in ETH + /// @dev Must fit into uint16 and be non-zero + uint256 maxEffectiveBalanceWeightWCType01; + /// @notice WC 0x02 max effective balance equivalent weight 
in ETH + /// @dev Must fit into uint16 and be non-zero + uint256 maxEffectiveBalanceWeightWCType02; /// @notice The max number of data list items reported to accounting oracle in extra data per single transaction /// @dev Must fit into uint16 (<= 65_535) uint256 maxItemsPerExtraDataTransaction; - /// @notice The max number of node operators reported per extra data list item /// @dev Must fit into uint16 (<= 65_535) uint256 maxNodeOperatorsPerExtraDataItem; - /// @notice The min time required to be passed from the creation of the request to be /// finalized till the time of the oracle report uint256 requestTimestampMargin; - /// @notice The positive token rebase allowed per single LidoOracle report /// @dev uses 1e9 precision, e.g.: 1e6 - 0.1%; 1e9 - 100%, see `setMaxPositiveTokenRebase()` uint256 maxPositiveTokenRebase; - - /// @notice Initial slashing amount per one validator to calculate initial slashing of the validators' balances on the Consensus Layer - /// @dev Represented in the PWei (1^15 Wei). Must fit into uint16 (<= 65_535) - uint256 initialSlashingAmountPWei; - - /// @notice Inactivity penalties amount per one validator to calculate penalties of the validators' balances on the Consensus Layer - /// @dev Represented in the PWei (1^15 Wei). Must fit into uint16 (<= 65_535) - uint256 inactivityPenaltiesAmountPWei; - + /// @notice The max allowed CL balance decrease over the CL_BALANCE_WINDOW as a fraction of the adjusted balance + /// @dev Represented in the Basis Points (100% == 10_000). Must fit into uint16 (<= 65_535) + uint256 maxCLBalanceDecreaseBP; /// @notice The maximum percent on how Second Opinion Oracle reported value could be greater /// than reported by the AccountingOracle. There is an assumption that second opinion oracle CL balance /// can be greater as calculated for the withdrawal credentials. 
/// @dev Represented in the Basis Points (100% == 10_000) uint256 clBalanceOraclesErrorUpperBPLimit; + /// @notice The max possible consolidation ETH amount that might be reported + /// per single day. + /// @dev Must fit into uint32 (<= 4_294_967_295) + uint256 consolidationEthAmountPerDayLimit; + /// @notice Effective ETH amount attributed to a single exited validator + /// in the exited ETH amount per day check. + /// @dev Stored in whole ETH units. Must fit into uint16. + uint256 exitedValidatorEthAmountLimit; + /// @notice Extra protocol-level pending balance cap to tolerate bounded side deposits + /// or same-validator top-ups that were not funded by Lido. + /// @dev Stored in whole ETH units. Must fit into uint16. + uint256 externalPendingBalanceCapEth; } -/// @dev The packed version of the LimitsList struct to be effectively persisted in storage -struct LimitsListPacked { - uint16 exitedValidatorsPerDayLimit; - uint16 appearedValidatorsPerDayLimit; +/// @dev The packed accounting/rebase limits persisted in a single storage slot +struct AccountingCoreLimitsPacked { + uint32 exitedEthAmountPerDayLimit; + uint32 appearedEthAmountPerDayLimit; + uint32 consolidationEthAmountPerDayLimit; uint16 annualBalanceIncreaseBPLimit; uint16 simulatedShareRateDeviationBPLimit; - uint16 maxValidatorExitRequestsPerReport; + uint64 maxPositiveTokenRebase; + uint16 maxCLBalanceDecreaseBP; + uint16 clBalanceOraclesErrorUpperBPLimit; + uint16 exitedValidatorEthAmountLimit; + uint16 externalPendingBalanceCapEth; +} + +/// @dev The packed operational limits persisted in a single storage slot +struct OperationalLimitsPacked { + uint16 maxBalanceExitRequestedPerReportInEth; + uint16 maxEffectiveBalanceWeightWCType01; + uint16 maxEffectiveBalanceWeightWCType02; uint16 maxItemsPerExtraDataTransaction; uint16 maxNodeOperatorsPerExtraDataItem; uint32 requestTimestampMargin; - uint64 maxPositiveTokenRebase; - uint16 initialSlashingAmountPWei; - uint16 inactivityPenaltiesAmountPWei; - 
uint16 clBalanceOraclesErrorUpperBPLimit; } struct ReportData { - uint64 timestamp; - uint64 totalExitedValidators; - uint128 negativeCLRebaseWei; + uint64 timestamp; // Logical report timestamp in seconds + uint128 clBalance; // Total CL balance (validators + pending) in Wei + uint128 deposits; // Deposits for the period since the last report in Wei + uint128 clWithdrawals; // Actual ETH moved from CL to withdrawal vault this period +} + +struct CLBalanceDecreaseCheckParams { + uint256 maxCLBalanceDecreaseBP; + uint256 clBalanceOraclesErrorUpperBPLimit; + uint256 preCLBalance; + uint256 postCLBalance; + uint256 withdrawalVaultBalance; + uint256 withdrawalsVaultTransfer; + uint256 deposits; + uint256 timeElapsed; +} + +struct CLBalanceChangeCheckParams { + uint256 timeElapsed; + uint256 preCLValidatorsBalance; + uint256 preCLPendingBalance; + uint256 postCLValidatorsBalance; + uint256 postCLPendingBalance; + uint256 deposits; +} + +struct ActivationBalanceCheckResult { + uint256 effectiveTimeElapsed; + uint256 activatedBalanceWithGap; } uint256 constant MAX_BASIS_POINTS = 10_000; uint256 constant SHARE_RATE_PRECISION_E27 = 1e27; -uint256 constant ONE_PWEI = 1e15; /// @title Sanity checks for the Lido's oracle report /// @notice The contracts contain methods to perform sanity checks of the Lido's oracle report /// and lever methods for granular tuning of the params of the checks contract OracleReportSanityChecker is AccessControlEnumerable { using LimitsListPacker for LimitsList; - using LimitsListUnpacker for LimitsListPacked; + using LimitsListUnpacker for AccountingCoreLimitsPacked; using PositiveTokenRebaseLimiter for TokenRebaseLimiterData; bytes32 public constant ALL_LIMITS_MANAGER_ROLE = keccak256("ALL_LIMITS_MANAGER_ROLE"); - bytes32 public constant EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE = - keccak256("EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE"); - bytes32 public constant APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE = - 
keccak256("APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE = + keccak256("EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE"); + bytes32 public constant EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE = + keccak256("EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE"); bytes32 public constant ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE = keccak256("ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE"); bytes32 public constant SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE = keccak256("SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE"); - bytes32 public constant MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE = - keccak256("MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE"); + bytes32 public constant MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE = + keccak256("MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE"); + bytes32 public constant MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE = + keccak256("MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE"); bytes32 public constant MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE = keccak256("MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE"); bytes32 public constant MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE = @@ -156,21 +211,23 @@ contract OracleReportSanityChecker is AccessControlEnumerable { bytes32 public constant REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE = keccak256("REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE"); bytes32 public constant MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE = keccak256("MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE"); - bytes32 public constant SECOND_OPINION_MANAGER_ROLE = - 
keccak256("SECOND_OPINION_MANAGER_ROLE"); - bytes32 public constant INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE = - keccak256("INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE"); - + bytes32 public constant SECOND_OPINION_MANAGER_ROLE = keccak256("SECOND_OPINION_MANAGER_ROLE"); + bytes32 public constant MAX_CL_BALANCE_DECREASE_MANAGER_ROLE = + keccak256("MAX_CL_BALANCE_DECREASE_MANAGER_ROLE"); uint256 private constant DEFAULT_TIME_ELAPSED = 1 hours; uint256 private constant DEFAULT_CL_BALANCE = 1 gwei; uint256 private constant SECONDS_PER_DAY = 24 * 60 * 60; + uint256 private constant ANNUAL_BALANCE_INCREASE_DENOMINATOR = 365 days * MAX_BASIS_POINTS; + /// @dev Maximum withdrawals ether used for migration bootstrap, bounded by CL churn limit per report window + uint256 private constant MAX_WITHDRAWALS_ETH_BY_CHURN_LIMIT_PER_REPORT = 57_600 ether; + /// @dev Time window for the CL balance decrease check + uint256 private constant CL_BALANCE_WINDOW = 36 days; ILidoLocator private immutable LIDO_LOCATOR; - uint256 private immutable GENESIS_TIME; - uint256 private immutable SECONDS_PER_SLOT; address private immutable ACCOUNTING_ADDRESS; - LimitsListPacked private _limits; + AccountingCoreLimitsPacked private _accountingCoreLimits; + OperationalLimitsPacked private _operationalLimits; /// @dev Historical reports data ReportData[] public reportData; @@ -178,23 +235,30 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @dev The address of the second opinion oracle ISecondOpinionOracle public secondOpinionOracle; + /// @dev Withdrawal vault balance after the last report's transfer was applied. + /// Used to compute actual CL withdrawals: clWithdrawals = WVB_current - _lastVaultBalanceAfterTransfer + uint256 private _lastVaultBalanceAfterTransfer; + + /// @dev Logical timestamp of the latest stored report snapshot. + /// It is advanced by `_timeElapsed` on each accounting report. 
+ uint256 private _lastReportTimestamp; + + /// @dev Migration flag: false until the first successful accounting report after migration. + /// The per-module validators balance increase check is skipped while the flag is false. + bool private _isPostMigrationFirstReportDone; + /// @param _lidoLocator address of the LidoLocator instance - /// @param _accountingOracle address of the AccountingOracle instance /// @param _accounting address of the Accounting instance /// @param _admin address to grant DEFAULT_ADMIN_ROLE of the AccessControl contract /// @param _limitsList initial values to be set for the limits list constructor( address _lidoLocator, - address _accountingOracle, address _accounting, address _admin, LimitsList memory _limitsList ) { if (_admin == address(0)) revert AdminCannotBeZero(); LIDO_LOCATOR = ILidoLocator(_lidoLocator); - - GENESIS_TIME = IBaseOracle(_accountingOracle).GENESIS_TIME(); - SECONDS_PER_SLOT = IBaseOracle(_accountingOracle).SECONDS_PER_SLOT(); ACCOUNTING_ADDRESS = _accounting; _updateLimits(_limitsList); @@ -214,7 +278,19 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @notice Returns the limits list for the Lido's oracle report sanity checks function getOracleReportLimits() public view returns (LimitsList memory) { - return _limits.unpack(); + return _accountingCoreLimits.unpack(_operationalLimits); + } + + function getMaxCLBalanceDecreaseBP() external view returns (uint256) { + return _accountingCoreLimits.maxCLBalanceDecreaseBP; + } + + function getMaxEffectiveBalanceWeightWCType01() external view returns (uint256) { + return _operationalLimits.maxEffectiveBalanceWeightWCType01; + } + + function getMaxEffectiveBalanceWeightWCType02() external view returns (uint256) { + return _operationalLimits.maxEffectiveBalanceWeightWCType02; } /// @notice Returns max positive token rebase value with 1e9 precision: @@ -241,13 +317,16 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// NB: The value is 
not set by default (explicit initialization required), /// the recommended sane values are from 0.05% to 0.1%. function getMaxPositiveTokenRebase() public view returns (uint256) { - return _limits.maxPositiveTokenRebase; + return _accountingCoreLimits.maxPositiveTokenRebase; } /// @notice Sets the new values for the limits list and second opinion oracle /// @param _limitsList new limits list /// @param _secondOpinionOracle negative rebase oracle. - function setOracleReportLimits(LimitsList calldata _limitsList, ISecondOpinionOracle _secondOpinionOracle) external onlyRole(ALL_LIMITS_MANAGER_ROLE) { + function setOracleReportLimits( + LimitsList calldata _limitsList, + ISecondOpinionOracle _secondOpinionOracle + ) external onlyRole(ALL_LIMITS_MANAGER_ROLE) { _updateLimits(_limitsList); if (_secondOpinionOracle != secondOpinionOracle) { secondOpinionOracle = _secondOpinionOracle; @@ -255,47 +334,69 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } } - /// @notice Sets the new value for the exitedValidatorsPerDayLimit - /// - /// NB: AccountingOracle reports validators as exited once they passed the `EXIT_EPOCH` on Consensus Layer - /// therefore, the value should be set in accordance to the consensus layer churn limit - /// - /// @param _exitedValidatorsPerDayLimit new exitedValidatorsPerDayLimit value - function setExitedValidatorsPerDayLimit(uint256 _exitedValidatorsPerDayLimit) - external - onlyRole(EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.exitedValidatorsPerDayLimit = _exitedValidatorsPerDayLimit; - _updateLimits(limitsList); + /// @notice Sets the new value for the exitedEthAmountPerDayLimit + /// @param _exitedEthAmountPerDayLimit new exitedEthAmountPerDayLimit value + function setExitedEthAmountPerDayLimit( + uint256 _exitedEthAmountPerDayLimit + ) public onlyRole(EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_exitedEthAmountPerDayLimit, 0, 
type(uint32).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.exitedEthAmountPerDayLimit = SafeCast.toUint32(_exitedEthAmountPerDayLimit); + _updateAccountingCoreLimits(limits); } - /// @notice Sets the new value for the appearedValidatorsPerDayLimit - /// - /// NB: AccountingOracle reports validators as appeared once they become `pending` - /// (might be not `activated` yet). Thus, this limit should be high enough because consensus layer - /// has no intrinsic churn limit for the amount of `pending` validators (only for `activated` instead). - /// For Lido it depends on the amount of deposits that can be made via DepositSecurityModule daily. - /// - /// @param _appearedValidatorsPerDayLimit new appearedValidatorsPerDayLimit value - function setAppearedValidatorsPerDayLimit(uint256 _appearedValidatorsPerDayLimit) - external - onlyRole(APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.appearedValidatorsPerDayLimit = _appearedValidatorsPerDayLimit; - _updateLimits(limitsList); + /// @notice Sets the new value for the appearedEthAmountPerDayLimit + /// @param _appearedEthAmountPerDayLimit new appearedEthAmountPerDayLimit value + function setAppearedEthAmountPerDayLimit( + uint256 _appearedEthAmountPerDayLimit + ) public onlyRole(APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_appearedEthAmountPerDayLimit, 0, type(uint32).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.appearedEthAmountPerDayLimit = SafeCast.toUint32(_appearedEthAmountPerDayLimit); + _updateAccountingCoreLimits(limits); + } + + /// @notice Sets the new value for the consolidationEthAmountPerDayLimit + /// @param _consolidationEthAmountPerDayLimit new consolidationEthAmountPerDayLimit value + function setConsolidationEthAmountPerDayLimit( + uint256 _consolidationEthAmountPerDayLimit + ) external 
onlyRole(CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_consolidationEthAmountPerDayLimit, 0, type(uint32).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.consolidationEthAmountPerDayLimit = SafeCast.toUint32(_consolidationEthAmountPerDayLimit); + _updateAccountingCoreLimits(limits); + } + + /// @notice Sets exited validator ETH amount limiter value. + function setExitedValidatorEthAmountLimit( + uint256 _exitedValidatorEthAmountLimit + ) external onlyRole(EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_exitedValidatorEthAmountLimit, 1, type(uint16).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.exitedValidatorEthAmountLimit = SafeCast.toUint16(_exitedValidatorEthAmountLimit); + _updateAccountingCoreLimits(limits); + } + + /// @notice Sets the extra external pending balance cap tolerated above Lido-funded pending. + /// @dev Stored in whole ETH units to keep accounting core limits within a single storage slot. 
+ function setExternalPendingBalanceCapEth( + uint256 _externalPendingBalanceCapEth + ) external onlyRole(EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE) { + _checkLimitValue(_externalPendingBalanceCapEth, 0, type(uint16).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.externalPendingBalanceCapEth = SafeCast.toUint16(_externalPendingBalanceCapEth); + _updateAccountingCoreLimits(limits); } /// @notice Sets the new value for the annualBalanceIncreaseBPLimit /// @param _annualBalanceIncreaseBPLimit new annualBalanceIncreaseBPLimit value - function setAnnualBalanceIncreaseBPLimit(uint256 _annualBalanceIncreaseBPLimit) - external - onlyRole(ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.annualBalanceIncreaseBPLimit = _annualBalanceIncreaseBPLimit; - _updateLimits(limitsList); + function setAnnualBalanceIncreaseBPLimit( + uint256 _annualBalanceIncreaseBPLimit + ) external onlyRole(ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_annualBalanceIncreaseBPLimit, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.annualBalanceIncreaseBPLimit = LimitsListPacker.toBasisPoints(_annualBalanceIncreaseBPLimit); + _updateAccountingCoreLimits(limits); } /// @notice Sets the new value for the simulatedShareRateDeviationBPLimit @@ -304,31 +405,55 @@ contract OracleReportSanityChecker is AccessControlEnumerable { external onlyRole(SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.simulatedShareRateDeviationBPLimit = _simulatedShareRateDeviationBPLimit; - _updateLimits(limitsList); + _checkLimitValue(_simulatedShareRateDeviationBPLimit, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.simulatedShareRateDeviationBPLimit = LimitsListPacker.toBasisPoints(_simulatedShareRateDeviationBPLimit); + 
_updateAccountingCoreLimits(limits); } - /// @notice Sets the new value for the maxValidatorExitRequestsPerReport - /// @param _maxValidatorExitRequestsPerReport new maxValidatorExitRequestsPerReport value - function setMaxExitRequestsPerOracleReport(uint256 _maxValidatorExitRequestsPerReport) + /// @notice Sets the new value for the maxBalanceExitRequestedPerReportInEth + /// @param _maxBalanceExitRequestedPerReportInEth new maxBalanceExitRequestedPerReportInEth value + function setMaxBalanceExitRequestedPerReportInEth(uint256 _maxBalanceExitRequestedPerReportInEth) external - onlyRole(MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE) + onlyRole(MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxValidatorExitRequestsPerReport = _maxValidatorExitRequestsPerReport; - _updateLimits(limitsList); + _checkLimitValue(_maxBalanceExitRequestedPerReportInEth, 0, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxBalanceExitRequestedPerReportInEth = SafeCast.toUint16(_maxBalanceExitRequestedPerReportInEth); + _updateOperationalLimits(limits); } - /// @notice Sets the new value for the requestTimestampMargin - /// @param _requestTimestampMargin new requestTimestampMargin value - function setRequestTimestampMargin(uint256 _requestTimestampMargin) + /// @notice Sets the new WC 0x01 max effective balance equivalent weight in ETH + function setMaxEffectiveBalanceWeightWCType01(uint256 _maxEffectiveBalanceWeightWCType01) + external + onlyRole(MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE) + { + _checkLimitValue(_maxEffectiveBalanceWeightWCType01, 1, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxEffectiveBalanceWeightWCType01 = SafeCast.toUint16(_maxEffectiveBalanceWeightWCType01); + _updateOperationalLimits(limits); + } + + /// @notice Sets the new WC 0x02 max effective balance equivalent weight in ETH + function 
setMaxEffectiveBalanceWeightWCType02(uint256 _maxEffectiveBalanceWeightWCType02) external - onlyRole(REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE) + onlyRole(MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.requestTimestampMargin = _requestTimestampMargin; - _updateLimits(limitsList); + _checkLimitValue(_maxEffectiveBalanceWeightWCType02, 1, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxEffectiveBalanceWeightWCType02 = SafeCast.toUint16(_maxEffectiveBalanceWeightWCType02); + _updateOperationalLimits(limits); + } + + /// @notice Sets the new value for the requestTimestampMargin + /// @param _requestTimestampMargin new requestTimestampMargin value + function setRequestTimestampMargin( + uint256 _requestTimestampMargin + ) external onlyRole(REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE) { + _checkLimitValue(_requestTimestampMargin, 0, type(uint32).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.requestTimestampMargin = SafeCast.toUint32(_requestTimestampMargin); + _updateOperationalLimits(limits); } /// @notice Set max positive token rebase allowed per single oracle report token rebase happens @@ -338,35 +463,35 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// e.g.: 1e6 - 0.1%; 1e9 - 100% /// - passing zero value is prohibited /// - to allow unlimited rebases, pass max uint64, i.e.: type(uint64).max - function setMaxPositiveTokenRebase(uint256 _maxPositiveTokenRebase) - external - onlyRole(MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxPositiveTokenRebase = _maxPositiveTokenRebase; - _updateLimits(limitsList); + function setMaxPositiveTokenRebase( + uint256 _maxPositiveTokenRebase + ) external onlyRole(MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE) { + _checkLimitValue(_maxPositiveTokenRebase, 1, type(uint64).max); + AccountingCoreLimitsPacked memory limits = 
_accountingCoreLimits; + limits.maxPositiveTokenRebase = SafeCast.toUint64(_maxPositiveTokenRebase); + _updateAccountingCoreLimits(limits); } /// @notice Sets the new value for the maxItemsPerExtraDataTransaction /// @param _maxItemsPerExtraDataTransaction new maxItemsPerExtraDataTransaction value - function setMaxItemsPerExtraDataTransaction(uint256 _maxItemsPerExtraDataTransaction) - external - onlyRole(MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxItemsPerExtraDataTransaction = _maxItemsPerExtraDataTransaction; - _updateLimits(limitsList); + function setMaxItemsPerExtraDataTransaction( + uint256 _maxItemsPerExtraDataTransaction + ) external onlyRole(MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE) { + _checkLimitValue(_maxItemsPerExtraDataTransaction, 0, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxItemsPerExtraDataTransaction = SafeCast.toUint16(_maxItemsPerExtraDataTransaction); + _updateOperationalLimits(limits); } /// @notice Sets the new value for the max maxNodeOperatorsPerExtraDataItem /// @param _maxNodeOperatorsPerExtraDataItem new maxNodeOperatorsPerExtraDataItem value - function setMaxNodeOperatorsPerExtraDataItem(uint256 _maxNodeOperatorsPerExtraDataItem) - external - onlyRole(MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxNodeOperatorsPerExtraDataItem = _maxNodeOperatorsPerExtraDataItem; - _updateLimits(limitsList); + function setMaxNodeOperatorsPerExtraDataItem( + uint256 _maxNodeOperatorsPerExtraDataItem + ) external onlyRole(MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE) { + _checkLimitValue(_maxNodeOperatorsPerExtraDataItem, 0, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxNodeOperatorsPerExtraDataItem = SafeCast.toUint16(_maxNodeOperatorsPerExtraDataItem); + _updateOperationalLimits(limits); } /// @notice Sets the address of 
the second opinion oracle and clBalanceOraclesErrorUpperBPLimit value @@ -374,30 +499,59 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// If it's zero address — oracle is disabled. /// Default value is zero address. /// @param _clBalanceOraclesErrorUpperBPLimit new clBalanceOraclesErrorUpperBPLimit value - function setSecondOpinionOracleAndCLBalanceUpperMargin(ISecondOpinionOracle _secondOpinionOracle, uint256 _clBalanceOraclesErrorUpperBPLimit) - external - onlyRole(SECOND_OPINION_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.clBalanceOraclesErrorUpperBPLimit = _clBalanceOraclesErrorUpperBPLimit; - _updateLimits(limitsList); + function setSecondOpinionOracleAndCLBalanceUpperMargin( + ISecondOpinionOracle _secondOpinionOracle, + uint256 _clBalanceOraclesErrorUpperBPLimit + ) external onlyRole(SECOND_OPINION_MANAGER_ROLE) { + _checkLimitValue(_clBalanceOraclesErrorUpperBPLimit, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.clBalanceOraclesErrorUpperBPLimit = LimitsListPacker.toBasisPoints(_clBalanceOraclesErrorUpperBPLimit); + _updateAccountingCoreLimits(limits); if (_secondOpinionOracle != secondOpinionOracle) { secondOpinionOracle = ISecondOpinionOracle(_secondOpinionOracle); emit SecondOpinionOracleChanged(_secondOpinionOracle); } } - /// @notice Sets the initial slashing and penalties amounts - /// @param _initialSlashingAmountPWei - initial slashing amount (in PWei) - /// @param _inactivityPenaltiesAmountPWei - penalties amount (in PWei) - function setInitialSlashingAndPenaltiesAmount(uint256 _initialSlashingAmountPWei, uint256 _inactivityPenaltiesAmountPWei) + /// @notice Sets the max allowed CL balance decrease in basis points + /// @param _maxCLBalanceDecreaseBP max CL balance decrease over the sliding window (in BP, e.g. 
360 = 3.6%) + function setMaxCLBalanceDecreaseBP(uint256 _maxCLBalanceDecreaseBP) external - onlyRole(INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE) + onlyRole(MAX_CL_BALANCE_DECREASE_MANAGER_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.initialSlashingAmountPWei = _initialSlashingAmountPWei; - limitsList.inactivityPenaltiesAmountPWei = _inactivityPenaltiesAmountPWei; - _updateLimits(limitsList); + _checkLimitValue(_maxCLBalanceDecreaseBP, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.maxCLBalanceDecreaseBP = LimitsListPacker.toBasisPoints(_maxCLBalanceDecreaseBP); + _updateAccountingCoreLimits(limits); + } + + /// @notice One-time migration: seeds initial snapshots into reportData + /// so that the sliding-window CL decrease check has a valid starting point. + /// @dev Permissionless by design: after the first successful call, further calls revert. + function migrateBaselineSnapshot() external { + if (reportData.length != 0) revert MigrationAlreadyDone(); + + address lidoAddr = LIDO_LOCATOR.lido(); + uint256 lidoVersion = IVersioned(lidoAddr).getContractVersion(); + if (lidoVersion != 4) revert UnexpectedLidoVersion(lidoVersion, 4); + + (uint256 migrationCLValidatorsBalance, uint256 migrationCLPendingBalance,, uint256 migrationDeposits) = ILido(lidoAddr) + .getBalanceStats(); + uint256 migrationCLBalance = migrationCLValidatorsBalance + migrationCLPendingBalance; + uint256 migrationCLWithdrawals = MAX_WITHDRAWALS_ETH_BY_CHURN_LIMIT_PER_REPORT; + // Initialize vault state: vault is not drained during migration, + // so after-transfer balance equals current vault balance + _lastVaultBalanceAfterTransfer = LIDO_LOCATOR.withdrawalVault().balance; + + // The decrease formula uses baseline report B[X-k] and sums flows from reports [X-k+1..X]. 
+ // To include migration-time deposits/withdrawals without any special-case branch in formula code: + // 1) store pure baseline point with zero flows; + // 2) store bootstrap flow chunk at the same CL balance right after baseline. + uint256 migrationReportTimestamp = _lastReportTimestamp; + _addReportData(migrationReportTimestamp, migrationCLBalance, 0, 0); + _addReportData(migrationReportTimestamp, migrationCLBalance, migrationDeposits, migrationCLWithdrawals); + + emit BaselineSnapshotMigrated(migrationCLBalance, migrationDeposits, migrationCLWithdrawals); } /// @notice Returns the allowed ETH amount that might be taken from the withdrawal vault and EL @@ -427,12 +581,7 @@ contract OracleReportSanityChecker is AccessControlEnumerable { uint256 _sharesRequestedToBurn, uint256 _etherToLockForWithdrawals, uint256 _newSharesToBurnForWithdrawals - ) external view returns ( - uint256 withdrawals, - uint256 elRewards, - uint256 sharesFromWQToBurn, - uint256 sharesToBurn - ) { + ) external view returns (uint256 withdrawals, uint256 elRewards, uint256 sharesFromWQToBurn, uint256 sharesToBurn) { TokenRebaseLimiterData memory tokenRebaseLimiter = PositiveTokenRebaseLimiter.initLimiterState( getMaxPositiveTokenRebase(), _preInternalEther, @@ -466,91 +615,199 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @notice Applies sanity checks to the accounting params of Lido's oracle report /// WARNING. The function has side effects and modifies the state of the contract. - /// It's needed to keep information about exited validators counts and negative rebase values over time. - /// The function called from Lido contract that uses the 'old' Solidity version (0.4.24) and will do a correct - /// call to this method even it's declared as "view" in interface there. + /// It's needed to keep CL balance snapshots for the balance decrease check over a sliding window. 
/// @param _timeElapsed time elapsed since the previous oracle report - /// @param _preCLBalance sum of all Lido validators' balances on the Consensus Layer before the - /// current oracle report (NB: also include the initial balance of newly appeared validators) - /// @param _postCLBalance sum of all Lido validators' balances on the Consensus Layer after the - /// current oracle report + /// @param _preCLValidatorsBalance sum of all Lido validators' balances on the Consensus Layer + /// before the current oracle report + /// @param _preCLPendingBalance pending deposits balance on the Consensus Layer before the current oracle report + /// @param _postCLValidatorsBalance sum of all Lido validators' balances on the Consensus Layer + /// after the current oracle report + /// @param _postCLPendingBalance pending deposits balance on the Consensus Layer after the current oracle report /// @param _withdrawalVaultBalance withdrawal vault balance on Execution Layer for the report reference slot /// @param _elRewardsVaultBalance el rewards vault balance on Execution Layer for the report reference slot /// @param _sharesRequestedToBurn shares requested to burn for the report reference slot - /// @param _preCLValidators Lido-participating validators on the CL side before the current oracle report - /// @param _postCLValidators Lido-participating validators on the CL side after the current oracle report + /// @param _deposits deposits to the Beacon Chain since the previous oracle report in Wei + /// @param _withdrawalsVaultTransfer ETH amount transferred from withdrawal vault this report function checkAccountingOracleReport( uint256 _timeElapsed, - uint256 _preCLBalance, - uint256 _postCLBalance, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, uint256 _withdrawalVaultBalance, uint256 _elRewardsVaultBalance, uint256 _sharesRequestedToBurn, - uint256 _preCLValidators, - uint256 
_postCLValidators + uint256 _deposits, + uint256 _withdrawalsVaultTransfer ) external { if (msg.sender != ACCOUNTING_ADDRESS) { revert CalledNotFromAccounting(); } - LimitsList memory limitsList = _limits.unpack(); - uint256 refSlot = IBaseOracle(LIDO_LOCATOR.accountingOracle()).getLastProcessingRefSlot(); - - address withdrawalVault = LIDO_LOCATOR.withdrawalVault(); // 1. Withdrawals vault reported balance - _checkWithdrawalVaultBalance(withdrawalVault.balance, _withdrawalVaultBalance); - - address elRewardsVault = LIDO_LOCATOR.elRewardsVault(); + _checkWithdrawalVaultBalance(LIDO_LOCATOR.withdrawalVault().balance, _withdrawalVaultBalance); // 2. EL rewards vault reported balance - _checkELRewardsVaultBalance(elRewardsVault.balance, _elRewardsVaultBalance); - + _checkELRewardsVaultBalance(LIDO_LOCATOR.elRewardsVault().balance, _elRewardsVaultBalance); // 3. Burn requests _checkSharesRequestedToBurn(_sharesRequestedToBurn); + CLBalanceChangeCheckParams memory checkParams = CLBalanceChangeCheckParams({ + timeElapsed: _timeElapsed, + preCLValidatorsBalance: _preCLValidatorsBalance, + preCLPendingBalance: _preCLPendingBalance, + postCLValidatorsBalance: _postCLValidatorsBalance, + postCLPendingBalance: _postCLPendingBalance, + deposits: _deposits + }); + _checkAccountingOracleReportCLBalances( + checkParams, + _withdrawalVaultBalance, + _withdrawalsVaultTransfer + ); + } + function _checkAccountingOracleReportCLBalances( + CLBalanceChangeCheckParams memory _checkParams, + uint256 _withdrawalVaultBalance, + uint256 _withdrawalsVaultTransfer + ) internal { + AccountingCoreLimitsPacked memory limitsList = _accountingCoreLimits; + CLBalanceDecreaseCheckParams memory decreaseCheckParams; + decreaseCheckParams.maxCLBalanceDecreaseBP = limitsList.maxCLBalanceDecreaseBP; + decreaseCheckParams.clBalanceOraclesErrorUpperBPLimit = limitsList.clBalanceOraclesErrorUpperBPLimit; + decreaseCheckParams.preCLBalance = + _checkParams.preCLValidatorsBalance + 
_checkParams.preCLPendingBalance + _checkParams.deposits; + decreaseCheckParams.postCLBalance = _checkParams.postCLValidatorsBalance + _checkParams.postCLPendingBalance; + decreaseCheckParams.withdrawalVaultBalance = _withdrawalVaultBalance; + decreaseCheckParams.withdrawalsVaultTransfer = _withdrawalsVaultTransfer; + decreaseCheckParams.deposits = _checkParams.deposits; + decreaseCheckParams.timeElapsed = _checkParams.timeElapsed; + uint256 clWithdrawals = _getCLWithdrawals(_withdrawalVaultBalance); + _checkWithdrawalsVaultTransfer(_withdrawalVaultBalance, _withdrawalsVaultTransfer); + _checkCLPendingBalanceIncrease(limitsList, _checkParams, clWithdrawals); // 4. Consensus Layer balance decrease - _checkCLBalanceDecrease(limitsList, _preCLBalance, - _postCLBalance, _withdrawalVaultBalance, _postCLValidators, refSlot); - + _checkCLBalanceDecrease(decreaseCheckParams, clWithdrawals); // 5. Consensus Layer annual balances increase - _checkAnnualBalancesIncrease(limitsList, _preCLBalance, _postCLBalance, _timeElapsed); + _checkAnnualBalancesIncrease( + limitsList, + decreaseCheckParams.preCLBalance, + decreaseCheckParams.postCLBalance, + _checkParams.timeElapsed + ); + _finalizePostReportState(_withdrawalVaultBalance, _withdrawalsVaultTransfer); + } - // 6. Appeared validators increase - if (_postCLValidators > _preCLValidators) { - _checkAppearedValidatorsChurnLimit(limitsList, (_postCLValidators - _preCLValidators), _timeElapsed); + /// @notice Check total pending CL balance from the current report against protocol state and growth limits. 
+ function checkCLPendingBalanceIncrease( + uint256 _timeElapsed, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, + uint256 _withdrawalVaultBalance, + uint256 _deposits + ) external view { + CLBalanceChangeCheckParams memory checkParams = CLBalanceChangeCheckParams({ + timeElapsed: _timeElapsed, + preCLValidatorsBalance: _preCLValidatorsBalance, + preCLPendingBalance: _preCLPendingBalance, + postCLValidatorsBalance: _postCLValidatorsBalance, + postCLPendingBalance: _postCLPendingBalance, + deposits: _deposits + }); + _checkCLPendingBalanceIncrease(_accountingCoreLimits, checkParams, _getCLWithdrawals(_withdrawalVaultBalance)); + } + + /// @notice Check that per-module validators CL balances in wei are consistent with reported total validators balance. + function checkCLBalancesConsistency( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _clValidatorsBalanceWei + ) external pure { + _checkCLBalancesConsistency( + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule, + _clValidatorsBalanceWei + ); + } + + /// @notice Check per-module validators balances consistency and global CL growth budget derived from protocol pending, all in wei. 
+ function checkModuleAndCLBalancesChangeRates( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _preCLValidatorsBalanceWei, + uint256 _preCLPendingBalanceWei, + uint256 _postCLValidatorsBalanceWei, + uint256 _postCLPendingBalanceWei, + uint256 _depositsWei, + uint256 _timeElapsed + ) external view { + CLBalanceChangeCheckParams memory checkParams = CLBalanceChangeCheckParams({ + timeElapsed: _timeElapsed, + preCLValidatorsBalance: _preCLValidatorsBalanceWei, + preCLPendingBalance: _preCLPendingBalanceWei, + postCLValidatorsBalance: _postCLValidatorsBalanceWei, + postCLPendingBalance: _postCLPendingBalanceWei, + deposits: _depositsWei + }); + _checkCLBalancesConsistency( + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule, + checkParams.postCLValidatorsBalance + ); + + // StakingRouter migration seeds per-module validators balances from active validators count + // using the max effective balance, so those migration values may be higher than the first + // oracle-reported balances. Skip the module validators balance increase check until the + // first report overwrites the migrated accounting state with the actual per-module values. 
+ if (!_isPostMigrationFirstReportDone) { + return; } + + _checkModuleValidatorsBalanceIncrease( + IStakingRouter(LIDO_LOCATOR.stakingRouter()), + _accountingCoreLimits, + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule, + checkParams + ); } /// @notice Applies sanity checks to the number of validator exit requests supplied to ValidatorExitBusOracle - /// @param _exitRequestsCount Number of validator exit requests supplied per oracle report - function checkExitBusOracleReport(uint256 _exitRequestsCount) + /// @notice Checks the total balance of validator exit requests supplied per oracle report + /// @param _maxBalanceExitRequestedPerReportInEth Total balance in ETH of all validators requested to exit in the oracle report + function checkExitBusOracleReport(uint256 _maxBalanceExitRequestedPerReportInEth) external view { - uint256 limit = _limits.unpack().maxValidatorExitRequestsPerReport; - if (_exitRequestsCount > limit) { - revert IncorrectNumberOfExitRequestsPerReport(limit); + uint256 limit = _operationalLimits.maxBalanceExitRequestedPerReportInEth; + if (_maxBalanceExitRequestedPerReportInEth > limit) { + revert IncorrectSumOfExitBalancePerReport(_maxBalanceExitRequestedPerReportInEth); } } - /// @notice Check rate of exited validators per day - /// @param _exitedValidatorsCount Number of validator exited per oracle report - function checkExitedValidatorsRatePerDay(uint256 _exitedValidatorsCount) - external - view - { - uint256 exitedValidatorsLimit = _limits.unpack().exitedValidatorsPerDayLimit; - if (_exitedValidatorsCount > exitedValidatorsLimit) { - revert ExitedValidatorsLimitExceeded(exitedValidatorsLimit, _exitedValidatorsCount); - } + /// @notice Check exited ETH amount rate per day based on exited validators count. + /// @param _newlyExitedValidatorsCount Number of newly exited validators since previous report. + /// @param _timeElapsed Time elapsed in seconds since previous report. 
+ function checkExitedEthAmountPerDay( + uint256 _newlyExitedValidatorsCount, + uint256 _timeElapsed + ) external view { + AccountingCoreLimitsPacked memory limitsList = _accountingCoreLimits; + uint256 exitedEthAmount = _newlyExitedValidatorsCount * uint256(limitsList.exitedValidatorEthAmountLimit) * 1 ether; + uint256 exitedEthAmountPerDay = _normalizePerDay(exitedEthAmount, _timeElapsed); + _checkExitedEthAmountPerDay(limitsList, exitedEthAmountPerDay); + } + + /// @notice Check appeared ETH amount rate per day. + /// @param _appearedEthAmountPerDay Appeared ETH amount per day in Wei. + function checkAppearedEthAmountPerDay(uint256 _appearedEthAmountPerDay) external view { + _checkAppearedEthAmountPerDay(_accountingCoreLimits, _appearedEthAmountPerDay); } /// @notice check the number of node operators reported per extra data item in the accounting oracle report. /// @param _itemIndex Index of item in extra data /// @param _nodeOperatorsCount Number of validator exit requests supplied per oracle report - function checkNodeOperatorsPerExtraDataItemCount(uint256 _itemIndex, uint256 _nodeOperatorsCount) - external - view - { - uint256 limit = _limits.unpack().maxNodeOperatorsPerExtraDataItem; + function checkNodeOperatorsPerExtraDataItemCount(uint256 _itemIndex, uint256 _nodeOperatorsCount) external view { + uint256 limit = _operationalLimits.maxNodeOperatorsPerExtraDataItem; if (_nodeOperatorsCount > limit) { revert TooManyNodeOpsPerExtraDataItem(_itemIndex, _nodeOperatorsCount); } @@ -558,11 +815,8 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @notice Check the number of extra data list items per transaction in the accounting oracle report. 
/// @param _extraDataListItemsCount Number of items per single transaction in the accounting oracle report - function checkExtraDataItemsCountPerTransaction(uint256 _extraDataListItemsCount) - external - view - { - uint256 limit = _limits.unpack().maxItemsPerExtraDataTransaction; + function checkExtraDataItemsCountPerTransaction(uint256 _extraDataListItemsCount) external view { + uint256 limit = _operationalLimits.maxItemsPerExtraDataTransaction; if (_extraDataListItemsCount > limit) { revert TooManyItemsPerExtraDataTransaction(limit, _extraDataListItemsCount); } @@ -574,11 +828,8 @@ contract OracleReportSanityChecker is AccessControlEnumerable { function checkWithdrawalQueueOracleReport( uint256 _lastFinalizableRequestId, uint256 _reportTimestamp - ) - external - view - { - LimitsList memory limitsList = _limits.unpack(); + ) external view { + OperationalLimitsPacked memory limitsList = _operationalLimits; address withdrawalQueue = LIDO_LOCATOR.withdrawalQueue(); _checkLastFinalizableId(limitsList, withdrawalQueue, _lastFinalizableRequestId, _reportTimestamp); @@ -597,7 +848,7 @@ contract OracleReportSanityChecker is AccessControlEnumerable { uint256 _sharesToBurnForWithdrawals, uint256 _simulatedShareRate ) external view { - LimitsList memory limitsList = _limits.unpack(); + AccountingCoreLimitsPacked memory limitsList = _accountingCoreLimits; // Pretending that withdrawals were not processed // virtually return locked ether back to `_postTotalPooledEther` @@ -610,6 +861,228 @@ contract OracleReportSanityChecker is AccessControlEnumerable { ); } + function _checkCLBalancesConsistency( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _clValidatorsBalanceWei + ) internal pure { + uint256 modulesCount = _stakingModuleIdsWithUpdatedBalance.length; + if (modulesCount != _validatorBalancesWeiByStakingModule.length) { + revert InvalidClBalancesData(); + } + + uint256 validatorBalancesSum; + 
for (uint256 i = 0; i < modulesCount;) { + validatorBalancesSum += _validatorBalancesWeiByStakingModule[i]; + unchecked { + ++i; + } + } + + if (validatorBalancesSum != _clValidatorsBalanceWei) { + revert InconsistentValidatorsBalanceByModule(_clValidatorsBalanceWei, validatorBalancesSum); + } + } + + function _checkExitedEthAmountPerDay( + AccountingCoreLimitsPacked memory _limitsList, + uint256 _exitedEthAmountPerDay + ) internal pure { + uint256 exitedEthLimitWithConsolidation = + (uint256(_limitsList.exitedEthAmountPerDayLimit) + uint256(_limitsList.consolidationEthAmountPerDayLimit)) * + 1 ether; + if (_exitedEthAmountPerDay > exitedEthLimitWithConsolidation) { + revert ExitedEthAmountPerDayLimitExceeded(exitedEthLimitWithConsolidation, _exitedEthAmountPerDay); + } + } + + function _checkAppearedEthAmountPerDay( + AccountingCoreLimitsPacked memory _limitsList, + uint256 _appearedEthAmountPerDay + ) internal pure { + uint256 appearedEthLimitWithConsolidation = + (uint256(_limitsList.appearedEthAmountPerDayLimit) + uint256(_limitsList.consolidationEthAmountPerDayLimit)) * + 1 ether; + if (_appearedEthAmountPerDay > appearedEthLimitWithConsolidation) { + revert AppearedEthAmountPerDayLimitExceeded(appearedEthLimitWithConsolidation, _appearedEthAmountPerDay); + } + } + + function _normalizePerDay(uint256 _amount, uint256 _timeElapsed) internal pure returns (uint256) { + return (_amount * SECONDS_PER_DAY) / _getTimeElapsedForRateNormalization(_timeElapsed); + } + + function _getTimeElapsedForRateNormalization(uint256 _timeElapsed) internal pure returns (uint256) { + return _timeElapsed == 0 ? 1 : _timeElapsed; + } + + function _getTimeElapsedForAllowanceChecks(uint256 _timeElapsed) internal pure returns (uint256) { + return _timeElapsed == 0 ? 
DEFAULT_TIME_ELAPSED : _timeElapsed; + } + + function _calculateAmountForPeriod( + uint256 _amountPerDay, + uint256 _effectiveTimeElapsed + ) internal pure returns (uint256) { + return (_amountPerDay * _effectiveTimeElapsed) / SECONDS_PER_DAY; + } + + function _calculateValidatorsBalanceAprSafetyCap( + uint256 _preCLValidatorsBalance, + uint256 _annualBalanceIncreaseMultiplier + ) internal pure returns (uint256) { + return (_preCLValidatorsBalance * _annualBalanceIncreaseMultiplier) / ANNUAL_BALANCE_INCREASE_DENOMINATOR; + } + + function _checkCLPendingBalanceAndCalculateActivatedBalanceWithGap( + AccountingCoreLimitsPacked memory _limitsList, + CLBalanceChangeCheckParams memory _checkParams + ) internal pure returns (ActivationBalanceCheckResult memory result) { + result.effectiveTimeElapsed = _getTimeElapsedForAllowanceChecks(_checkParams.timeElapsed); + + uint256 fundedPendingBalance = _checkParams.preCLPendingBalance + _checkParams.deposits; + uint256 pendingBalanceCap = fundedPendingBalance + uint256(_limitsList.externalPendingBalanceCapEth) * 1 ether; + if (_checkParams.postCLPendingBalance > pendingBalanceCap) { + revert IncorrectTotalPendingBalance(pendingBalanceCap, _checkParams.postCLPendingBalance); + } + + uint256 activatedBalance = fundedPendingBalance > _checkParams.postCLPendingBalance + ? 
fundedPendingBalance - _checkParams.postCLPendingBalance + : 0; + + uint256 appearedEthLimitPerPeriod = _calculateAmountForPeriod( + uint256(_limitsList.appearedEthAmountPerDayLimit) * 1 ether, + result.effectiveTimeElapsed + ); + if (activatedBalance > appearedEthLimitPerPeriod) { + revert IncorrectTotalActivatedBalance(appearedEthLimitPerPeriod, activatedBalance); + } + + result.activatedBalanceWithGap = + activatedBalance + + _calculateValidatorsBalanceAprSafetyCap( + _checkParams.preCLValidatorsBalance + activatedBalance, + uint256(_limitsList.annualBalanceIncreaseBPLimit) * result.effectiveTimeElapsed + ); + } + + function _checkCLPendingBalanceIncrease( + AccountingCoreLimitsPacked memory _limitsList, + CLBalanceChangeCheckParams memory _checkParams, + uint256 _clWithdrawals + ) internal pure { + ActivationBalanceCheckResult memory activationCheckResult = _checkCLPendingBalanceAndCalculateActivatedBalanceWithGap( + _limitsList, + _checkParams + ); + uint256 preCLValidatorsBalanceAfterWithdrawals = _clWithdrawals >= _checkParams.preCLValidatorsBalance + ? 
0 + : _checkParams.preCLValidatorsBalance - _clWithdrawals; + if (_checkParams.postCLValidatorsBalance > preCLValidatorsBalanceAfterWithdrawals) { + uint256 validatorsBalanceIncrease = + _checkParams.postCLValidatorsBalance - preCLValidatorsBalanceAfterWithdrawals; + if (validatorsBalanceIncrease > activationCheckResult.activatedBalanceWithGap) { + revert IncorrectTotalCLBalanceIncrease( + activationCheckResult.activatedBalanceWithGap, + validatorsBalanceIncrease + ); + } + } + } + + function _checkModuleValidatorsBalanceIncrease( + IStakingRouter _stakingRouter, + AccountingCoreLimitsPacked memory _limitsList, + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + CLBalanceChangeCheckParams memory _checkParams + ) internal view { + ActivationBalanceCheckResult memory activationCheckResult = _checkCLPendingBalanceAndCalculateActivatedBalanceWithGap( + _limitsList, + _checkParams + ); + + if (_checkParams.postCLValidatorsBalance > _checkParams.preCLValidatorsBalance) { + uint256 validatorsBalanceIncrease = + _checkParams.postCLValidatorsBalance - _checkParams.preCLValidatorsBalance; + if (validatorsBalanceIncrease > activationCheckResult.activatedBalanceWithGap) { + revert IncorrectTotalCLBalanceIncrease( + activationCheckResult.activatedBalanceWithGap, + validatorsBalanceIncrease + ); + } + } + + uint256 totalActivatedInClByModules = _calculateTotalActivatedInClByModules( + _stakingRouter, + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule + ); + + uint256 consolidationLimitPerPeriodWei = _calculateAmountForPeriod( + uint256(_limitsList.consolidationEthAmountPerDayLimit) * 1 ether, + activationCheckResult.effectiveTimeElapsed + ); + uint256 totalActivatedInClByModulesLimit = + activationCheckResult.activatedBalanceWithGap + consolidationLimitPerPeriodWei; + if (totalActivatedInClByModules > totalActivatedInClByModulesLimit) { + revert 
IncorrectTotalModuleValidatorsBalanceIncrease( + totalActivatedInClByModulesLimit, + totalActivatedInClByModules + ); + } + } + + function _calculateTotalActivatedInClByModules( + IStakingRouter _stakingRouter, + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule + ) internal view returns (uint256 totalActivatedInClByModules) { + uint256 modulesCount = _stakingModuleIdsWithUpdatedBalance.length; + for (uint256 i = 0; i < modulesCount;) { + (bool hasPreviousAccounting, uint64 previousModuleValidatorsBalanceGwei,) = + _getModuleAccountingState(_stakingRouter, _stakingModuleIdsWithUpdatedBalance[i]); + uint256 previousModuleValidatorsBalanceWei = uint256(previousModuleValidatorsBalanceGwei) * 1 gwei; + // Skip module-delta aggregation until the module has previous accounting baseline. + if (hasPreviousAccounting && _validatorBalancesWeiByStakingModule[i] > previousModuleValidatorsBalanceWei) { + totalActivatedInClByModules += + _validatorBalancesWeiByStakingModule[i] - previousModuleValidatorsBalanceWei; + } + + unchecked { + ++i; + } + } + } + + /// @notice Returns stored module accounting state and whether it can be used as previous baseline in sanity checks. + /// @dev All modules existing at release activation get their initial accounting baseline via StakingRouter migration. + /// @dev Modules added after the release have no previous baseline in the first report, so module-delta + /// aggregation is skipped for them until `reportValidatorBalancesByStakingModule(...)` seeds their accounting state. + /// @param _stakingRouter StakingRouter contract used as the source of module accounting state. + /// @param _moduleId Staking module id. + /// @return hasPreviousAccounting True if previous accounting baseline is available for sanity checks. + /// @return previousValidatorsBalanceGwei Previous module validators balance in gwei. + /// @return exitedValidatorsCount Previous module exited validators count. 
+ function _getModuleAccountingState( + IStakingRouter _stakingRouter, + uint256 _moduleId + ) + internal + view + returns ( + bool hasPreviousAccounting, + uint64 previousValidatorsBalanceGwei, + uint64 exitedValidatorsCount + ) + { + (previousValidatorsBalanceGwei, exitedValidatorsCount) = + _stakingRouter.getStakingModuleStateAccounting(_moduleId); + hasPreviousAccounting = + previousValidatorsBalanceGwei != 0 || + exitedValidatorsCount != 0; + } + function _checkWithdrawalVaultBalance( uint256 _actualWithdrawalVaultBalance, uint256 _reportedWithdrawalVaultBalance @@ -636,97 +1109,186 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } } - function _addReportData(uint256 _timestamp, uint256 _exitedValidatorsCount, uint256 _negativeCLRebase) internal { - reportData.push(ReportData( - SafeCast.toUint64(_timestamp), - SafeCast.toUint64(_exitedValidatorsCount), - SafeCast.toUint128(_negativeCLRebase) - )); + function _addReportData( + uint256 _timestamp, + uint256 _clBalance, + uint256 _deposits, + uint256 _clWithdrawals + ) internal { + reportData.push( + ReportData({ + timestamp: SafeCast.toUint64(_timestamp), + clBalance: SafeCast.toUint128(_clBalance), + deposits: SafeCast.toUint128(_deposits), + clWithdrawals: SafeCast.toUint128(_clWithdrawals) + }) + ); } - function _sumNegativeRebasesNotOlderThan(uint256 _timestamp) internal view returns (uint256) { - uint256 sum; - for (int256 index = int256(reportData.length) - 1; index >= 0; index--) { - if (reportData[uint256(index)].timestamp > SafeCast.toUint64(_timestamp)) { - sum += reportData[uint256(index)].negativeCLRebaseWei; - } else { - break; + function _checkCLBalanceDecrease( + CLBalanceDecreaseCheckParams memory _checkParams, + uint256 _clWithdrawals + ) internal { + // Compute actual CL withdrawals for this period: + // clWithdrawals = current vault balance - vault balance after last report's transfer + uint256 reportTimestamp = _lastReportTimestamp + _checkParams.timeElapsed; + 
_addReportData(reportTimestamp, _checkParams.postCLBalance, _checkParams.deposits, _clWithdrawals); + _lastReportTimestamp = reportTimestamp; + + // If the CL balance didn't decrease accounting for withdrawals, skip the window check + if (_checkParams.preCLBalance <= _checkParams.postCLBalance) return; + if (_checkParams.preCLBalance - _checkParams.postCLBalance <= _clWithdrawals) return; + + uint256 len = reportData.length; + // Need at least two snapshots to build a window: baseline B[X-k] and current point B[X]. + // With migration we seed them upfront (baseline + bootstrap flow chunk), so checks work immediately. + // Without migration this still works, but the very first report cannot be checked and pre-deploy + // state is not part of the window until enough post-deploy snapshots are accumulated. + if (len < 2) return; + + (uint256 actualCLBalanceDiff, uint256 maxAllowedCLBalanceDiff) = _calcWindowDiff( + _checkParams.maxCLBalanceDecreaseBP, + _checkParams.postCLBalance, + len + ); + + if (actualCLBalanceDiff == 0) return; + uint256 refSlot = IBaseOracle(LIDO_LOCATOR.accountingOracle()).getLastProcessingRefSlot(); + + if (actualCLBalanceDiff > maxAllowedCLBalanceDiff) { + if (address(secondOpinionOracle) == address(0)) { + revert IncorrectCLBalanceDecrease(actualCLBalanceDiff, maxAllowedCLBalanceDiff); } + _askSecondOpinion( + refSlot, + _checkParams.postCLBalance, + _checkParams.withdrawalVaultBalance, + _checkParams.clBalanceOraclesErrorUpperBPLimit + ); + return; } - return sum; + + emit NegativeCLRebaseAccepted( + refSlot, + _checkParams.postCLBalance, + actualCLBalanceDiff, + maxAllowedCLBalanceDiff + ); } - function _exitedValidatorsAtTimestamp(uint256 _timestamp) internal view returns (uint256) { - for (int256 index = int256(reportData.length) - 1; index >= 0; index--) { - if (reportData[uint256(index)].timestamp <= SafeCast.toUint64(_timestamp)) { - return reportData[uint256(index)].totalExitedValidators; - } + function _getCLWithdrawals(uint256 
_withdrawalVaultBalance) internal view returns (uint256) { + if (_withdrawalVaultBalance < _lastVaultBalanceAfterTransfer) { + revert IncorrectCLWithdrawalsVaultBalance(_withdrawalVaultBalance, _lastVaultBalanceAfterTransfer); } - return 0; + return _withdrawalVaultBalance - _lastVaultBalanceAfterTransfer; } - function _checkCLBalanceDecrease( - LimitsList memory _limitsList, - uint256 _preCLBalance, - uint256 _postCLBalance, + function _checkWithdrawalsVaultTransfer( uint256 _withdrawalVaultBalance, - uint256 _postCLValidators, - uint256 _refSlot - ) internal { - uint256 reportTimestamp = GENESIS_TIME + _refSlot * SECONDS_PER_SLOT; + uint256 _withdrawalsVaultTransfer + ) internal pure { + // In the current Accounting flow `withdrawalsVaultTransfer` comes from `smoothenTokenRebase()`, + // where it is capped by `_withdrawalVaultBalance`, so the subtraction below cannot underflow. + // Keep this explicit guard anyway because `checkAccountingOracleReport` still receives it as an external input. + if (_withdrawalsVaultTransfer > _withdrawalVaultBalance) { + revert IncorrectWithdrawalsVaultTransfer(_withdrawalVaultBalance, _withdrawalsVaultTransfer); + } + } - // Checking exitedValidators against StakingRouter - StakingRouter stakingRouter = StakingRouter(payable(LIDO_LOCATOR.stakingRouter())); - uint256[] memory ids = stakingRouter.getStakingModuleIds(); + /// @notice Finalizes sanity-check state after a successful accounting report. + /// @dev Stores the withdrawals vault balance after the current report transfer so the next report can derive + /// actual CL withdrawals as `current vault balance - last vault balance after transfer`. + /// @dev Marks the post-migration first report as completed so subsequent reports stop skipping + /// `_checkModuleValidatorsBalanceIncrease(...)`; this is needed because StakingRouter migration can seed + /// per-module validators balances above the first oracle-reported values. 
+ /// @param _withdrawalVaultBalance Withdrawal vault balance reported for the current report, before transfer. + /// @param _withdrawalsVaultTransfer ETH amount transferred from the withdrawal vault during the current report. + function _finalizePostReportState( + uint256 _withdrawalVaultBalance, + uint256 _withdrawalsVaultTransfer + ) internal { + _lastVaultBalanceAfterTransfer = _withdrawalVaultBalance - _withdrawalsVaultTransfer; + _isPostMigrationFirstReportDone = true; + } - uint256 stakingRouterExitedValidators; - for (uint256 i = 0; i < ids.length; i++) { - StakingRouter.StakingModule memory module = stakingRouter.getStakingModule(ids[i]); - stakingRouterExitedValidators += module.exitedValidatorsCount; + function _calcWindowDiff( + uint256 _maxDecreaseBP, + uint256 _postCLBalance, + uint256 _reportCount + ) internal view returns (uint256 actualCLBalanceDiff, uint256 maxAllowedCLBalanceDiff) { + // Window formula: + // adjustedBase = B[baseline] + sum(deposits) - sum(clWithdrawals) + // actualDiff = abs(B[baseline] - B[current]) + // maxAllowed = adjustedBase * limitBP / 10_000 + uint256 lastIndex = _reportCount - 1; + uint256 lastTimestamp = reportData[lastIndex].timestamp; + uint256 windowStart = lastTimestamp > CL_BALANCE_WINDOW ? lastTimestamp - CL_BALANCE_WINDOW : 0; + uint256 baselineIndex = _findWindowStartIndex(lastIndex, windowStart); + + uint256 baselineBalance = reportData[baselineIndex].clBalance; + actualCLBalanceDiff = baselineBalance > _postCLBalance + ? 
baselineBalance - _postCLBalance + : _postCLBalance - baselineBalance; + + uint256 totalDeposits; + uint256 totalCLWithdrawals; + for (uint256 i = baselineIndex + 1; i <= lastIndex; ++i) { + totalDeposits += reportData[i].deposits; + totalCLWithdrawals += reportData[i].clWithdrawals; } - if (_preCLBalance <= _postCLBalance + _withdrawalVaultBalance) { - _addReportData(reportTimestamp, stakingRouterExitedValidators, 0); - // If the CL balance is not decreased, we don't need to check anything here - return; + uint256 adjustedBase = baselineBalance + totalDeposits; + if (adjustedBase < totalCLWithdrawals) { + revert IncorrectCLBalanceDecreaseWindowData(baselineBalance, totalDeposits, totalCLWithdrawals); } - _addReportData(reportTimestamp, stakingRouterExitedValidators, _preCLBalance - (_postCLBalance + _withdrawalVaultBalance)); - - // NOTE. Values of 18 and 54 days are taken from spec. Check the details here - // https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-23.md - uint256 negativeCLRebaseSum = _sumNegativeRebasesNotOlderThan(reportTimestamp - 18 days); - uint256 maxAllowedCLRebaseNegativeSum = - _limitsList.initialSlashingAmountPWei * ONE_PWEI * (_postCLValidators - _exitedValidatorsAtTimestamp(reportTimestamp - 18 days)) + - _limitsList.inactivityPenaltiesAmountPWei * ONE_PWEI * (_postCLValidators - _exitedValidatorsAtTimestamp(reportTimestamp - 54 days)); + adjustedBase -= totalCLWithdrawals; - if (negativeCLRebaseSum <= maxAllowedCLRebaseNegativeSum) { - // If the rebase diff is less or equal max allowed sum, we accept the report - emit NegativeCLRebaseAccepted(_refSlot, _postCLBalance + _withdrawalVaultBalance, negativeCLRebaseSum, maxAllowedCLRebaseNegativeSum); - return; - } + maxAllowedCLBalanceDiff = (adjustedBase * _maxDecreaseBP) / MAX_BASIS_POINTS; + } - // If there is no negative rebase oracle, then we don't need to check it's report - if (address(secondOpinionOracle) == address(0)) { - // If there is no oracle and the 
diff is more than limit, we revert - revert IncorrectCLBalanceDecrease(negativeCLRebaseSum, maxAllowedCLRebaseNegativeSum); + function _findWindowStartIndex( + uint256 _lastIndex, + uint256 _windowStart + ) internal view returns (uint256 windowStartIndex) { + windowStartIndex = _lastIndex; + while (windowStartIndex > 0 && reportData[windowStartIndex - 1].timestamp >= _windowStart) { + --windowStartIndex; } - _askSecondOpinion(_refSlot, _postCLBalance, _withdrawalVaultBalance, _limitsList); } - function _askSecondOpinion(uint256 _refSlot, uint256 _postCLBalance, uint256 _withdrawalVaultBalance, LimitsList memory _limitsList) internal { - (bool success, uint256 clOracleBalanceGwei, uint256 oracleWithdrawalVaultBalanceWei,,) = secondOpinionOracle.getReport(_refSlot); + function _askSecondOpinion( + uint256 _refSlot, + uint256 _postCLBalance, + uint256 _withdrawalVaultBalance, + uint256 _clBalanceOraclesErrorUpperBPLimit + ) internal { + (bool success, uint256 clOracleBalanceGwei, uint256 oracleWithdrawalVaultBalanceWei, , ) = secondOpinionOracle + .getReport(_refSlot); if (success) { uint256 clBalanceWei = clOracleBalanceGwei * 1 gwei; if (clBalanceWei < _postCLBalance) { - revert NegativeRebaseFailedCLBalanceMismatch(_postCLBalance, clBalanceWei, _limitsList.clBalanceOraclesErrorUpperBPLimit); + revert NegativeRebaseFailedCLBalanceMismatch( + _postCLBalance, + clBalanceWei, + _clBalanceOraclesErrorUpperBPLimit + ); } - if (MAX_BASIS_POINTS * (clBalanceWei - _postCLBalance) > - _limitsList.clBalanceOraclesErrorUpperBPLimit * clBalanceWei) { - revert NegativeRebaseFailedCLBalanceMismatch(_postCLBalance, clBalanceWei, _limitsList.clBalanceOraclesErrorUpperBPLimit); + if ( + MAX_BASIS_POINTS * (clBalanceWei - _postCLBalance) > + _clBalanceOraclesErrorUpperBPLimit * clBalanceWei + ) { + revert NegativeRebaseFailedCLBalanceMismatch( + _postCLBalance, + clBalanceWei, + _clBalanceOraclesErrorUpperBPLimit + ); } if (oracleWithdrawalVaultBalanceWei != _withdrawalVaultBalance) 
{ - revert NegativeRebaseFailedWithdrawalVaultBalanceMismatch(_withdrawalVaultBalance, oracleWithdrawalVaultBalanceWei); + revert NegativeRebaseFailedWithdrawalVaultBalanceMismatch( + _withdrawalVaultBalance, + oracleWithdrawalVaultBalanceWei + ); } emit NegativeCLRebaseConfirmed(_refSlot, _postCLBalance, _withdrawalVaultBalance); } else { @@ -735,7 +1297,7 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } function _checkAnnualBalancesIncrease( - LimitsList memory _limitsList, + AccountingCoreLimitsPacked memory _limitsList, uint256 _preCLBalance, uint256 _postCLBalance, uint256 _timeElapsed @@ -748,13 +1310,10 @@ contract OracleReportSanityChecker is AccessControlEnumerable { if (_preCLBalance >= _postCLBalance) return; - if (_timeElapsed == 0) { - _timeElapsed = DEFAULT_TIME_ELAPSED; - } + _timeElapsed = _getTimeElapsedForAllowanceChecks(_timeElapsed); uint256 balanceIncrease = _postCLBalance - _preCLBalance; - uint256 annualBalanceIncrease = ((365 days * MAX_BASIS_POINTS * balanceIncrease) / - _preCLBalance) / + uint256 annualBalanceIncrease = (ANNUAL_BALANCE_INCREASE_DENOMINATOR * balanceIncrease) / _preCLBalance / _timeElapsed; if (annualBalanceIncrease > _limitsList.annualBalanceIncreaseBPLimit) { @@ -762,22 +1321,8 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } } - function _checkAppearedValidatorsChurnLimit( - LimitsList memory _limitsList, - uint256 _appearedValidators, - uint256 _timeElapsed - ) internal pure { - if (_timeElapsed == 0) { - _timeElapsed = DEFAULT_TIME_ELAPSED; - } - - uint256 appearedLimit = (_limitsList.appearedValidatorsPerDayLimit * _timeElapsed) / SECONDS_PER_DAY; - - if (_appearedValidators > appearedLimit) revert IncorrectAppearedValidators(_appearedValidators); - } - function _checkLastFinalizableId( - LimitsList memory _limitsList, + OperationalLimitsPacked memory _limitsList, address _withdrawalQueue, uint256 _lastFinalizableId, uint256 _reportTimestamp @@ -787,12 +1332,12 @@ contract 
OracleReportSanityChecker is AccessControlEnumerable { IWithdrawalQueue.WithdrawalRequestStatus[] memory statuses = IWithdrawalQueue(_withdrawalQueue) .getWithdrawalStatus(requestIds); - if (_reportTimestamp < statuses[0].timestamp + _limitsList.requestTimestampMargin) + if (_reportTimestamp < statuses[0].timestamp + uint256(_limitsList.requestTimestampMargin)) revert IncorrectRequestFinalization(statuses[0].timestamp); } function _checkSimulatedShareRate( - LimitsList memory _limitsList, + AccountingCoreLimitsPacked memory _limitsList, uint256 _noWithdrawalsPostInternalEther, uint256 _noWithdrawalsPostInternalShares, uint256 _simulatedShareRate @@ -837,92 +1382,140 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } function _updateLimits(LimitsList memory _newLimitsList) internal { - LimitsList memory _oldLimitsList = _limits.unpack(); - if (_oldLimitsList.exitedValidatorsPerDayLimit != _newLimitsList.exitedValidatorsPerDayLimit) { - _checkLimitValue(_newLimitsList.exitedValidatorsPerDayLimit, 0, type(uint16).max); - emit ExitedValidatorsPerDayLimitSet(_newLimitsList.exitedValidatorsPerDayLimit); - } - if (_oldLimitsList.appearedValidatorsPerDayLimit != _newLimitsList.appearedValidatorsPerDayLimit) { - _checkLimitValue(_newLimitsList.appearedValidatorsPerDayLimit, 0, type(uint16).max); - emit AppearedValidatorsPerDayLimitSet(_newLimitsList.appearedValidatorsPerDayLimit); + _validateLimitsList(_newLimitsList); + _updateAccountingCoreLimits(_newLimitsList.packAccountingCore()); + _updateOperationalLimits(_newLimitsList.packOperational()); + } + + function _checkLimitValue(uint256 _value, uint256 _minAllowedValue, uint256 _maxAllowedValue) internal pure { + if (_value > _maxAllowedValue || _value < _minAllowedValue) { + revert IncorrectLimitValue(_value, _minAllowedValue, _maxAllowedValue); } - if (_oldLimitsList.annualBalanceIncreaseBPLimit != _newLimitsList.annualBalanceIncreaseBPLimit) { - 
_checkLimitValue(_newLimitsList.annualBalanceIncreaseBPLimit, 0, MAX_BASIS_POINTS); - emit AnnualBalanceIncreaseBPLimitSet(_newLimitsList.annualBalanceIncreaseBPLimit); + } + + function _validateLimitsList(LimitsList memory _limitsList) internal pure { + _checkLimitValue(_limitsList.exitedEthAmountPerDayLimit, 0, type(uint32).max); + _checkLimitValue(_limitsList.appearedEthAmountPerDayLimit, 0, type(uint32).max); + _checkLimitValue(_limitsList.consolidationEthAmountPerDayLimit, 0, type(uint32).max); + _checkLimitValue(_limitsList.exitedValidatorEthAmountLimit, 1, type(uint16).max); + _checkLimitValue(_limitsList.externalPendingBalanceCapEth, 0, type(uint16).max); + _checkLimitValue(_limitsList.annualBalanceIncreaseBPLimit, 0, MAX_BASIS_POINTS); + _checkLimitValue(_limitsList.simulatedShareRateDeviationBPLimit, 0, MAX_BASIS_POINTS); + _checkLimitValue(_limitsList.maxBalanceExitRequestedPerReportInEth, 0, type(uint16).max); + _checkLimitValue(_limitsList.maxEffectiveBalanceWeightWCType01, 1, type(uint16).max); + _checkLimitValue(_limitsList.maxEffectiveBalanceWeightWCType02, 1, type(uint16).max); + _checkLimitValue(_limitsList.maxItemsPerExtraDataTransaction, 0, type(uint16).max); + _checkLimitValue(_limitsList.maxNodeOperatorsPerExtraDataItem, 0, type(uint16).max); + _checkLimitValue(_limitsList.requestTimestampMargin, 0, type(uint32).max); + _checkLimitValue(_limitsList.maxPositiveTokenRebase, 1, type(uint64).max); + _checkLimitValue(_limitsList.maxCLBalanceDecreaseBP, 0, MAX_BASIS_POINTS); + _checkLimitValue(_limitsList.clBalanceOraclesErrorUpperBPLimit, 0, MAX_BASIS_POINTS); + } + + function _updateAccountingCoreLimits(AccountingCoreLimitsPacked memory _newLimits) internal { + AccountingCoreLimitsPacked memory _oldLimits = _accountingCoreLimits; + + if (_oldLimits.exitedEthAmountPerDayLimit != _newLimits.exitedEthAmountPerDayLimit) { + emit ExitedEthAmountPerDayLimitSet(_newLimits.exitedEthAmountPerDayLimit); } - if 
(_oldLimitsList.simulatedShareRateDeviationBPLimit != _newLimitsList.simulatedShareRateDeviationBPLimit) { - _checkLimitValue(_newLimitsList.simulatedShareRateDeviationBPLimit, 0, MAX_BASIS_POINTS); - emit SimulatedShareRateDeviationBPLimitSet(_newLimitsList.simulatedShareRateDeviationBPLimit); + if (_oldLimits.appearedEthAmountPerDayLimit != _newLimits.appearedEthAmountPerDayLimit) { + emit AppearedEthAmountPerDayLimitSet(_newLimits.appearedEthAmountPerDayLimit); } - if (_oldLimitsList.maxValidatorExitRequestsPerReport != _newLimitsList.maxValidatorExitRequestsPerReport) { - _checkLimitValue(_newLimitsList.maxValidatorExitRequestsPerReport, 0, type(uint16).max); - emit MaxValidatorExitRequestsPerReportSet(_newLimitsList.maxValidatorExitRequestsPerReport); + if (_oldLimits.consolidationEthAmountPerDayLimit != _newLimits.consolidationEthAmountPerDayLimit) { + emit ConsolidationEthAmountPerDayLimitSet(_newLimits.consolidationEthAmountPerDayLimit); } - if (_oldLimitsList.maxItemsPerExtraDataTransaction != _newLimitsList.maxItemsPerExtraDataTransaction) { - _checkLimitValue(_newLimitsList.maxItemsPerExtraDataTransaction, 0, type(uint16).max); - emit MaxItemsPerExtraDataTransactionSet(_newLimitsList.maxItemsPerExtraDataTransaction); + if (_oldLimits.exitedValidatorEthAmountLimit != _newLimits.exitedValidatorEthAmountLimit) { + emit ExitedValidatorEthAmountLimitSet(_newLimits.exitedValidatorEthAmountLimit); } - if (_oldLimitsList.maxNodeOperatorsPerExtraDataItem != _newLimitsList.maxNodeOperatorsPerExtraDataItem) { - _checkLimitValue(_newLimitsList.maxNodeOperatorsPerExtraDataItem, 0, type(uint16).max); - emit MaxNodeOperatorsPerExtraDataItemSet(_newLimitsList.maxNodeOperatorsPerExtraDataItem); + if (_oldLimits.externalPendingBalanceCapEth != _newLimits.externalPendingBalanceCapEth) { + emit ExternalPendingBalanceCapEthSet(_newLimits.externalPendingBalanceCapEth); } - if (_oldLimitsList.requestTimestampMargin != _newLimitsList.requestTimestampMargin) { - 
_checkLimitValue(_newLimitsList.requestTimestampMargin, 0, type(uint32).max); - emit RequestTimestampMarginSet(_newLimitsList.requestTimestampMargin); + if (_oldLimits.annualBalanceIncreaseBPLimit != _newLimits.annualBalanceIncreaseBPLimit) { + emit AnnualBalanceIncreaseBPLimitSet(_newLimits.annualBalanceIncreaseBPLimit); } - if (_oldLimitsList.maxPositiveTokenRebase != _newLimitsList.maxPositiveTokenRebase) { - _checkLimitValue(_newLimitsList.maxPositiveTokenRebase, 1, type(uint64).max); - emit MaxPositiveTokenRebaseSet(_newLimitsList.maxPositiveTokenRebase); + if (_oldLimits.simulatedShareRateDeviationBPLimit != _newLimits.simulatedShareRateDeviationBPLimit) { + emit SimulatedShareRateDeviationBPLimitSet(_newLimits.simulatedShareRateDeviationBPLimit); } - if (_oldLimitsList.initialSlashingAmountPWei != _newLimitsList.initialSlashingAmountPWei) { - _checkLimitValue(_newLimitsList.initialSlashingAmountPWei, 0, type(uint16).max); - emit InitialSlashingAmountSet(_newLimitsList.initialSlashingAmountPWei); + if (_oldLimits.maxPositiveTokenRebase != _newLimits.maxPositiveTokenRebase) { + emit MaxPositiveTokenRebaseSet(_newLimits.maxPositiveTokenRebase); } - if (_oldLimitsList.inactivityPenaltiesAmountPWei != _newLimitsList.inactivityPenaltiesAmountPWei) { - _checkLimitValue(_newLimitsList.inactivityPenaltiesAmountPWei, 0, type(uint16).max); - emit InactivityPenaltiesAmountSet(_newLimitsList.inactivityPenaltiesAmountPWei); + if (_oldLimits.maxCLBalanceDecreaseBP != _newLimits.maxCLBalanceDecreaseBP) { + emit MaxCLBalanceDecreaseBPSet(_newLimits.maxCLBalanceDecreaseBP); } - if (_oldLimitsList.clBalanceOraclesErrorUpperBPLimit != _newLimitsList.clBalanceOraclesErrorUpperBPLimit) { - _checkLimitValue(_newLimitsList.clBalanceOraclesErrorUpperBPLimit, 0, MAX_BASIS_POINTS); - emit CLBalanceOraclesErrorUpperBPLimitSet(_newLimitsList.clBalanceOraclesErrorUpperBPLimit); + if (_oldLimits.clBalanceOraclesErrorUpperBPLimit != _newLimits.clBalanceOraclesErrorUpperBPLimit) { + emit 
CLBalanceOraclesErrorUpperBPLimitSet(_newLimits.clBalanceOraclesErrorUpperBPLimit); } - _limits = _newLimitsList.pack(); + + _accountingCoreLimits = _newLimits; } - function _checkLimitValue(uint256 _value, uint256 _minAllowedValue, uint256 _maxAllowedValue) internal pure { - if (_value > _maxAllowedValue || _value < _minAllowedValue) { - revert IncorrectLimitValue(_value, _minAllowedValue, _maxAllowedValue); + function _updateOperationalLimits(OperationalLimitsPacked memory _newLimits) internal { + OperationalLimitsPacked memory _oldLimits = _operationalLimits; + + if (_oldLimits.maxBalanceExitRequestedPerReportInEth != _newLimits.maxBalanceExitRequestedPerReportInEth) { + emit MaxBalanceExitRequestedPerReportInEthSet(_newLimits.maxBalanceExitRequestedPerReportInEth); + } + if (_oldLimits.maxEffectiveBalanceWeightWCType01 != _newLimits.maxEffectiveBalanceWeightWCType01) { + emit MaxEffectiveBalanceWeightWCType01Set(_newLimits.maxEffectiveBalanceWeightWCType01); + } + if (_oldLimits.maxEffectiveBalanceWeightWCType02 != _newLimits.maxEffectiveBalanceWeightWCType02) { + emit MaxEffectiveBalanceWeightWCType02Set(_newLimits.maxEffectiveBalanceWeightWCType02); + } + if (_oldLimits.maxItemsPerExtraDataTransaction != _newLimits.maxItemsPerExtraDataTransaction) { + emit MaxItemsPerExtraDataTransactionSet(_newLimits.maxItemsPerExtraDataTransaction); + } + if (_oldLimits.maxNodeOperatorsPerExtraDataItem != _newLimits.maxNodeOperatorsPerExtraDataItem) { + emit MaxNodeOperatorsPerExtraDataItemSet(_newLimits.maxNodeOperatorsPerExtraDataItem); } + if (_oldLimits.requestTimestampMargin != _newLimits.requestTimestampMargin) { + emit RequestTimestampMarginSet(_newLimits.requestTimestampMargin); + } + + _operationalLimits = _newLimits; } - event ExitedValidatorsPerDayLimitSet(uint256 exitedValidatorsPerDayLimit); - event AppearedValidatorsPerDayLimitSet(uint256 appearedValidatorsPerDayLimit); + event ExitedEthAmountPerDayLimitSet(uint256 exitedEthAmountPerDayLimit); + event 
AppearedEthAmountPerDayLimitSet(uint256 appearedEthAmountPerDayLimit); + event ConsolidationEthAmountPerDayLimitSet(uint256 consolidationEthAmountPerDayLimit); + event ExitedValidatorEthAmountLimitSet(uint256 exitedValidatorEthAmountLimit); + event ExternalPendingBalanceCapEthSet(uint256 externalPendingBalanceCapEth); event SecondOpinionOracleChanged(ISecondOpinionOracle indexed secondOpinionOracle); event AnnualBalanceIncreaseBPLimitSet(uint256 annualBalanceIncreaseBPLimit); event SimulatedShareRateDeviationBPLimitSet(uint256 simulatedShareRateDeviationBPLimit); event MaxPositiveTokenRebaseSet(uint256 maxPositiveTokenRebase); - event MaxValidatorExitRequestsPerReportSet(uint256 maxValidatorExitRequestsPerReport); + event MaxBalanceExitRequestedPerReportInEthSet(uint256 maxBalanceExitRequestedPerReportInEth); + event MaxEffectiveBalanceWeightWCType01Set(uint256 maxEffectiveBalanceWeightWCType01); + event MaxEffectiveBalanceWeightWCType02Set(uint256 maxEffectiveBalanceWeightWCType02); event MaxItemsPerExtraDataTransactionSet(uint256 maxItemsPerExtraDataTransaction); event MaxNodeOperatorsPerExtraDataItemSet(uint256 maxNodeOperatorsPerExtraDataItem); event RequestTimestampMarginSet(uint256 requestTimestampMargin); - event InitialSlashingAmountSet(uint256 initialSlashingAmountPWei); - event InactivityPenaltiesAmountSet(uint256 inactivityPenaltiesAmountPWei); + event MaxCLBalanceDecreaseBPSet(uint256 maxCLBalanceDecreaseBP); event CLBalanceOraclesErrorUpperBPLimitSet(uint256 clBalanceOraclesErrorUpperBPLimit); event NegativeCLRebaseConfirmed(uint256 refSlot, uint256 clBalanceWei, uint256 withdrawalVaultBalance); - event NegativeCLRebaseAccepted(uint256 refSlot, uint256 clTotalBalance, uint256 clBalanceDecrease, uint256 maxAllowedCLRebaseNegativeSum); + event NegativeCLRebaseAccepted( + uint256 refSlot, + uint256 clTotalBalance, + uint256 clBalanceDecrease, + uint256 maxAllowedDecrease + ); error IncorrectLimitValue(uint256 value, uint256 minAllowedValue, uint256 
maxAllowedValue); error IncorrectWithdrawalsVaultBalance(uint256 actualWithdrawalVaultBalance); error IncorrectELRewardsVaultBalance(uint256 actualELRewardsVaultBalance); error IncorrectSharesRequestedToBurn(uint256 actualSharesToBurn); error IncorrectCLBalanceIncrease(uint256 annualBalanceDiff); - error IncorrectAppearedValidators(uint256 appearedValidatorsLimit); - error IncorrectNumberOfExitRequestsPerReport(uint256 maxRequestsCount); - error IncorrectExitedValidators(uint256 exitedValidatorsLimit); + error InvalidClBalancesData(); + error InconsistentValidatorsBalanceByModule(uint256 expected, uint256 actual); + error IncorrectTotalPendingBalance(uint256 maxAllowed, uint256 actual); + error IncorrectTotalActivatedBalance(uint256 maxAllowed, uint256 actual); + error IncorrectTotalCLBalanceIncrease(uint256 maxAllowed, uint256 actual); + error IncorrectTotalModuleValidatorsBalanceIncrease(uint256 maxAllowed, uint256 actual); + error AppearedEthAmountPerDayLimitExceeded(uint256 limitPerDay, uint256 appearedPerDay); + error IncorrectSumOfExitBalancePerReport(uint256 maxBalanceSum); error IncorrectRequestFinalization(uint256 requestCreationBlock); error IncorrectSimulatedShareRate(uint256 simulatedShareRate, uint256 actualShareRate); error TooManyItemsPerExtraDataTransaction(uint256 maxItemsCount, uint256 receivedItemsCount); - error ExitedValidatorsLimitExceeded(uint256 limitPerDay, uint256 exitedPerDay); + error ExitedEthAmountPerDayLimitExceeded(uint256 limitPerDay, uint256 exitedPerDay); error TooManyNodeOpsPerExtraDataItem(uint256 itemIndex, uint256 nodeOpsCount); error AdminCannotBeZero(); @@ -931,27 +1524,52 @@ contract OracleReportSanityChecker is AccessControlEnumerable { error NegativeRebaseFailedWithdrawalVaultBalanceMismatch(uint256 reportedValue, uint256 provedValue); error NegativeRebaseFailedSecondOpinionReportIsNotReady(); error CalledNotFromAccounting(); + error IncorrectCLWithdrawalsVaultBalance( + uint256 withdrawalVaultBalance, + uint256 
lastWithdrawalVaultBalanceAfterTransfer + ); + error IncorrectWithdrawalsVaultTransfer(uint256 withdrawalVaultBalance, uint256 withdrawalsVaultTransfer); + error IncorrectCLBalanceDecreaseWindowData( + uint256 baselineBalance, + uint256 totalDeposits, + uint256 totalCLWithdrawals + ); + error MigrationAlreadyDone(); + error UnexpectedLidoVersion(uint256 actual, uint256 expected); + + event BaselineSnapshotMigrated(uint256 clBalance, uint256 deposits, uint256 clWithdrawals); } library LimitsListPacker { error BasisPointsOverflow(uint256 value, uint256 maxValue); - function pack(LimitsList memory _limitsList) internal pure returns (LimitsListPacked memory res) { - res.exitedValidatorsPerDayLimit = SafeCast.toUint16(_limitsList.exitedValidatorsPerDayLimit); - res.appearedValidatorsPerDayLimit = SafeCast.toUint16(_limitsList.appearedValidatorsPerDayLimit); - res.annualBalanceIncreaseBPLimit = _toBasisPoints(_limitsList.annualBalanceIncreaseBPLimit); - res.simulatedShareRateDeviationBPLimit = _toBasisPoints(_limitsList.simulatedShareRateDeviationBPLimit); - res.requestTimestampMargin = SafeCast.toUint32(_limitsList.requestTimestampMargin); + function packAccountingCore( + LimitsList memory _limitsList + ) internal pure returns (AccountingCoreLimitsPacked memory res) { + res.exitedEthAmountPerDayLimit = SafeCast.toUint32(_limitsList.exitedEthAmountPerDayLimit); + res.appearedEthAmountPerDayLimit = SafeCast.toUint32(_limitsList.appearedEthAmountPerDayLimit); + res.consolidationEthAmountPerDayLimit = SafeCast.toUint32(_limitsList.consolidationEthAmountPerDayLimit); + res.annualBalanceIncreaseBPLimit = toBasisPoints(_limitsList.annualBalanceIncreaseBPLimit); + res.simulatedShareRateDeviationBPLimit = toBasisPoints(_limitsList.simulatedShareRateDeviationBPLimit); res.maxPositiveTokenRebase = SafeCast.toUint64(_limitsList.maxPositiveTokenRebase); - res.maxValidatorExitRequestsPerReport = SafeCast.toUint16(_limitsList.maxValidatorExitRequestsPerReport); + 
res.maxCLBalanceDecreaseBP = toBasisPoints(_limitsList.maxCLBalanceDecreaseBP); + res.clBalanceOraclesErrorUpperBPLimit = toBasisPoints(_limitsList.clBalanceOraclesErrorUpperBPLimit); + res.exitedValidatorEthAmountLimit = SafeCast.toUint16(_limitsList.exitedValidatorEthAmountLimit); + res.externalPendingBalanceCapEth = SafeCast.toUint16(_limitsList.externalPendingBalanceCapEth); + } + + function packOperational( + LimitsList memory _limitsList + ) internal pure returns (OperationalLimitsPacked memory res) { + res.maxBalanceExitRequestedPerReportInEth = SafeCast.toUint16(_limitsList.maxBalanceExitRequestedPerReportInEth); + res.maxEffectiveBalanceWeightWCType01 = SafeCast.toUint16(_limitsList.maxEffectiveBalanceWeightWCType01); + res.maxEffectiveBalanceWeightWCType02 = SafeCast.toUint16(_limitsList.maxEffectiveBalanceWeightWCType02); res.maxItemsPerExtraDataTransaction = SafeCast.toUint16(_limitsList.maxItemsPerExtraDataTransaction); res.maxNodeOperatorsPerExtraDataItem = SafeCast.toUint16(_limitsList.maxNodeOperatorsPerExtraDataItem); - res.initialSlashingAmountPWei = SafeCast.toUint16(_limitsList.initialSlashingAmountPWei); - res.inactivityPenaltiesAmountPWei = SafeCast.toUint16(_limitsList.inactivityPenaltiesAmountPWei); - res.clBalanceOraclesErrorUpperBPLimit = _toBasisPoints(_limitsList.clBalanceOraclesErrorUpperBPLimit); + res.requestTimestampMargin = SafeCast.toUint32(_limitsList.requestTimestampMargin); } - function _toBasisPoints(uint256 _value) private pure returns (uint16) { + function toBasisPoints(uint256 _value) internal pure returns (uint16) { if (_value > MAX_BASIS_POINTS) { revert BasisPointsOverflow(_value, MAX_BASIS_POINTS); } @@ -960,18 +1578,25 @@ library LimitsListPacker { } library LimitsListUnpacker { - function unpack(LimitsListPacked memory _limitsList) internal pure returns (LimitsList memory res) { - res.exitedValidatorsPerDayLimit = _limitsList.exitedValidatorsPerDayLimit; - res.appearedValidatorsPerDayLimit = 
_limitsList.appearedValidatorsPerDayLimit; - res.annualBalanceIncreaseBPLimit = _limitsList.annualBalanceIncreaseBPLimit; - res.simulatedShareRateDeviationBPLimit = _limitsList.simulatedShareRateDeviationBPLimit; - res.requestTimestampMargin = _limitsList.requestTimestampMargin; - res.maxPositiveTokenRebase = _limitsList.maxPositiveTokenRebase; - res.maxValidatorExitRequestsPerReport = _limitsList.maxValidatorExitRequestsPerReport; - res.maxItemsPerExtraDataTransaction = _limitsList.maxItemsPerExtraDataTransaction; - res.maxNodeOperatorsPerExtraDataItem = _limitsList.maxNodeOperatorsPerExtraDataItem; - res.initialSlashingAmountPWei = _limitsList.initialSlashingAmountPWei; - res.inactivityPenaltiesAmountPWei = _limitsList.inactivityPenaltiesAmountPWei; - res.clBalanceOraclesErrorUpperBPLimit = _limitsList.clBalanceOraclesErrorUpperBPLimit; + function unpack( + AccountingCoreLimitsPacked memory _accountingLimits, + OperationalLimitsPacked memory _operationalLimitsPacked + ) internal pure returns (LimitsList memory res) { + res.exitedEthAmountPerDayLimit = _accountingLimits.exitedEthAmountPerDayLimit; + res.appearedEthAmountPerDayLimit = _accountingLimits.appearedEthAmountPerDayLimit; + res.annualBalanceIncreaseBPLimit = _accountingLimits.annualBalanceIncreaseBPLimit; + res.simulatedShareRateDeviationBPLimit = _accountingLimits.simulatedShareRateDeviationBPLimit; + res.maxBalanceExitRequestedPerReportInEth = _operationalLimitsPacked.maxBalanceExitRequestedPerReportInEth; + res.maxEffectiveBalanceWeightWCType01 = _operationalLimitsPacked.maxEffectiveBalanceWeightWCType01; + res.maxEffectiveBalanceWeightWCType02 = _operationalLimitsPacked.maxEffectiveBalanceWeightWCType02; + res.maxItemsPerExtraDataTransaction = _operationalLimitsPacked.maxItemsPerExtraDataTransaction; + res.maxNodeOperatorsPerExtraDataItem = _operationalLimitsPacked.maxNodeOperatorsPerExtraDataItem; + res.requestTimestampMargin = _operationalLimitsPacked.requestTimestampMargin; + 
res.maxPositiveTokenRebase = _accountingLimits.maxPositiveTokenRebase; + res.maxCLBalanceDecreaseBP = _accountingLimits.maxCLBalanceDecreaseBP; + res.clBalanceOraclesErrorUpperBPLimit = _accountingLimits.clBalanceOraclesErrorUpperBPLimit; + res.consolidationEthAmountPerDayLimit = _accountingLimits.consolidationEthAmountPerDayLimit; + res.exitedValidatorEthAmountLimit = _accountingLimits.exitedValidatorEthAmountLimit; + res.externalPendingBalanceCapEth = _accountingLimits.externalPendingBalanceCapEth; } } diff --git a/contracts/common/interfaces/ICircuitBreaker.sol b/contracts/common/interfaces/ICircuitBreaker.sol new file mode 100644 index 0000000000..1ddc6e4427 --- /dev/null +++ b/contracts/common/interfaces/ICircuitBreaker.sol @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// See contracts/COMPILERS.md +// solhint-disable-next-line +pragma solidity >=0.4.24 <0.9.0; + +// https://github.com/lidofinance/circuit-breaker/blob/main/src/CircuitBreaker.sol +interface ICircuitBreaker { + function pause(address _pausable) external; + function registerPauser(address _pausable, address _newPauser) external; + function getPauser(address _pausable) external view returns (address); +} diff --git a/contracts/common/interfaces/ILido.sol b/contracts/common/interfaces/ILido.sol index 3293823f7b..a285688b3d 100644 --- a/contracts/common/interfaces/ILido.sol +++ b/contracts/common/interfaces/ILido.sol @@ -41,21 +41,30 @@ interface ILido is IERC20, IVersioned { view returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance); + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ); + function processClStateUpdate( uint256 _reportTimestamp, - uint256 _preClValidators, - uint256 _reportClValidators, - uint256 _reportClBalance + uint256 
_clValidatorsBalance, + uint256 _clPendingBalance ) external; function collectRewardsAndProcessWithdrawals( uint256 _reportTimestamp, uint256 _reportClBalance, - uint256 _adjustedPreCLBalance, + uint256 _principalCLBalance, uint256 _withdrawalsToWithdraw, uint256 _elRewardsToWithdraw, uint256 _lastWithdrawalRequestToFinalize, - uint256 _simulatedShareRate, + uint256 _withdrawalsShareRate, uint256 _etherToLockOnWithdrawalQueue ) external; diff --git a/contracts/common/interfaces/ILidoLocator.sol b/contracts/common/interfaces/ILidoLocator.sol index 84b7239964..92f7973f5f 100644 --- a/contracts/common/interfaces/ILidoLocator.sol +++ b/contracts/common/interfaces/ILidoLocator.sol @@ -26,6 +26,10 @@ interface ILidoLocator { function vaultFactory() external view returns (address); function lazyOracle() external view returns (address); function operatorGrid() external view returns (address); + function topUpGateway() external view returns (address); + function validatorExitDelayVerifier() external view returns (address); + function triggerableWithdrawalsGateway() external view returns (address); + function consolidationGateway() external view returns (address); /// @notice Returns core Lido protocol component addresses in a single call /// @dev This function provides a gas-efficient way to fetch multiple component addresses in a single call diff --git a/contracts/common/interfaces/IOracleReportSanityChecker.sol b/contracts/common/interfaces/IOracleReportSanityChecker.sol index a32d8d8162..107f52f9dd 100644 --- a/contracts/common/interfaces/IOracleReportSanityChecker.sol +++ b/contracts/common/interfaces/IOracleReportSanityChecker.sol @@ -7,8 +7,8 @@ pragma solidity >=0.4.24; interface IOracleReportSanityChecker { function smoothenTokenRebase( - uint256 _preTotalPooledEther, - uint256 _preTotalShares, + uint256 _preInternalEther, + uint256 _preInternalShares, uint256 _preCLBalance, uint256 _postCLBalance, uint256 _withdrawalVaultBalance, @@ -21,15 +21,28 @@ interface 
IOracleReportSanityChecker { // function checkAccountingOracleReport( uint256 _timeElapsed, - uint256 _preCLBalance, - uint256 _postCLBalance, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, uint256 _withdrawalVaultBalance, uint256 _elRewardsVaultBalance, uint256 _sharesRequestedToBurn, - uint256 _preCLValidators, - uint256 _postCLValidators + uint256 _deposits, + uint256 _withdrawalsVaultTransfer ) external; + // + function checkCLPendingBalanceIncrease( + uint256 _timeElapsed, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, + uint256 _withdrawalVaultBalance, + uint256 _deposits + ) external view; + // function checkWithdrawalQueueOracleReport( uint256 _lastFinalizableRequestId, @@ -38,10 +51,10 @@ interface IOracleReportSanityChecker { // function checkSimulatedShareRate( - uint256 _postTotalPooledEther, - uint256 _postTotalShares, - uint256 _etherLockedOnWithdrawalQueue, - uint256 _sharesBurntDueToWithdrawals, + uint256 _postInternalEther, + uint256 _postInternalShares, + uint256 _etherToFinalizeWQ, + uint256 _sharesToBurnForWithdrawals, uint256 _simulatedShareRate ) external view; } diff --git a/contracts/common/interfaces/IOssifiableProxy.sol b/contracts/common/interfaces/IOssifiableProxy.sol index 7eb3898388..83b8def590 100644 --- a/contracts/common/interfaces/IOssifiableProxy.sol +++ b/contracts/common/interfaces/IOssifiableProxy.sol @@ -6,6 +6,7 @@ pragma solidity >=0.4.24; interface IOssifiableProxy { function proxy__upgradeTo(address newImplementation) external; + function proxy__upgradeToAndCall(address newImplementation, bytes memory setupCalldata, bool forceCall) external; function proxy__changeAdmin(address newAdmin) external; function proxy__getAdmin() external view returns (address); function proxy__getImplementation() external view returns (address); diff --git 
a/contracts/common/interfaces/IPausableUntil.sol b/contracts/common/interfaces/IPausableUntil.sol new file mode 100644 index 0000000000..dbd6f27541 --- /dev/null +++ b/contracts/common/interfaces/IPausableUntil.sol @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2026 Lido +// SPDX-License-Identifier: GPL-3.0 + +// See contracts/COMPILERS.md +// solhint-disable-next-line +pragma solidity >=0.4.24 <0.9.0; + +/// @title IPausable +/// @notice Interface pausable contracts must implement for CircuitBreaker compatibility. +interface IPausableUntil { + /// @notice Whether the contract is currently paused. + function isPaused() external view returns (bool); + + /// @notice Pause the contract for a given duration. + /// @param _duration Duration in seconds. + function pauseFor(uint256 _duration) external; +} diff --git a/contracts/common/interfaces/IStakingModule.sol b/contracts/common/interfaces/IStakingModule.sol index 6641aa6a02..b075e23021 100644 --- a/contracts/common/interfaces/IStakingModule.sol +++ b/contracts/common/interfaces/IStakingModule.sol @@ -75,11 +75,10 @@ interface IStakingModule { /// official Deposit Contract. This value is a cumulative counter: even when the validator /// goes into EXITED state this counter is not decreasing /// @return depositableValidatorsCount number of validators in the set available for deposit - function getStakingModuleSummary() external view returns ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ); + function getStakingModuleSummary() + external + view + returns (uint256 totalExitedValidators, uint256 totalDepositedValidators, uint256 depositableValidatorsCount); /// @notice Returns all-validators summary belonging to the node operator with the given id /// @param _nodeOperatorId id of the operator to return report for @@ -96,16 +95,21 @@ interface IStakingModule { /// Deposit Contract. 
This value is a cumulative counter: even when the validator goes into /// EXITED state this counter is not decreasing /// @return depositableValidatorsCount number of validators in the set available for deposit - function getNodeOperatorSummary(uint256 _nodeOperatorId) external view returns ( - uint256 targetLimitMode, - uint256 targetValidatorsCount, - uint256 stuckValidatorsCount, - uint256 refundedValidatorsCount, - uint256 stuckPenaltyEndTimestamp, - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ); + function getNodeOperatorSummary( + uint256 _nodeOperatorId + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ); /// @notice Returns a counter that MUST change its value whenever the deposit data set changes. /// Below is the typical list of actions that requires an update of the nonce: @@ -134,11 +138,10 @@ interface IStakingModule { /// the returned ids is not defined and might change between calls. /// @dev This view must not revert in case of invalid data passed. When `_offset` exceeds the /// total node operators count or when `_limit` is equal to 0 MUST be returned empty array. - function getNodeOperatorIds(uint256 _offset, uint256 _limit) - external - view - returns (uint256[] memory nodeOperatorIds); - + function getNodeOperatorIds( + uint256 _offset, + uint256 _limit + ) external view returns (uint256[] memory nodeOperatorIds); /// @notice Called by StakingRouter to signal that stETH rewards were minted for this module. /// @param _totalShares Amount of stETH shares that were minted to reward all node operators. 
@@ -176,10 +179,7 @@ interface IStakingModule { /// 'unsafely' means that this method can both increase and decrease exited and stuck counters /// @param _nodeOperatorId Id of the node operator /// @param _exitedValidatorsCount New number of EXITED validators for the node operator - function unsafeUpdateValidatorsCount( - uint256 _nodeOperatorId, - uint256 _exitedValidatorsCount - ) external; + function unsafeUpdateValidatorsCount(uint256 _nodeOperatorId, uint256 _exitedValidatorsCount) external; /// @notice Obtains deposit data to be used by StakingRouter to deposit to the Ethereum Deposit /// contract @@ -189,9 +189,10 @@ interface IStakingModule { /// IMPORTANT: _depositCalldata MUST NOT modify the deposit data set of the staking module /// @return publicKeys Batch of the concatenated public validators keys /// @return signatures Batch of the concatenated deposit signatures for returned public keys - function obtainDepositData(uint256 _depositsCount, bytes calldata _depositCalldata) - external - returns (bytes memory publicKeys, bytes memory signatures); + function obtainDepositData( + uint256 _depositsCount, + bytes calldata _depositCalldata + ) external returns (bytes memory publicKeys, bytes memory signatures); /// @notice Called by StakingRouter after it finishes updating exited and stuck validators /// counts for this module's node operators. 
diff --git a/contracts/common/interfaces/IStakingModuleV2.sol b/contracts/common/interfaces/IStakingModuleV2.sol new file mode 100644 index 0000000000..a891c3569a --- /dev/null +++ b/contracts/common/interfaces/IStakingModuleV2.sol @@ -0,0 +1,30 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +interface IStakingModuleV2 { + // Top ups + /// @notice Validates provided keys and calculates deposit allocations for top-up + /// @dev Reverts if any key doesn't belong to the module or data is invalid + /// @param depositAmount Total ether amount available for top-up (must be multiple of 1 gwei) + /// @param pubkeys List of validator public keys to top up + /// @param keyIndices Indices of keys within their respective operators + /// @param operatorIds Node operator IDs that own the keys + /// @param topUpLimits Maximum amount that can be deposited per key based on Consensus Layer data and SR internal logic. 
+ /// @return allocations Amount to deposit to each corresponding key + /// @dev allocations list can contain zero values + /// @dev sum of allocations can be less than or equal to depositAmount + /// @dev Values depositAmount, topUpLimits, allocations are denominated in wei + function allocateDeposits( + uint256 depositAmount, + bytes[] calldata pubkeys, + uint256[] calldata keyIndices, + uint256[] calldata operatorIds, + uint256[] calldata topUpLimits + ) external returns (uint256[] memory allocations); + + /// @notice returns the total amount of ETH staked in the module, in wei + function getTotalModuleStake() external view returns (uint256); +} diff --git a/contracts/common/interfaces/ReportValues.sol b/contracts/common/interfaces/ReportValues.sol index db2293a1b8..0a04fafb65 100644 --- a/contracts/common/interfaces/ReportValues.sol +++ b/contracts/common/interfaces/ReportValues.sol @@ -10,10 +10,10 @@ struct ReportValues { uint256 timestamp; /// @notice seconds elapsed since the previous report uint256 timeElapsed; - /// @notice total number of Lido validators on Consensus Layers (exited included) - uint256 clValidators; - /// @notice sum of all Lido validators' balances on Consensus Layer - uint256 clBalance; + /// @notice Validators balance without pending deposits + uint256 clValidatorsBalance; + /// @notice Pending deposits balance on Consensus Layer + uint256 clPendingBalance; /// @notice withdrawal vault balance uint256 withdrawalVaultBalance; /// @notice elRewards vault balance diff --git a/contracts/common/interfaces/TopUpWitness.sol b/contracts/common/interfaces/TopUpWitness.sol new file mode 100644 index 0000000000..ada957af0c --- /dev/null +++ b/contracts/common/interfaces/TopUpWitness.sol @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// solhint-disable-next-line +pragma solidity >=0.8.9; + +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/ValidatorWitness.sol"; + +struct 
TopUpData { + uint256 moduleId; + uint256[] keyIndices; + uint256[] operatorIds; + uint256[] validatorIndices; + BeaconRootData beaconRootData; + ValidatorWitness[] validatorWitness; + uint256[] pendingBalanceGwei; +} diff --git a/contracts/common/interfaces/ValidatorWitness.sol b/contracts/common/interfaces/ValidatorWitness.sol new file mode 100644 index 0000000000..c89728dc75 --- /dev/null +++ b/contracts/common/interfaces/ValidatorWitness.sol @@ -0,0 +1,24 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// solhint-disable-next-line +pragma solidity >=0.8.9; + +struct BeaconRootData { + uint64 childBlockTimestamp; // for EIP-4788 lookup + uint64 slot; // header slot + uint64 proposerIndex; // header proposer +} + +struct ValidatorWitness { + // Merkle path: Validator[i] → … → state_root → beacon_block_root + bytes32[] proofValidator; + // Full Validator container fields (minus WC) + bytes pubkey; + uint64 effectiveBalance; + uint64 activationEligibilityEpoch; + uint64 activationEpoch; + uint64 exitEpoch; + uint64 withdrawableEpoch; + bool slashed; +} diff --git a/contracts/common/lib/Bytes32String.sol b/contracts/common/lib/Bytes32String.sol new file mode 100644 index 0000000000..495bcb9642 --- /dev/null +++ b/contracts/common/lib/Bytes32String.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0 +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +/** + * @title Converts bytes32 to string and vice versa + * @author KRogLA + * @notice Allows packing and unpacking short strings (length < 32 bytes) to/from bytes32 + * (e.g., to store a string in an immutable variable). + * @dev Packed bytes32 layout: [31 bytes of data][1 byte of length] + */ +library Bytes32String { + error StringTooLong(); + /// @notice Packs a string into bytes32. + /// @dev Reverts if the string length is greater than 31 bytes. 
+ function toBytes32(string memory s) internal pure returns (bytes32 r) { + uint256 len = bytes(s).length; + if (len > 31) revert StringTooLong(); + assembly ("memory-safe") { + let data := mload(add(s, 32)) + // Clear potentially dirty lower (32 - len) bytes using a mask. + let mask := shl(mul(sub(32, len), 8), not(0)) + r := or(and(data, mask), len) + } + } + + /// @notice Unpacks a bytes32 value into a string. + function toString(bytes32 b) internal pure returns (string memory s) { + assembly ("memory-safe") { + let len := and(b, 0xFF) + s := mload(0x40) + mstore(s, len) + // Zero out the last byte (length). + mstore(add(s, 32), and(b, not(0xFF))) + mstore(0x40, add(s, 64)) + } + } +} diff --git a/contracts/common/lib/RateLimit.sol b/contracts/common/lib/RateLimit.sol new file mode 100644 index 0000000000..434ec83e9a --- /dev/null +++ b/contracts/common/lib/RateLimit.sol @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* solhint-disable one-contract-per-file */ + +/* See contracts/COMPILERS.md */ +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +struct LimitData { + uint32 maxLimit; // Maximum limit + uint32 prevLimit; // Limit left after previous operations + uint32 prevTimestamp; // Timestamp of the last update + uint32 frameDurationInSec; // Seconds that should pass to restore part of the limit + uint32 itemsPerFrame; // Restored items per frame +} +library RateLimitStorage { + struct DataStorage { + LimitData _limitData; + } + + function getStorageLimit(bytes32 _position) internal view returns (LimitData memory) { + return _getDataStorage(_position)._limitData; + } + + function setStorageLimit(bytes32 _position, LimitData memory _data) internal { + _getDataStorage(_position)._limitData = _data; + } + + function _getDataStorage(bytes32 _position) private pure returns (DataStorage storage $) { + assembly { + $.slot := _position + } + } +} +// A replenishing quota per 
time frame +library RateLimit { + /// @notice Error when new value for remaining limit exceeds maximum limit. + error LimitExceeded(); + + /// @notice Error when max limit exceeds uint32 max. + error TooLargeMaxLimit(); + + /// @notice Error when frame duration exceeds uint32 max. + error TooLargeFrameDuration(); + + /// @notice Error when items per frame exceed the maximum limit. + error TooLargeItemsPerFrame(); + + /// @notice Error when frame duration is zero. + error ZeroFrameDuration(); + + function calculateCurrentLimit( + LimitData memory _data, + uint256 timestamp + ) internal pure returns (uint256 currentLimit) { + uint256 secondsPassed = timestamp - _data.prevTimestamp; + + if (secondsPassed < _data.frameDurationInSec || _data.itemsPerFrame == 0) { + return _data.prevLimit; + } + + uint256 framesPassed = secondsPassed / _data.frameDurationInSec; + uint256 restoredLimit = framesPassed * _data.itemsPerFrame; + + uint256 newLimit = _data.prevLimit + restoredLimit; + if (newLimit > _data.maxLimit) { + newLimit = _data.maxLimit; + } + + return newLimit; + } + + function updatePrevLimit( + LimitData memory _data, + uint256 newLimit, + uint256 timestamp + ) internal pure returns (LimitData memory) { + if (_data.maxLimit < newLimit) revert LimitExceeded(); + + uint256 secondsPassed = timestamp - _data.prevTimestamp; + uint256 framesPassed = secondsPassed / _data.frameDurationInSec; + uint32 passedTime = uint32(framesPassed) * _data.frameDurationInSec; + + _data.prevLimit = uint32(newLimit); + _data.prevTimestamp += passedTime; + + return _data; + } + + function setLimits( + LimitData memory _data, + uint256 maxLimit, + uint256 itemsPerFrame, + uint256 frameDurationInSec, + uint256 timestamp + ) internal pure returns (LimitData memory) { + if (maxLimit > type(uint32).max) revert TooLargeMaxLimit(); + if (frameDurationInSec > type(uint32).max) revert TooLargeFrameDuration(); + if (itemsPerFrame > maxLimit) revert TooLargeItemsPerFrame(); + if (frameDurationInSec == 
0) revert ZeroFrameDuration(); + + _data.itemsPerFrame = uint32(itemsPerFrame); + _data.frameDurationInSec = uint32(frameDurationInSec); + + if ( + // new maxLimit is smaller than prev remaining limit + maxLimit < _data.prevLimit || + // previously items were unlimited + _data.maxLimit == 0 + ) { + _data.prevLimit = uint32(maxLimit); + } + + _data.maxLimit = uint32(maxLimit); + _data.prevTimestamp = uint32(timestamp); + + return _data; + } + + function isLimitSet(LimitData memory _data) internal pure returns (bool) { + return _data.maxLimit != 0; + } +} diff --git a/contracts/common/lib/WithdrawalCredentials.sol b/contracts/common/lib/WithdrawalCredentials.sol new file mode 100644 index 0000000000..c016093ca7 --- /dev/null +++ b/contracts/common/lib/WithdrawalCredentials.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-3.0 +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +/** + * @title Withdrawal credentials helpers. + * @author KRogLA + * @notice Provides functionality for managing withdrawal credentials + * @dev WC bytes layout: [0] = prefix (0x00/0x01/0x02), [1..11] = zero, [12..31] = execution address (20b) + */ +library WithdrawalCredentials { + // Withdrawal Credentials types + uint8 public constant WC_TYPE_01 = 0x01; + uint8 public constant WC_TYPE_02 = 0x02; + + /// @notice Get the current prefix (0x00/0x01/0x02) + function getType(bytes32 wc) internal pure returns (uint8) { + return uint8(uint256(wc) >> 248); + } + + /// @notice Extract the execution address from the WC (low 20 bytes) + function getAddr(bytes32 wc) internal pure returns (address) { + return address(uint160(uint256(wc))); + } + + /// @notice Set 1st byte to wcType (0x00/0x01/0x02), keep the rest + function setType(bytes32 wc, uint8 wcType) internal pure returns (bytes32) { + return bytes32((uint256(wc) & type(uint248).max) | (uint256(wcType) << 248)); + } + + function isTypeValid(uint256 wcType) internal pure returns (bool) { + return 
isType1(wcType) || isType2(wcType); + } + + function isType1(bytes32 wc) internal pure returns (bool) { + return isType1(getType(wc)); + } + + function isType2(bytes32 wc) internal pure returns (bool) { + return isType2(getType(wc)); + } + + function isType1(uint256 wcType) internal pure returns (bool) { + return wcType == WC_TYPE_01; + } + + function isType2(uint256 wcType) internal pure returns (bool) { + return wcType == WC_TYPE_02; + } +} diff --git a/contracts/upgrade/UpgradeConfig.sol b/contracts/upgrade/UpgradeConfig.sol new file mode 100644 index 0000000000..02116e738a --- /dev/null +++ b/contracts/upgrade/UpgradeConfig.sol @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {Bytes32String} from "contracts/common/lib/Bytes32String.sol"; +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {IUpgradeConfig} from "./interfaces/IUpgradeConfig.sol"; +import { + UpgradeParameters, + EasyTrackNewFactories, + EasyTrackOldFactories, + CoreUpgradeParams, + CSMUpgradeParams, + CuratedModuleParams, + GlobalConfig, + CoreUpgradeConfig, + CuratedModuleConfig, + CSMUpgradeConfig, + IAragonKernel, + IAragonApp, + IEasyTrack, + IBaseModuleV3, + ICuratedModule, + IFeeOracleV3, + IFeeDistributorV3, + IValidatorStrikesV3 +} from "./UpgradeTypes.sol"; + +/** + * @title UpgradeConfig + * @notice Stores immutable addresses and parameters required for the upgrade process. + * This contract centralizes address/param management for UpgradeTemplate and UpgradeVoteScript. 
+ */ +contract UpgradeConfig is IUpgradeConfig { + // + // -------- public fields -------- + // + address public immutable LOCATOR; + address public immutable AGENT; + address public immutable VOTING; + address public immutable DUAL_GOVERNANCE; + address public immutable RESEAL_MANAGER; + address public immutable CIRCUIT_BREAKER; + address public immutable BURNER; + + // + // -------- Pre-upgrade old implementations -------- + // + address internal immutable OLD_LOCATOR_IMPL; + address internal immutable OLD_LIDO_IMPL; + address internal immutable OLD_ACCOUNTING_IMPL; + address internal immutable OLD_ACCOUNTING_ORACLE_IMPL; + address internal immutable OLD_STAKING_ROUTER_IMPL; + address internal immutable OLD_WITHDRAWAL_VAULT_IMPL; + address internal immutable OLD_VALIDATORS_EXIT_BUS_ORACLE_IMPL; + address internal immutable OLD_ORACLE_REPORT_SANITY_CHECKER; + address internal immutable OLD_DEPOSIT_SECURITY_MODULE; + + // + // -------- New implementations -------- + // + address internal immutable NEW_LOCATOR_IMPL; + address internal immutable NEW_LIDO_IMPL; + address internal immutable NEW_ACCOUNTING_IMPL; + address internal immutable NEW_ACCOUNTING_ORACLE_IMPL; + address internal immutable NEW_STAKING_ROUTER_IMPL; + address internal immutable NEW_WITHDRAWAL_VAULT_IMPL; + address internal immutable NEW_VALIDATORS_EXIT_BUS_ORACLE_IMPL; + address internal immutable NEW_ORACLE_REPORT_SANITY_CHECKER; + address internal immutable NEW_DEPOSIT_SECURITY_MODULE; + address internal immutable CONSOLIDATION_BUS_IMPL; + address internal immutable CONSOLIDATION_MIGRATOR_IMPL; + address internal immutable TOP_UP_GATEWAY_IMPL; + + // + // -------- Upgraded contracts -------- + // + address internal immutable LIDO; + address internal immutable STAKING_ROUTER; + address internal immutable ACCOUNTING_ORACLE; + address internal immutable VALIDATORS_EXIT_BUS_ORACLE; + address internal immutable WITHDRAWAL_VAULT; + address internal immutable ACCOUNTING; + address internal immutable 
TRIGGERABLE_WITHDRAWALS_GATEWAY; + + // + // -------- New contracts -------- + // + address internal immutable TOP_UP_GATEWAY; + address internal immutable CONSOLIDATION_GATEWAY; + address internal immutable CONSOLIDATION_BUS; + address internal immutable CONSOLIDATION_MIGRATOR; + // address internal immutable ORACLE_REPORT_SANITY_CHECKER; + // address internal immutable DEPOSIT_SECURITY_MODULE; + address internal immutable VALIDATOR_EXIT_DELAY_VERIFIER; + + // + // -------- Upgrade parameters -------- + // + uint256 internal immutable LIDO_DEPOSITS_RESERVE_TARGET; + address internal immutable CURATED_MODULE_COMMITTEE; + address internal immutable TOP_UP_GATEWAY_DEPOSITOR; + address internal immutable CONSOLIDATION_GATEWAY_PAUSER; + uint256 internal immutable TW_MAX_EXIT_REQUESTS_LIMIT; + uint256 internal immutable TW_EXITS_PER_FRAME; + uint256 internal immutable TW_FRAME_DURATION_IN_SEC; + uint256 internal immutable AO_CONSENSUS_VERSION; + uint256 internal immutable VEBO_MAX_VALIDATORS_PER_REPORT; + uint256 internal immutable VEBO_MAX_EXIT_BALANCE_ETH; + uint256 internal immutable VEBO_BALANCE_PER_FRAME_ETH; + uint256 internal immutable VEBO_FRAME_DURATION_IN_SEC; + uint256 internal immutable VEBO_CONSENSUS_VERSION; + + // -------- EasyTrack addresses -------- + // + address internal immutable EASY_TRACK; + address internal immutable EASY_TRACK_EVM_SCRIPT_EXECUTOR; + // ETF = EasyTrack Factory + // SR Factories + address internal immutable ETF_NEW_UPDATE_STAKING_MODULE_SHARE_LIMITS; + address internal immutable ETF_NEW_ALLOW_CONSOLIDATION_PAIR; + // CSM Factories + address internal immutable ETF_NEW_SET_MERKLE_GATE_TREE_FOR_CSM; + address internal immutable ETF_NEW_REPORT_WITHDRAWALS_FOR_SLASHED_VALIDATORS_FOR_CSM; + address internal immutable ETF_NEW_SETTLE_GENERAL_DELAYED_PENALTY_FOR_CSM; + // CM Factories + address internal immutable ETF_NEW_SET_MERKLE_GATE_TREE_FOR_CM; + address internal immutable ETF_NEW_REPORT_WITHDRAWALS_FOR_SLASHED_VALIDATORS_FOR_CM; + 
address internal immutable ETF_NEW_SETTLE_GENERAL_DELAYED_PENALTY_FOR_CM; + address internal immutable ETF_NEW_CREATE_OR_UPDATE_OPERATOR_GROUP; + // old factories + address internal immutable ETF_OLD_SETTLE_EL_STEALING_PENALTY; + address internal immutable ETF_OLD_CSM_SET_VETTED_GATE_TREE; + + // + // ------- Misc ------- + // + address internal immutable KERNEL; + bytes32 internal immutable LIDO_APP_ID; + address internal immutable ACL; + + // CSM + address internal immutable CSM; + address internal immutable CSM_IMPL; + address internal immutable CSM_PARAMETERS_REGISTRY; + address internal immutable CSM_PARAMETERS_REGISTRY_IMPL; + address internal immutable CSM_FEE_ORACLE; + address internal immutable CSM_FEE_ORACLE_IMPL; + uint256 internal immutable CSM_FEE_ORACLE_CONSENSUS_VERSION; + address internal immutable CSM_VETTED_GATE; + address internal immutable CSM_IDENTIFIED_DVT_CLUSTER_GATE; + address internal immutable CSM_IDENTIFIED_DVT_CLUSTER_CURVE_SETUP; + uint256 internal immutable CSM_IDENTIFIED_DVT_CLUSTER_BOND_CURVE_ID; + address internal immutable CSM_VETTED_GATE_IMPL; + address internal immutable CSM_ACCOUNTING; + address internal immutable CSM_ACCOUNTING_IMPL; + address internal immutable CSM_FEE_DISTRIBUTOR; + address internal immutable CSM_FEE_DISTRIBUTOR_IMPL; + address internal immutable CSM_EXIT_PENALTIES; + address internal immutable CSM_EXIT_PENALTIES_IMPL; + address internal immutable CSM_STRIKES; + address internal immutable CSM_STRIKES_IMPL; + address internal immutable CSM_OLD_PERMISSIONLESS_GATE; + address internal immutable CSM_OLD_VERIFIER; + address internal immutable CSM_NEW_VERIFIER; + address internal immutable CSM_NEW_PERMISSIONLESS_GATE; + address internal immutable CSM_OLD_EJECTOR; + address internal immutable CSM_EJECTOR; + address internal immutable CSM_COMMITTEE; + + // CMv2 + address internal immutable CURATED_MODULE; + address[] internal CURATED_GATES; + address internal immutable CURATED_PARAMETERS_REGISTRY; + address internal 
immutable CURATED_ACCOUNTING; + address internal immutable CURATED_VERIFIER; + address internal immutable CURATED_CIRCUIT_BREAKER_PAUSER; + address internal immutable CURATED_FEE_DISTRIBUTOR; + address internal immutable CURATED_FEE_ORACLE; + address internal immutable CURATED_STRIKES; + address internal immutable CURATED_EJECTOR; + address internal immutable CURATED_HASH_CONSENSUS; + bytes32 internal immutable CURATED_MODULE_NAME; + uint256 internal immutable CURATED_STAKE_SHARE_LIMIT; + uint256 internal immutable CURATED_PRIORITY_EXIT_SHARE_THRESHOLD; + uint256 internal immutable CURATED_STAKING_MODULE_FEE; + uint256 internal immutable CURATED_TREASURY_FEE; + uint256 internal immutable CURATED_MAX_DEPOSITS_PER_BLOCK; + uint256 internal immutable CURATED_MIN_DEPOSIT_BLOCK_DISTANCE; + uint256 internal immutable CURATED_FEE_ORACLE_CONSENSUS_VERSION; + uint256 internal immutable CURATED_HASH_CONSENSUS_INITIAL_EPOCH; + address internal immutable CURATED_META_REGISTRY; + + // UpgradeParameters public upgradeParams; + + constructor(UpgradeParameters memory params) { + // Core upgrade params + CoreUpgradeParams memory coreUpgradeParams = params.coreUpgrade; + + if (coreUpgradeParams.newLocatorImpl == coreUpgradeParams.oldLocatorImpl) { + revert NewAndOldLocatorImplementationsMustBeDifferent(); + } + + // Save passed parameters + AGENT = params.agent; + KERNEL = IAragonApp(AGENT).kernel(); + ACL = IAragonKernel(KERNEL).acl(); + + VOTING = params.voting; + DUAL_GOVERNANCE = params.dualGovernance; + RESEAL_MANAGER = params.resealManager; + CIRCUIT_BREAKER = params.circuitBreaker; + + EASY_TRACK = params.easyTrack; + EASY_TRACK_EVM_SCRIPT_EXECUTOR = IEasyTrack(params.easyTrack).evmScriptExecutor(); + + OLD_LOCATOR_IMPL = coreUpgradeParams.oldLocatorImpl; + OLD_LIDO_IMPL = coreUpgradeParams.oldLidoImpl; + OLD_ACCOUNTING_IMPL = coreUpgradeParams.oldAccountingImpl; + OLD_ACCOUNTING_ORACLE_IMPL = coreUpgradeParams.oldAccountingOracleImpl; + OLD_STAKING_ROUTER_IMPL = 
coreUpgradeParams.oldStakingRouterImpl; + OLD_WITHDRAWAL_VAULT_IMPL = coreUpgradeParams.oldWithdrawalVaultImpl; + OLD_VALIDATORS_EXIT_BUS_ORACLE_IMPL = coreUpgradeParams.oldValidatorsExitBusOracleImpl; + + NEW_LOCATOR_IMPL = coreUpgradeParams.newLocatorImpl; + NEW_LIDO_IMPL = coreUpgradeParams.newLidoImpl; + NEW_ACCOUNTING_ORACLE_IMPL = coreUpgradeParams.newAccountingOracleImpl; + NEW_STAKING_ROUTER_IMPL = coreUpgradeParams.newStakingRouterImpl; + NEW_ACCOUNTING_IMPL = coreUpgradeParams.newAccountingImpl; + NEW_WITHDRAWAL_VAULT_IMPL = coreUpgradeParams.newWithdrawalVaultImpl; + NEW_VALIDATORS_EXIT_BUS_ORACLE_IMPL = coreUpgradeParams.newValidatorsExitBusOracleImpl; + CONSOLIDATION_BUS_IMPL = coreUpgradeParams.consolidationBusImpl; + CONSOLIDATION_MIGRATOR_IMPL = coreUpgradeParams.consolidationMigratorImpl; + TOP_UP_GATEWAY_IMPL = coreUpgradeParams.topUpGatewayImpl; + + CONSOLIDATION_BUS = coreUpgradeParams.consolidationBus; + CONSOLIDATION_MIGRATOR = coreUpgradeParams.consolidationMigrator; + + LIDO_DEPOSITS_RESERVE_TARGET = coreUpgradeParams.lidoDepositsReserveTarget; + CURATED_MODULE_COMMITTEE = coreUpgradeParams.curatedModuleCommittee; + TOP_UP_GATEWAY_DEPOSITOR = coreUpgradeParams.topUpGatewayDepositor; + CONSOLIDATION_GATEWAY_PAUSER = coreUpgradeParams.consolidationGatewayPauser; + TW_MAX_EXIT_REQUESTS_LIMIT = coreUpgradeParams.twMaxExitRequestsLimit; + TW_EXITS_PER_FRAME = coreUpgradeParams.twExitsPerFrame; + TW_FRAME_DURATION_IN_SEC = coreUpgradeParams.twFrameDurationInSec; + + AO_CONSENSUS_VERSION = coreUpgradeParams.aoConsensusVersion; + VEBO_MAX_VALIDATORS_PER_REPORT = coreUpgradeParams.veboMaxValidatorsPerReport; + VEBO_MAX_EXIT_BALANCE_ETH = coreUpgradeParams.veboMaxExitBalanceEth; + VEBO_BALANCE_PER_FRAME_ETH = coreUpgradeParams.veboBalancePerFrameEth; + VEBO_FRAME_DURATION_IN_SEC = coreUpgradeParams.veboFrameDurationInSec; + VEBO_CONSENSUS_VERSION = coreUpgradeParams.veboConsensusVersion; + + // EasyTrack new factories + EasyTrackNewFactories memory 
newFactories = params.newFactories; + ETF_NEW_UPDATE_STAKING_MODULE_SHARE_LIMITS = newFactories.UpdateStakingModuleShareLimits; + ETF_NEW_ALLOW_CONSOLIDATION_PAIR = newFactories.AllowConsolidationPair; + ETF_NEW_SET_MERKLE_GATE_TREE_FOR_CSM = newFactories.SetMerkleGateTreeForCSM; + ETF_NEW_REPORT_WITHDRAWALS_FOR_SLASHED_VALIDATORS_FOR_CSM = + newFactories.ReportWithdrawalsForSlashedValidatorsForCSM; + ETF_NEW_SETTLE_GENERAL_DELAYED_PENALTY_FOR_CSM = newFactories.SettleGeneralDelayedPenaltyForCSM; + ETF_NEW_SET_MERKLE_GATE_TREE_FOR_CM = newFactories.SetMerkleGateTreeForCM; + ETF_NEW_REPORT_WITHDRAWALS_FOR_SLASHED_VALIDATORS_FOR_CM = + newFactories.ReportWithdrawalsForSlashedValidatorsForCM; + ETF_NEW_SETTLE_GENERAL_DELAYED_PENALTY_FOR_CM = newFactories.SettleGeneralDelayedPenaltyForCM; + ETF_NEW_CREATE_OR_UPDATE_OPERATOR_GROUP = newFactories.CreateOrUpdateOperatorGroupForCM; + + // EasyTrack old factories + EasyTrackOldFactories memory oldFactories = params.oldFactories; + ETF_OLD_SETTLE_EL_STEALING_PENALTY = oldFactories.CSMSettleElStealingPenalty; + ETF_OLD_CSM_SET_VETTED_GATE_TREE = oldFactories.CSMSetVettedGateTree; + + // Discover via locator + LOCATOR = params.locator; + ILidoLocator oldLocator = ILidoLocator(params.locator); + OLD_ORACLE_REPORT_SANITY_CHECKER = oldLocator.oracleReportSanityChecker(); + OLD_DEPOSIT_SECURITY_MODULE = oldLocator.depositSecurityModule(); + + ILidoLocator locator = ILidoLocator(coreUpgradeParams.newLocatorImpl); + LIDO = locator.lido(); + LIDO_APP_ID = IAragonApp(LIDO).appId(); + + ACCOUNTING_ORACLE = locator.accountingOracle(); + ACCOUNTING = locator.accounting(); + STAKING_ROUTER = locator.stakingRouter(); + VALIDATORS_EXIT_BUS_ORACLE = locator.validatorsExitBusOracle(); + WITHDRAWAL_VAULT = locator.withdrawalVault(); + TOP_UP_GATEWAY = locator.topUpGateway(); + BURNER = locator.burner(); + TRIGGERABLE_WITHDRAWALS_GATEWAY = locator.triggerableWithdrawalsGateway(); + VALIDATOR_EXIT_DELAY_VERIFIER = 
locator.validatorExitDelayVerifier(); + CONSOLIDATION_GATEWAY = locator.consolidationGateway(); + NEW_ORACLE_REPORT_SANITY_CHECKER = locator.oracleReportSanityChecker(); + NEW_DEPOSIT_SECURITY_MODULE = locator.depositSecurityModule(); + + /// CSMv3 + CSMUpgradeParams memory csmUpgradeParams = params.csmUpgrade; + + CSM = csmUpgradeParams.csmProxy; + CSM_IMPL = csmUpgradeParams.csmImpl; + CSM_PARAMETERS_REGISTRY_IMPL = csmUpgradeParams.parametersRegistryImpl; + CSM_FEE_ORACLE_IMPL = csmUpgradeParams.feeOracleImpl; + CSM_FEE_ORACLE_CONSENSUS_VERSION = csmUpgradeParams.feeOracleConsensusVersion; + CSM_VETTED_GATE = csmUpgradeParams.vettedGateProxy; + CSM_IDENTIFIED_DVT_CLUSTER_GATE = csmUpgradeParams.identifiedDVTClusterGate; + CSM_IDENTIFIED_DVT_CLUSTER_CURVE_SETUP = csmUpgradeParams.identifiedDVTClusterCurveSetup; + CSM_IDENTIFIED_DVT_CLUSTER_BOND_CURVE_ID = csmUpgradeParams.identifiedDVTClusterBondCurveId; + CSM_VETTED_GATE_IMPL = csmUpgradeParams.vettedGateImpl; + CSM_ACCOUNTING_IMPL = csmUpgradeParams.accountingImpl; + CSM_FEE_DISTRIBUTOR_IMPL = csmUpgradeParams.feeDistributorImpl; + CSM_EXIT_PENALTIES_IMPL = csmUpgradeParams.exitPenaltiesImpl; + CSM_STRIKES_IMPL = csmUpgradeParams.strikesImpl; + CSM_OLD_PERMISSIONLESS_GATE = csmUpgradeParams.oldPermissionlessGate; + CSM_OLD_VERIFIER = csmUpgradeParams.oldVerifier; + CSM_NEW_VERIFIER = csmUpgradeParams.newVerifier; + CSM_NEW_PERMISSIONLESS_GATE = csmUpgradeParams.newPermissionlessGate; + CSM_EJECTOR = csmUpgradeParams.ejector; + CSM_COMMITTEE = csmUpgradeParams.csmCommittee; + + IBaseModuleV3 csm = IBaseModuleV3(CSM); + CSM_PARAMETERS_REGISTRY = csm.PARAMETERS_REGISTRY(); + CSM_ACCOUNTING = csm.ACCOUNTING(); + CSM_EXIT_PENALTIES = csm.EXIT_PENALTIES(); + CSM_FEE_DISTRIBUTOR = csm.FEE_DISTRIBUTOR(); + CSM_FEE_ORACLE = IFeeDistributorV3(CSM_FEE_DISTRIBUTOR).ORACLE(); + CSM_STRIKES = IFeeOracleV3(CSM_FEE_ORACLE).STRIKES(); + CSM_OLD_EJECTOR = IValidatorStrikesV3(CSM_STRIKES).ejector(); + + // CMv2 + 
CuratedModuleParams memory curatedModuleParams = params.curatedModule; + + CURATED_MODULE = curatedModuleParams.module; + for (uint256 i = 0; i < curatedModuleParams.curatedGates.length; ++i) { + CURATED_GATES.push(curatedModuleParams.curatedGates[i]); + } + CURATED_MODULE_NAME = Bytes32String.toBytes32(curatedModuleParams.moduleName); + CURATED_STAKE_SHARE_LIMIT = curatedModuleParams.stakeShareLimit; + CURATED_PRIORITY_EXIT_SHARE_THRESHOLD = curatedModuleParams.priorityExitShareThreshold; + CURATED_STAKING_MODULE_FEE = curatedModuleParams.stakingModuleFee; + CURATED_TREASURY_FEE = curatedModuleParams.treasuryFee; + CURATED_MAX_DEPOSITS_PER_BLOCK = curatedModuleParams.maxDepositsPerBlock; + CURATED_MIN_DEPOSIT_BLOCK_DISTANCE = curatedModuleParams.minDepositBlockDistance; + CURATED_FEE_ORACLE_CONSENSUS_VERSION = curatedModuleParams.feeOracleConsensusVersion; + CURATED_HASH_CONSENSUS_INITIAL_EPOCH = curatedModuleParams.hashConsensusInitialEpoch; + CURATED_VERIFIER = curatedModuleParams.verifier; + CURATED_CIRCUIT_BREAKER_PAUSER = curatedModuleParams.circuitBreakerPauser; + + ICuratedModule curatedModule = ICuratedModule(CURATED_MODULE); + CURATED_META_REGISTRY = curatedModule.META_REGISTRY(); + CURATED_PARAMETERS_REGISTRY = curatedModule.PARAMETERS_REGISTRY(); + CURATED_ACCOUNTING = curatedModule.ACCOUNTING(); + CURATED_FEE_DISTRIBUTOR = curatedModule.FEE_DISTRIBUTOR(); + CURATED_FEE_ORACLE = IFeeDistributorV3(CURATED_FEE_DISTRIBUTOR).ORACLE(); + CURATED_HASH_CONSENSUS = IFeeOracleV3(CURATED_FEE_ORACLE).getConsensusContract(); + CURATED_STRIKES = IFeeOracleV3(CURATED_FEE_ORACLE).STRIKES(); + CURATED_EJECTOR = IValidatorStrikesV3(CURATED_STRIKES).ejector(); + } + + function getGlobalConfig() external view returns (GlobalConfig memory) { + return GlobalConfig({ + agent: AGENT, + lido: LIDO, + burner: BURNER, + resealManager: RESEAL_MANAGER, + circuitBreaker: CIRCUIT_BREAKER, + easyTrack: EASY_TRACK, + easyTrackEVMScriptExecutor: EASY_TRACK_EVM_SCRIPT_EXECUTOR, + 
stakingRouter: STAKING_ROUTER, + triggerableWithdrawalsGateway: TRIGGERABLE_WITHDRAWALS_GATEWAY + }); + } + + function getEasyTrackConfig() external view returns (EasyTrackNewFactories memory, EasyTrackOldFactories memory) { + return ( + EasyTrackNewFactories({ + UpdateStakingModuleShareLimits: ETF_NEW_UPDATE_STAKING_MODULE_SHARE_LIMITS, + AllowConsolidationPair: ETF_NEW_ALLOW_CONSOLIDATION_PAIR, + SetMerkleGateTreeForCSM: ETF_NEW_SET_MERKLE_GATE_TREE_FOR_CSM, + ReportWithdrawalsForSlashedValidatorsForCSM: ETF_NEW_REPORT_WITHDRAWALS_FOR_SLASHED_VALIDATORS_FOR_CSM, + SettleGeneralDelayedPenaltyForCSM: ETF_NEW_SETTLE_GENERAL_DELAYED_PENALTY_FOR_CSM, + SetMerkleGateTreeForCM: ETF_NEW_SET_MERKLE_GATE_TREE_FOR_CM, + ReportWithdrawalsForSlashedValidatorsForCM: ETF_NEW_REPORT_WITHDRAWALS_FOR_SLASHED_VALIDATORS_FOR_CM, + SettleGeneralDelayedPenaltyForCM: ETF_NEW_SETTLE_GENERAL_DELAYED_PENALTY_FOR_CM, + CreateOrUpdateOperatorGroupForCM: ETF_NEW_CREATE_OR_UPDATE_OPERATOR_GROUP + }), + EasyTrackOldFactories({ + CSMSettleElStealingPenalty: ETF_OLD_SETTLE_EL_STEALING_PENALTY, + CSMSetVettedGateTree: ETF_OLD_CSM_SET_VETTED_GATE_TREE + }) + ); + } + + function getCoreUpgradeConfig() external view returns (CoreUpgradeConfig memory) { + return CoreUpgradeConfig({ + kernel: KERNEL, + acl: ACL, + lidoAppId: LIDO_APP_ID, + locator: LOCATOR, + // old impl + oldLocatorImpl: OLD_LOCATOR_IMPL, + oldLidoImpl: OLD_LIDO_IMPL, + oldAccountingImpl: OLD_ACCOUNTING_IMPL, + oldAccountingOracleImpl: OLD_ACCOUNTING_ORACLE_IMPL, + oldStakingRouterImpl: OLD_STAKING_ROUTER_IMPL, + oldWithdrawalVaultImpl: OLD_WITHDRAWAL_VAULT_IMPL, + oldValidatorsExitBusOracleImpl: OLD_VALIDATORS_EXIT_BUS_ORACLE_IMPL, + oldOracleReportSanityChecker: OLD_ORACLE_REPORT_SANITY_CHECKER, + oldDepositSecurityModule: OLD_DEPOSIT_SECURITY_MODULE, + // new impl + newLocatorImpl: NEW_LOCATOR_IMPL, + newLidoImpl: NEW_LIDO_IMPL, + newAccountingImpl: NEW_ACCOUNTING_IMPL, + newAccountingOracleImpl: NEW_ACCOUNTING_ORACLE_IMPL, + 
newStakingRouterImpl: NEW_STAKING_ROUTER_IMPL, + newWithdrawalVaultImpl: NEW_WITHDRAWAL_VAULT_IMPL, + newValidatorsExitBusOracleImpl: NEW_VALIDATORS_EXIT_BUS_ORACLE_IMPL, + newOracleReportSanityChecker: NEW_ORACLE_REPORT_SANITY_CHECKER, + newDepositSecurityModule: NEW_DEPOSIT_SECURITY_MODULE, + consolidationBusImpl: CONSOLIDATION_BUS_IMPL, + consolidationMigratorImpl: CONSOLIDATION_MIGRATOR_IMPL, + topUpGatewayImpl: TOP_UP_GATEWAY_IMPL, + // contracts + accounting: ACCOUNTING, + accountingOracle: ACCOUNTING_ORACLE, + validatorsExitBusOracle: VALIDATORS_EXIT_BUS_ORACLE, + withdrawalVault: WITHDRAWAL_VAULT, + consolidationGateway: CONSOLIDATION_GATEWAY, + consolidationBus: CONSOLIDATION_BUS, + consolidationMigrator: CONSOLIDATION_MIGRATOR, + topUpGateway: TOP_UP_GATEWAY, + // params + lidoDepositsReserveTarget: LIDO_DEPOSITS_RESERVE_TARGET, + curatedModuleCommittee: CURATED_MODULE_COMMITTEE, + topUpGatewayDepositor: TOP_UP_GATEWAY_DEPOSITOR, + consolidationGatewayPauser: CONSOLIDATION_GATEWAY_PAUSER, + // twGateway limits + twMaxExitRequestsLimit: TW_MAX_EXIT_REQUESTS_LIMIT, + twExitsPerFrame: TW_EXITS_PER_FRAME, + twFrameDurationInSec: TW_FRAME_DURATION_IN_SEC, + // oracles parameters + aoConsensusVersion: AO_CONSENSUS_VERSION, + veboMaxValidatorsPerReport: VEBO_MAX_VALIDATORS_PER_REPORT, + veboMaxExitBalanceEth: VEBO_MAX_EXIT_BALANCE_ETH, + veboBalancePerFrameEth: VEBO_BALANCE_PER_FRAME_ETH, + veboFrameDurationInSec: VEBO_FRAME_DURATION_IN_SEC, + veboConsensusVersion: VEBO_CONSENSUS_VERSION + }); + } + + function getCSMUpgradeConfig() external view returns (CSMUpgradeConfig memory) { + return CSMUpgradeConfig({ + csm: CSM, + csmImpl: CSM_IMPL, + parametersRegistry: CSM_PARAMETERS_REGISTRY, + parametersRegistryImpl: CSM_PARAMETERS_REGISTRY_IMPL, + feeOracle: CSM_FEE_ORACLE, + feeOracleImpl: CSM_FEE_ORACLE_IMPL, + feeOracleConsensusVersion: CSM_FEE_ORACLE_CONSENSUS_VERSION, + vettedGate: CSM_VETTED_GATE, + identifiedDVTClusterGate: CSM_IDENTIFIED_DVT_CLUSTER_GATE, + 
identifiedDVTClusterCurveSetup: CSM_IDENTIFIED_DVT_CLUSTER_CURVE_SETUP, + identifiedDVTClusterBondCurveId: CSM_IDENTIFIED_DVT_CLUSTER_BOND_CURVE_ID, + vettedGateImpl: CSM_VETTED_GATE_IMPL, + accounting: CSM_ACCOUNTING, + accountingImpl: CSM_ACCOUNTING_IMPL, + feeDistributor: CSM_FEE_DISTRIBUTOR, + feeDistributorImpl: CSM_FEE_DISTRIBUTOR_IMPL, + exitPenalties: CSM_EXIT_PENALTIES, + exitPenaltiesImpl: CSM_EXIT_PENALTIES_IMPL, + strikes: CSM_STRIKES, + strikesImpl: CSM_STRIKES_IMPL, + oldPermissionlessGate: CSM_OLD_PERMISSIONLESS_GATE, + oldVerifier: CSM_OLD_VERIFIER, + newVerifier: CSM_NEW_VERIFIER, + newPermissionlessGate: CSM_NEW_PERMISSIONLESS_GATE, + oldEjector: CSM_OLD_EJECTOR, + ejector: CSM_EJECTOR, + csmCommittee: CSM_COMMITTEE + }); + } + + function getCuratedModuleConfig() external view returns (CuratedModuleConfig memory) { + return CuratedModuleConfig({ + module: CURATED_MODULE, + curatedGates: CURATED_GATES, + parametersRegistry: CURATED_PARAMETERS_REGISTRY, + accounting: CURATED_ACCOUNTING, + ejector: CURATED_EJECTOR, + verifier: CURATED_VERIFIER, + circuitBreakerPauser: CURATED_CIRCUIT_BREAKER_PAUSER, + feeDistributor: CURATED_FEE_DISTRIBUTOR, + feeOracle: CURATED_FEE_ORACLE, + hashConsensus: CURATED_HASH_CONSENSUS, + strikes: CURATED_STRIKES, + moduleName: Bytes32String.toString(CURATED_MODULE_NAME), + stakeShareLimit: CURATED_STAKE_SHARE_LIMIT, + priorityExitShareThreshold: CURATED_PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: CURATED_STAKING_MODULE_FEE, + treasuryFee: CURATED_TREASURY_FEE, + maxDepositsPerBlock: CURATED_MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: CURATED_MIN_DEPOSIT_BLOCK_DISTANCE, + feeOracleConsensusVersion: CURATED_FEE_ORACLE_CONSENSUS_VERSION, + hashConsensusInitialEpoch: CURATED_HASH_CONSENSUS_INITIAL_EPOCH, + metaRegistry: CURATED_META_REGISTRY + }); + } + + error NewAndOldLocatorImplementationsMustBeDifferent(); +} diff --git a/contracts/upgrade/UpgradeTemplate.sol b/contracts/upgrade/UpgradeTemplate.sol new file 
mode 100644 index 0000000000..0e0952c51d --- /dev/null +++ b/contracts/upgrade/UpgradeTemplate.sol @@ -0,0 +1,833 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// See contracts/COMPILERS.md +pragma solidity 0.8.25; + +import { + IAccessControl, + IAccessControlEnumerable +} from "@openzeppelin/contracts-v5.2/access/extensions/IAccessControlEnumerable.sol"; + +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {IOssifiableProxy} from "contracts/common/interfaces/IOssifiableProxy.sol"; +import {IHashConsensus} from "contracts/common/interfaces/IHashConsensus.sol"; +import {IPausableUntil} from "contracts/common/interfaces/IPausableUntil.sol"; +import {ICircuitBreaker} from "contracts/common/interfaces/ICircuitBreaker.sol"; +import {ModuleStateConfig, StakingModuleStatus} from "contracts/0.8.25/sr/SRTypes.sol"; +import {IUpgradeTemplate} from "./interfaces/IUpgradeTemplate.sol"; +import { + UpgradeParameters, + GlobalConfig, + CoreUpgradeConfig, + CSMUpgradeConfig, + CuratedModuleConfig, + ILidoUpgrade, + IBaseOracle, + IWithdrawalsManagerProxy, + IAragonKernel, + IAragonACL, + IVersioned, + IStakingRouterUpgrade, + IDepositSecurityModule, + IConsolidationMigrator, + IInitializedVersionView, + IMerkleGate, + IOneShotCurveSetup +} from "./UpgradeTypes.sol"; + +import {UpgradeConfig} from "./UpgradeConfig.sol"; + +/** + * @title Lido Upgrade Template + * + * @dev Must be used by means of two calls: + * - `startUpgrade()` before upgrading LidoLocator and before everything else + * - `finishUpgrade()` as the last step of the upgrade + */ +contract UpgradeTemplate is IUpgradeTemplate { + // + // Events + // + + event UpgradeStarted(); + event UpgradeFinished(); + + // + // -------- Constants -------- + // + + bytes32 internal constant PAUSE_ROLE = keccak256("PAUSE_ROLE"); + bytes32 internal constant RESUME_ROLE = keccak256("RESUME_ROLE"); + bytes32 internal constant ALLOW_PAIR_ROLE = 
keccak256("ALLOW_PAIR_ROLE"); + bytes32 internal constant DISALLOW_PAIR_ROLE = keccak256("DISALLOW_PAIR_ROLE"); + bytes32 internal constant TOP_UP_ROLE = keccak256("TOP_UP_ROLE"); + bytes32 internal constant ADD_CONSOLIDATION_REQUEST_ROLE = keccak256("ADD_CONSOLIDATION_REQUEST_ROLE"); + bytes32 internal constant PUBLISH_ROLE = keccak256("PUBLISH_ROLE"); + bytes32 internal constant EXECUTE_ROLE = keccak256("EXECUTE_ROLE"); + bytes32 internal constant REMOVE_ROLE = keccak256("REMOVE_ROLE"); + bytes32 internal constant MANAGE_ROLE = keccak256("MANAGE_ROLE"); + // csm roles + bytes32 internal constant REPORT_EL_REWARDS_STEALING_PENALTY_ROLE = + keccak256("REPORT_EL_REWARDS_STEALING_PENALTY_ROLE"); + bytes32 internal constant SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE = + keccak256("SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE"); + bytes32 internal constant REPORT_GENERAL_DELAYED_PENALTY_ROLE = keccak256("REPORT_GENERAL_DELAYED_PENALTY_ROLE"); + bytes32 internal constant SETTLE_GENERAL_DELAYED_PENALTY_ROLE = keccak256("SETTLE_GENERAL_DELAYED_PENALTY_ROLE"); + bytes32 internal constant REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE = + keccak256("REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE"); + bytes32 internal constant REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE = + keccak256("REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE"); + bytes32 internal constant START_REFERRAL_SEASON_ROLE = keccak256("START_REFERRAL_SEASON_ROLE"); + bytes32 internal constant END_REFERRAL_SEASON_ROLE = keccak256("END_REFERRAL_SEASON_ROLE"); + bytes32 internal constant ADD_FULL_WITHDRAWAL_REQUEST_ROLE = keccak256("ADD_FULL_WITHDRAWAL_REQUEST_ROLE"); + bytes32 internal constant CREATE_NODE_OPERATOR_ROLE = keccak256("CREATE_NODE_OPERATOR_ROLE"); + bytes32 internal constant SET_BOND_CURVE_ROLE = keccak256("SET_BOND_CURVE_ROLE"); + bytes32 internal constant MANAGE_BOND_CURVES_ROLE = keccak256("MANAGE_BOND_CURVES_ROLE"); + bytes32 internal constant MANAGE_CURVE_PARAMETERS_ROLE = keccak256("MANAGE_CURVE_PARAMETERS_ROLE"); + 
bytes32 internal constant MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE = + keccak256("MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE"); + bytes32 internal constant REQUEST_BURN_MY_STETH_ROLE = keccak256("REQUEST_BURN_MY_STETH_ROLE"); + bytes32 internal constant REQUEST_BURN_SHARES_ROLE = keccak256("REQUEST_BURN_SHARES_ROLE"); + bytes32 internal constant VERIFIER_ROLE = keccak256("VERIFIER_ROLE"); + + // sr roles + bytes32 internal constant MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = keccak256("MANAGE_WITHDRAWAL_CREDENTIALS_ROLE"); + bytes32 internal constant STAKING_MODULE_MANAGE_ROLE = keccak256("STAKING_MODULE_MANAGE_ROLE"); + bytes32 internal constant STAKING_MODULE_UNVETTING_ROLE = keccak256("STAKING_MODULE_UNVETTING_ROLE"); + bytes32 internal constant REPORT_EXITED_VALIDATORS_ROLE = keccak256("REPORT_EXITED_VALIDATORS_ROLE"); + bytes32 internal constant UNSAFE_SET_EXITED_VALIDATORS_ROLE = keccak256("UNSAFE_SET_EXITED_VALIDATORS_ROLE"); + bytes32 internal constant REPORT_REWARDS_MINTED_ROLE = keccak256("REPORT_REWARDS_MINTED_ROLE"); + bytes32 internal constant REPORT_VALIDATOR_EXITING_STATUS_ROLE = keccak256("REPORT_VALIDATOR_EXITING_STATUS_ROLE"); + bytes32 internal constant REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = keccak256("REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE"); + bytes32 internal constant STAKING_MODULE_SHARE_MANAGE_ROLE = keccak256("STAKING_MODULE_SHARE_MANAGE_ROLE"); + bytes32 internal constant BUFFER_RESERVE_MANAGER_ROLE = keccak256("BUFFER_RESERVE_MANAGER_ROLE"); + bytes32 internal constant TW_EXIT_LIMIT_MANAGER_ROLE = keccak256("TW_EXIT_LIMIT_MANAGER_ROLE"); + + //sanitychecker roles + bytes32 internal constant ALL_LIMITS_MANAGER_ROLE = keccak256("ALL_LIMITS_MANAGER_ROLE"); + bytes32 internal constant EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 internal constant APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 internal 
constant ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE = + keccak256("ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE"); + bytes32 internal constant SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE = + keccak256("SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE"); + bytes32 internal constant MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE = + keccak256("MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE"); + bytes32 internal constant MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE = + keccak256("MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE"); + bytes32 internal constant MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE = + keccak256("MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE"); + bytes32 internal constant REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE = + keccak256("REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE"); + bytes32 internal constant MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE = + keccak256("MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE"); + bytes32 internal constant SECOND_OPINION_MANAGER_ROLE = keccak256("SECOND_OPINION_MANAGER_ROLE"); + bytes32 internal constant INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE = + keccak256("INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE"); + + uint256 public constant EXPECTED_FINAL_LIDO_VERSION = 4; + uint256 public constant EXPECTED_FINAL_STAKING_ROUTER_VERSION = 4; + uint256 public constant EXPECTED_FINAL_ACCOUNTING_ORACLE_VERSION = 5; + uint256 public constant EXPECTED_FINAL_ACCOUNTING_ORACLE_CONSENSUS_VERSION = 6; + uint256 public constant EXPECTED_FINAL_VALIDATORS_EXIT_BUS_ORACLE_VERSION = 3; + uint256 public constant EXPECTED_FINAL_VALIDATORS_EXIT_BUS_ORACLE_CONSENSUS_VERSION = 5; + uint256 public constant EXPECTED_FINAL_WITHDRAWAL_VAULT_VERSION = 3; + uint256 public constant EXPECTED_FINAL_COMMUNITY_FEE_ORACLE_VERSION = 3; + + uint64 public constant EXPECTED_FINAL_CSM_MODULE_INITIALIZED_VERSION = 3; + uint64 public constant EXPECTED_FINAL_CSM_PARAMETERS_REGISTRY_INITIALIZED_VERSION = 3; + uint64 public constant EXPECTED_FINAL_CSM_ACCOUNTING_INITIALIZED_VERSION = 3; + uint64 public constant 
EXPECTED_FINAL_CSM_FEE_DISTRIBUTOR_INITIALIZED_VERSION = 3; + uint64 public constant EXPECTED_FINAL_CSM_VALIDATOR_STRIKES_INITIALIZED_VERSION = 1; + uint64 public constant EXPECTED_FINAL_CSM_VETTED_GATE_INITIALIZED_VERSION = 1; + + uint64 public constant EXPECTED_FINAL_CM_MODULE_INITIALIZED_VERSION = 1; + uint64 public constant EXPECTED_FINAL_CM_PARAMETERS_REGISTRY_INITIALIZED_VERSION = 3; + uint64 public constant EXPECTED_FINAL_CM_ACCOUNTING_INITIALIZED_VERSION = 3; + uint64 public constant EXPECTED_FINAL_CM_FEE_DISTRIBUTOR_INITIALIZED_VERSION = 3; + uint64 public constant EXPECTED_FINAL_CM_VALIDATOR_STRIKES_INITIALIZED_VERSION = 1; + + bytes32 internal constant DEFAULT_ADMIN_ROLE = 0x00; + + // Initial value of upgradeBlockNumber storage variable + uint256 internal constant UPGRADE_NOT_STARTED = 0; + uint256 internal constant INFINITE_ALLOWANCE = type(uint256).max; + + // Upgrade config (self deployed internal contract) + address public immutable CONFIG; + address public immutable AGENT; + + // Timestamp since which startUpgrade() + // This behavior is introduced to disarm the template if the upgrade voting creation or enactment + // didn't happen in proper time period + uint256 public immutable EXPIRE_SINCE_INCLUSIVE; + + // + // Structured storage + // + + uint256 public upgradeBlockNumber = UPGRADE_NOT_STARTED; + bool public isUpgradeFinished; + + uint256 internal initialBufferedEther; + uint256 internal initialDepositedValidators; + uint256 internal initialBeaconValidators; + uint256 internal initialBeaconBalance; + bytes32 internal initialWithdrawalCredentials; + uint256 internal initialModulesCount; + + // + // Slots for transient storage + // + + // Slot for the upgrade started flag + // / keccak256("UpgradeTemplate.upgradeStartedFlag"); + bytes32 public constant UPGRADE_STARTED_SLOT = 0x35b46117eef044799338cc40f60a0c4c38c26772e3f81f535801c8d814ecc33d; + + /// @param _params Params required to initialize the addresses contract + /// @param 
_expireSinceInclusive Unix timestamp after which upgrade actions revert + constructor(UpgradeParameters memory _params, uint256 _expireSinceInclusive) { + UpgradeConfig config = new UpgradeConfig(_params); + CONFIG = address(config); + AGENT = config.AGENT(); + EXPIRE_SINCE_INCLUSIVE = _expireSinceInclusive; + } + + /// @notice Must be called before LidoLocator is upgraded + function startUpgrade() external { + UpgradeConfig config = UpgradeConfig(CONFIG); + GlobalConfig memory g = config.getGlobalConfig(); + + if (msg.sender != g.agent) revert OnlyAgentCanUpgrade(); + if (block.timestamp >= EXPIRE_SINCE_INCLUSIVE) revert Expired(); + if (isUpgradeFinished) revert UpgradeAlreadyFinished(); + if (_isStartCalledInThisTx()) revert StartAlreadyCalledInThisTx(); + if (upgradeBlockNumber != UPGRADE_NOT_STARTED) revert UpgradeAlreadyStarted(); + + assembly { tstore(UPGRADE_STARTED_SLOT, 1) } + upgradeBlockNumber = block.number; + + initialBufferedEther = ILidoUpgrade(g.lido).getBufferedEther(); + (initialDepositedValidators, initialBeaconValidators, initialBeaconBalance) = + ILidoUpgrade(g.lido).getBeaconStat(); + + IStakingRouterUpgrade sr = IStakingRouterUpgrade(g.stakingRouter); + initialWithdrawalCredentials = sr.getWithdrawalCredentials(); + initialModulesCount = sr.getStakingModulesCount(); + + _assertPreUpgradeState(g); + + emit UpgradeStarted(); + } + + function finishUpgrade() external { + UpgradeConfig config = UpgradeConfig(CONFIG); + GlobalConfig memory g = config.getGlobalConfig(); + + if (msg.sender != g.agent) revert OnlyAgentCanUpgrade(); + if (isUpgradeFinished) revert UpgradeAlreadyFinished(); + if (!_isStartCalledInThisTx()) revert StartAndFinishMustBeInSameTx(); + + isUpgradeFinished = true; + + _assertPostUpgradeState(g); + + emit UpgradeFinished(); + } + + // + // Assertions + // + + function _assertPreUpgradeState(GlobalConfig memory g) internal view { + CoreUpgradeConfig memory c = UpgradeConfig(CONFIG).getCoreUpgradeConfig(); + // Check initial 
implementations of the proxies to be upgraded + _assertAragonKernelImplementation(IAragonKernel(c.kernel), c.lidoAppId, c.oldLidoImpl); + + _assertProxyImplementation(c.locator, c.oldLocatorImpl); + _assertProxyImplementation(c.accounting, c.oldAccountingImpl); + _assertProxyImplementation(c.accountingOracle, c.oldAccountingOracleImpl); + _assertProxyImplementation(g.stakingRouter, c.oldStakingRouterImpl); + _assertProxyImplementation(c.validatorsExitBusOracle, c.oldValidatorsExitBusOracleImpl); + + _assertWithdrawalsManagerProxyImplementation(c.withdrawalVault, c.oldWithdrawalVaultImpl); + } + + function _assertPostUpgradeState(GlobalConfig memory g) internal view { + CoreUpgradeConfig memory c = UpgradeConfig(CONFIG).getCoreUpgradeConfig(); + + _assertCoreFinalState(g, c); + _assertCSMFinalState(g); + _assertCMFinalState(g); + + _checkSRMigration(g, c); + _checkLidoMigration(g, c); + _checkDSMMigration(g, c); + } + + function _assertCoreFinalState(GlobalConfig memory g, CoreUpgradeConfig memory c) internal view { + address agent = g.agent; + + // Locator + ILidoLocator locator = ILidoLocator(c.locator); + _assertProxyImplementation(address(locator), c.newLocatorImpl); + _assertLocatorAddress(locator.depositSecurityModule(), c.newDepositSecurityModule); + + // Lido + _assertAragonKernelImplementation(IAragonKernel(c.kernel), c.lidoAppId, c.newLidoImpl); + _assertContractVersion(g.lido, EXPECTED_FINAL_LIDO_VERSION); + _assertAragonPermissionManager(c.acl, g.lido, BUFFER_RESERVE_MANAGER_ROLE, agent); + _assertHasAragonPermission(c.acl, g.lido, BUFFER_RESERVE_MANAGER_ROLE, agent); + + // Accounting + _assertProxyImplementation(c.accounting, c.newAccountingImpl); + _assertProxyAdmin(c.accounting, agent); + + // Accounting Oracle + { + address ao = c.accountingOracle; + _assertProxyImplementation(ao, c.newAccountingOracleImpl); + _assertProxyAdmin(ao, agent); + _assertContractVersion(ao, EXPECTED_FINAL_ACCOUNTING_ORACLE_VERSION); + _assertOracleConsensusVersion(ao, 
EXPECTED_FINAL_ACCOUNTING_ORACLE_CONSENSUS_VERSION); + _assertSingleOZRoleHolder(ao, DEFAULT_ADMIN_ROLE, agent); + } + + // ValidatorsExitBusOracle + { + address vebo = c.validatorsExitBusOracle; + _assertProxyImplementation(vebo, c.newValidatorsExitBusOracleImpl); + _assertContractVersion(vebo, EXPECTED_FINAL_VALIDATORS_EXIT_BUS_ORACLE_VERSION); + _assertOracleConsensusVersion(vebo, EXPECTED_FINAL_VALIDATORS_EXIT_BUS_ORACLE_CONSENSUS_VERSION); + _assertProxyAdmin(vebo, agent); + _assertSingleOZRoleHolder(vebo, DEFAULT_ADMIN_ROLE, agent); + } + + // WithdrawalVault + _assertWithdrawalsManagerProxyImplementation(c.withdrawalVault, c.newWithdrawalVaultImpl); + _assertWithdrawalsManagerProxyAdmin(c.withdrawalVault, agent); + _assertContractVersion(c.withdrawalVault, EXPECTED_FINAL_WITHDRAWAL_VAULT_VERSION); + + // SR + { + address sr = g.stakingRouter; + _assertProxyImplementation(sr, c.newStakingRouterImpl); + _assertProxyAdmin(sr, agent); + _assertContractVersion(sr, EXPECTED_FINAL_STAKING_ROUTER_VERSION); + _assertSingleOZRoleHolder(sr, DEFAULT_ADMIN_ROLE, agent); + /// @dev _assertSingleOZRoleHolder not works on hoodi! 
+ _assertHasOZRole(sr, STAKING_MODULE_MANAGE_ROLE, agent); + _assertSingleOZRoleHolder(sr, STAKING_MODULE_UNVETTING_ROLE, c.newDepositSecurityModule); + _assertSingleOZRoleHolder(sr, STAKING_MODULE_SHARE_MANAGE_ROLE, g.easyTrackEVMScriptExecutor); + _assertZeroOZRoleHolders(sr, MANAGE_WITHDRAWAL_CREDENTIALS_ROLE); + } + + // Consolidation + { + address consGw = c.consolidationGateway; + address consBus = c.consolidationBus; + address consMigrator = c.consolidationMigrator; + address resealManager = g.resealManager; + address cb = g.circuitBreaker; + + _assertProxyImplementation(consBus, c.consolidationBusImpl); + _assertProxyAdmin(consBus, agent); + _assertSingleOZRoleHolder(consBus, DEFAULT_ADMIN_ROLE, agent); + _assertSingleOZRoleHolder(consBus, PUBLISH_ROLE, consMigrator); + _assertZeroOZRoleHolders(consBus, MANAGE_ROLE); + _assertZeroOZRoleHolders(consBus, REMOVE_ROLE); + + _assertProxyImplementation(consMigrator, c.consolidationMigratorImpl); + _assertProxyAdmin(consMigrator, agent); + _assertSingleOZRoleHolder(consMigrator, DEFAULT_ADMIN_ROLE, agent); + _assertSingleOZRoleHolder(consMigrator, ALLOW_PAIR_ROLE, g.easyTrackEVMScriptExecutor); + _assertSingleOZRoleHolder(consMigrator, DISALLOW_PAIR_ROLE, c.curatedModuleCommittee); + /// @note correctness of TARGET_MODULE_ID is checked inside the SR migration checks + + _assertLocatorAddress(locator.consolidationGateway(), consGw); + _assertSingleOZRoleHolder(consGw, DEFAULT_ADMIN_ROLE, agent); + _assertTwoOZRoleHolders(consGw, PAUSE_ROLE, cb, resealManager); + _assertSingleOZRoleHolder(consGw, RESUME_ROLE, resealManager); + _assertSingleOZRoleHolder(consGw, ADD_CONSOLIDATION_REQUEST_ROLE, consBus); + + _assertCircuitBreakerPauser(cb, consGw, c.consolidationGatewayPauser); + } + + // TopUps + { + address tuGw = c.topUpGateway; + _assertProxyImplementation(tuGw, c.topUpGatewayImpl); + _assertProxyAdmin(tuGw, agent); + _assertLocatorAddress(locator.topUpGateway(), tuGw); + + _assertSingleOZRoleHolder(tuGw, 
DEFAULT_ADMIN_ROLE, agent); + _assertSingleOZRoleHolder(tuGw, TOP_UP_ROLE, c.topUpGatewayDepositor); + } + + // TW + { + _assertSingleOZRoleHolder(g.triggerableWithdrawalsGateway, TW_EXIT_LIMIT_MANAGER_ROLE, agent); + } + + { + // OracleReportSanityChecker + address checker = c.newOracleReportSanityChecker; + _assertLocatorAddress(locator.oracleReportSanityChecker(), checker); + _assertSingleOZRoleHolder(checker, DEFAULT_ADMIN_ROLE, agent); + bytes32[12] memory roles = [ + ALL_LIMITS_MANAGER_ROLE, + EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE, + APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE, + ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE, + SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE, + MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE, + MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE, + MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE, + REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE, + MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE, + SECOND_OPINION_MANAGER_ROLE, + INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE + ]; + for (uint256 i = 0; i < roles.length; ++i) { + _assertZeroOZRoleHolders(checker, roles[i]); + } + } + } + + function _assertCSMFinalState(GlobalConfig memory g) internal view { + CSMUpgradeConfig memory csm = UpgradeConfig(CONFIG).getCSMUpgradeConfig(); + address agent = g.agent; + address resealManager = g.resealManager; + address cb = g.circuitBreaker; + address csModule = csm.csm; + + _assertProxyImplementation(csModule, csm.csmImpl); + _assertProxyImplementation(csm.parametersRegistry, csm.parametersRegistryImpl); + _assertProxyImplementation(csm.feeOracle, csm.feeOracleImpl); + _assertProxyImplementation(csm.vettedGate, csm.vettedGateImpl); + _assertProxyImplementation(csm.accounting, csm.accountingImpl); + _assertProxyImplementation(csm.feeDistributor, csm.feeDistributorImpl); + _assertProxyImplementation(csm.exitPenalties, csm.exitPenaltiesImpl); + _assertProxyImplementation(csm.strikes, csm.strikesImpl); + + _assertProxyAdmin(csModule, agent); + 
_assertProxyAdmin(csm.parametersRegistry, agent); + _assertProxyAdmin(csm.feeOracle, agent); + _assertProxyAdmin(csm.vettedGate, agent); + _assertProxyAdmin(csm.accounting, agent); + _assertProxyAdmin(csm.feeDistributor, agent); + _assertProxyAdmin(csm.exitPenalties, agent); + _assertProxyAdmin(csm.strikes, agent); + + _assertInitializedContractVersion(csModule, EXPECTED_FINAL_CSM_MODULE_INITIALIZED_VERSION); + _assertInitializedContractVersion( + csm.parametersRegistry, EXPECTED_FINAL_CSM_PARAMETERS_REGISTRY_INITIALIZED_VERSION + ); + _assertInitializedContractVersion(csm.vettedGate, EXPECTED_FINAL_CSM_VETTED_GATE_INITIALIZED_VERSION); + _assertInitializedContractVersion(csm.accounting, EXPECTED_FINAL_CSM_ACCOUNTING_INITIALIZED_VERSION); + _assertInitializedContractVersion(csm.feeDistributor, EXPECTED_FINAL_CSM_FEE_DISTRIBUTOR_INITIALIZED_VERSION); + _assertInitializedContractVersion(csm.strikes, EXPECTED_FINAL_CSM_VALIDATOR_STRIKES_INITIALIZED_VERSION); + _assertContractVersion(csm.feeOracle, EXPECTED_FINAL_COMMUNITY_FEE_ORACLE_VERSION); + _assertOracleConsensusVersion(csm.feeOracle, csm.feeOracleConsensusVersion); + + _assertZeroOZRoleHolders(csModule, REPORT_EL_REWARDS_STEALING_PENALTY_ROLE); + _assertZeroOZRoleHolders(csModule, SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE); + _assertSingleOZRoleHolder(csModule, REPORT_GENERAL_DELAYED_PENALTY_ROLE, csm.csmCommittee); + _assertSingleOZRoleHolder(csModule, SETTLE_GENERAL_DELAYED_PENALTY_ROLE, g.easyTrackEVMScriptExecutor); + _assertSingleOZRoleHolder(csModule, VERIFIER_ROLE, csm.newVerifier); + _assertSingleOZRoleHolder(csModule, REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE, csm.newVerifier); + _assertSingleOZRoleHolder(csModule, REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE, g.easyTrackEVMScriptExecutor); + _assertTwoOZRoleHolders(csModule, PAUSE_ROLE, cb, resealManager); + _assertThreeOZRoleHolders( + csModule, CREATE_NODE_OPERATOR_ROLE, csm.vettedGate, csm.newPermissionlessGate, csm.identifiedDVTClusterGate + ); + + 
_assertTwoOZRoleHolders(csm.accounting, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(csm.feeOracle, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(csm.vettedGate, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(csm.identifiedDVTClusterGate, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(csm.newVerifier, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(csm.ejector, PAUSE_ROLE, cb, resealManager); + + _assertCircuitBreakerPauser(cb, csm.identifiedDVTClusterGate, csm.csmCommittee); + _assertCircuitBreakerPauser(cb, csm.newVerifier, csm.csmCommittee); + _assertCircuitBreakerPauser(cb, csm.ejector, csm.csmCommittee); + + _assertNotOZRoleHolder(csm.vettedGate, START_REFERRAL_SEASON_ROLE, agent); + _assertNotOZRoleHolder(csm.vettedGate, END_REFERRAL_SEASON_ROLE, csm.csmCommittee); + + _assertHasOZRole(csm.accounting, SET_BOND_CURVE_ROLE, csm.identifiedDVTClusterGate); + + _assertNotOZRoleHolder(csm.accounting, MANAGE_BOND_CURVES_ROLE, csm.identifiedDVTClusterCurveSetup); + _assertNotOZRoleHolder(csm.parametersRegistry, MANAGE_CURVE_PARAMETERS_ROLE, csm.identifiedDVTClusterCurveSetup); + _assertIdentifiedDVTClusterCurve(csm); + + _assertSingleOZRoleHolder(csm.parametersRegistry, MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE, csm.csmCommittee); + + _assertNotOZRoleHolder(g.burner, REQUEST_BURN_SHARES_ROLE, csm.accounting); + _assertHasOZRole(g.burner, REQUEST_BURN_MY_STETH_ROLE, csm.accounting); + + _assertNotOZRoleHolder(g.triggerableWithdrawalsGateway, ADD_FULL_WITHDRAWAL_REQUEST_ROLE, csm.oldEjector); + _assertHasOZRole(g.triggerableWithdrawalsGateway, ADD_FULL_WITHDRAWAL_REQUEST_ROLE, csm.ejector); + } + + function _assertCMFinalState(GlobalConfig memory g) internal view { + CuratedModuleConfig memory cm = UpgradeConfig(CONFIG).getCuratedModuleConfig(); + address agent = g.agent; + address resealManager = g.resealManager; + address cb = g.circuitBreaker; + address cModule = cm.module; + address cbPauser = 
cm.circuitBreakerPauser; + + _assertInitializedContractVersion(cModule, EXPECTED_FINAL_CM_MODULE_INITIALIZED_VERSION); + _assertInitializedContractVersion( + cm.parametersRegistry, EXPECTED_FINAL_CM_PARAMETERS_REGISTRY_INITIALIZED_VERSION + ); + _assertInitializedContractVersion(cm.accounting, EXPECTED_FINAL_CM_ACCOUNTING_INITIALIZED_VERSION); + _assertInitializedContractVersion(cm.feeDistributor, EXPECTED_FINAL_CM_FEE_DISTRIBUTOR_INITIALIZED_VERSION); + _assertInitializedContractVersion(cm.strikes, EXPECTED_FINAL_CM_VALIDATOR_STRIKES_INITIALIZED_VERSION); + _assertContractVersion(cm.feeOracle, EXPECTED_FINAL_COMMUNITY_FEE_ORACLE_VERSION); + _assertOracleConsensusVersion(cm.feeOracle, cm.feeOracleConsensusVersion); + + _assertHasOZRole(g.burner, REQUEST_BURN_MY_STETH_ROLE, cm.accounting); + _assertHasOZRole(g.triggerableWithdrawalsGateway, ADD_FULL_WITHDRAWAL_REQUEST_ROLE, cm.ejector); + + _assertTwoOZRoleHolders(cModule, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(cm.accounting, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(cm.feeOracle, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(cm.verifier, PAUSE_ROLE, cb, resealManager); + _assertTwoOZRoleHolders(cm.ejector, PAUSE_ROLE, cb, resealManager); + + _assertCircuitBreakerPauser(cb, cModule, cbPauser); + _assertCircuitBreakerPauser(cb, cm.accounting, cbPauser); + _assertCircuitBreakerPauser(cb, cm.feeOracle, cbPauser); + _assertCircuitBreakerPauser(cb, cm.verifier, cbPauser); + _assertCircuitBreakerPauser(cb, cm.ejector, cbPauser); + + _assertNotOZRoleHolder(cModule, RESUME_ROLE, agent); + if (IPausableUntil(cModule).isPaused()) { + revert CMModuleIsPaused(cModule); + } + + // slither-disable-next-line unused-return + (uint256 initialEpoch,) = IHashConsensus(cm.hashConsensus).getFrameConfig(); + if (initialEpoch != cm.hashConsensusInitialEpoch) { + revert InvalidHashConsensusInitialEpoch(cm.hashConsensus, initialEpoch, cm.hashConsensusInitialEpoch); + } + } + + function 
_checkSRMigration(GlobalConfig memory g, CoreUpgradeConfig memory c) internal view { + CuratedModuleConfig memory cm = UpgradeConfig(CONFIG).getCuratedModuleConfig(); + + IStakingRouterUpgrade sr = IStakingRouterUpgrade(g.stakingRouter); + bytes32 newWithdrawalCredentials = sr.getWithdrawalCredentials(); + if (newWithdrawalCredentials != initialWithdrawalCredentials) { + revert SRMigrationIncorrectWithdrawalCredentials(); + } + uint256[] memory moduleIds = sr.getStakingModuleIds(); + if (moduleIds.length != initialModulesCount + 1) { + // 1 new module is added in this upgrade + revert SRMigrationIncorrectModulesCount(); + } + + uint256 newModuleId = moduleIds[moduleIds.length - 1]; + + { + uint256 targetModuleId = IConsolidationMigrator(c.consolidationMigrator).targetModuleId(); + if (newModuleId != targetModuleId) { + revert SRMigrationIncorrectConsolidationMigratorTargetModuleId(newModuleId, targetModuleId); + } + } + + ModuleStateConfig memory stateConfig = sr.getStakingModuleStateConfig(newModuleId); + if ( + stateConfig.moduleAddress != cm.module || stateConfig.moduleFee != cm.stakingModuleFee + || stateConfig.treasuryFee != cm.treasuryFee || stateConfig.stakeShareLimit != cm.stakeShareLimit + || stateConfig.priorityExitShareThreshold != cm.priorityExitShareThreshold + || stateConfig.status != StakingModuleStatus.Active || stateConfig.withdrawalCredentialsType != 0x02 + ) { + revert SRMigrationIncorrectAddStakingModule(); + } + } + + function _checkLidoMigration(GlobalConfig memory g, CoreUpgradeConfig memory) internal view { + uint256 bufferedEther = ILidoUpgrade(g.lido).getBufferedEther(); + if (bufferedEther != initialBufferedEther) { + revert LidoMigrationIncorrectBufferedEther(); + } + + // slither-disable-next-line unused-return + (uint256 depositedValidators, uint256 clValidators,) = ILidoUpgrade(g.lido).getBeaconStat(); + + if (depositedValidators != initialDepositedValidators || clValidators != depositedValidators) { + revert 
LidoMigrationIncorrectDepositedValidators(); + } + + ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) = ILidoUpgrade(g.lido).getBalanceStats(); + + if (clValidatorsBalanceAtLastReport != initialBeaconBalance || clPendingBalanceAtLastReport != 0) { + revert LidoMigrationIncorrectBeaconBalance(); + } + + if ( + depositedSinceLastReport != (initialDepositedValidators - initialBeaconValidators) * 32 ether + || depositedForCurrentReport != 0 + ) { + revert LidoMigrationIncorrectDepositedSinceLastReport(); + } + } + + function _checkDSMMigration(GlobalConfig memory g, CoreUpgradeConfig memory c) internal view { + IDepositSecurityModule dsm = IDepositSecurityModule(c.newDepositSecurityModule); + IDepositSecurityModule oldDsm = IDepositSecurityModule(c.oldDepositSecurityModule); + if (dsm.getOwner() != g.agent) { + revert DSMMigrationIncorrectOwner(); + } + + address[] memory guardians = dsm.getGuardians(); + if (dsm.getGuardianQuorum() != oldDsm.getGuardianQuorum()) { + revert DSMMigrationIncorrectGuardianQuorum(); + } + for (uint256 i = 0; i < guardians.length; ++i) { + if (!oldDsm.isGuardian(guardians[i])) { + revert DSMMigrationIncorrectGuardians(); + } + } + } + + function _assertProxyAdmin(address _proxy, address _admin) internal view { + if (IOssifiableProxy(_proxy).proxy__getAdmin() != _admin) revert IncorrectProxyAdmin(_proxy); + } + + function _assertProxyImplementation(address _proxy, address _implementation) internal view { + address actualImplementation = IOssifiableProxy(_proxy).proxy__getImplementation(); + if (actualImplementation != _implementation) { + revert IncorrectProxyImplementation(_proxy, actualImplementation); + } + } + + function _assertAragonKernelImplementation(IAragonKernel _kernel, bytes32 appId, address _implementation) + internal + view + { + if (_kernel.getApp(_kernel.APP_BASES_NAMESPACE(), appId) != _implementation) { + revert 
IncorrectAragonKernelImplementation(address(_kernel), _implementation);
        }
    }

    /// @dev Reverts unless the WithdrawalsManagerProxy's admin is `_admin`.
    ///      Note: this proxy uses `proxy_getAdmin` (single underscore), unlike OssifiableProxy.
    function _assertWithdrawalsManagerProxyAdmin(address _proxy, address _admin) internal view {
        if (IWithdrawalsManagerProxy(_proxy).proxy_getAdmin() != _admin) revert IncorrectProxyAdmin(_proxy);
    }

    /// @dev Reverts unless the WithdrawalsManagerProxy's implementation is `_implementation`.
    function _assertWithdrawalsManagerProxyImplementation(address _proxy, address _implementation) internal view {
        address actualImplementation = IWithdrawalsManagerProxy(_proxy).implementation();
        if (actualImplementation != _implementation) {
            revert IncorrectProxyImplementation(_proxy, actualImplementation);
        }
    }

    /// @dev Reverts unless `_holder` has Aragon permission `_role` on `_accessControlled` per `_acl`.
    function _assertHasAragonPermission(address _acl, address _accessControlled, bytes32 _role, address _holder)
        internal
        view
    {
        if (!IAragonACL(_acl).hasPermission(_holder, _accessControlled, _role)) {
            revert MissingAragonPermissionHolder(_accessControlled, _role, _holder);
        }
    }

    /// @dev Reverts unless the Aragon permission manager of `_role` on `_accessControlled` is `_holder`.
    function _assertAragonPermissionManager(address _acl, address _accessControlled, bytes32 _role, address _holder)
        internal
        view
    {
        address permissionManager = IAragonACL(_acl).getPermissionManager(_accessControlled, _role);
        if (permissionManager != _holder) {
            revert UnexpectedAragonPermissionManager(_accessControlled, _role, permissionManager, _holder);
        }
    }

    /// @dev Reverts unless OZ role `_role` on `_accessControlled` has no holders at all.
    function _assertZeroOZRoleHolders(address _accessControlled, bytes32 _role) internal view {
        if (_getRoleMemberCount(_accessControlled, _role) != 0) {
            revert NonZeroRoleHolders(_accessControlled, _role);
        }
    }

    /// @dev Reverts unless `_holder` is the one and only holder of OZ role `_role`.
    function _assertSingleOZRoleHolder(address _accessControlled, bytes32 _role, address _holder) internal view {
        if (
            _getRoleMemberCount(_accessControlled, _role) != 1 || _getRoleMember(_accessControlled, _role, 0) != _holder
        ) {
            revert IncorrectOZAccessControlRoleHolders(_accessControlled, _role);
        }
    }

    /// @dev Convenience wrapper over `_assertOZRoleHolders` for exactly two expected holders.
    function _assertTwoOZRoleHolders(address _accessControlled, bytes32 _role, address _holder1, address _holder2)
        internal
        view
    {
        address[] memory holders = new
address[](2);
        holders[0] = _holder1;
        holders[1] = _holder2;
        _assertOZRoleHolders(_accessControlled, _role, holders);
    }

    /// @dev Convenience wrapper over `_assertOZRoleHolders` for exactly three expected holders.
    function _assertThreeOZRoleHolders(
        address _accessControlled,
        bytes32 _role,
        address _holder1,
        address _holder2,
        address _holder3
    ) internal view {
        address[] memory holders = new address[](3);
        holders[0] = _holder1;
        holders[1] = _holder2;
        holders[2] = _holder3;
        _assertOZRoleHolders(_accessControlled, _role, holders);
    }

    /// @dev Reverts unless the OZ role holder set is exactly `_holders` (count match + membership;
    ///      order-insensitive since only membership is checked per holder).
    function _assertOZRoleHolders(address _accessControlled, bytes32 _role, address[] memory _holders) internal view {
        if (_getRoleMemberCount(_accessControlled, _role) != _holders.length) {
            revert IncorrectOZAccessControlRoleHolders(_accessControlled, _role);
        }
        for (uint256 i = 0; i < _holders.length; i++) {
            if (!_hasRole(_accessControlled, _role, _holders[i])) {
                revert IncorrectOZAccessControlRoleHolders(_accessControlled, _role);
            }
        }
    }

    /// @dev Reverts unless `_holder` has OZ role `_role` (other holders may also exist).
    function _assertHasOZRole(address _accessControlled, bytes32 _role, address _holder) internal view {
        if (!_hasRole(_accessControlled, _role, _holder)) {
            revert MissingOZAccessControlRoleHolder(_accessControlled, _role, _holder);
        }
    }

    /// @dev Reverts if `_holder` unexpectedly holds OZ role `_role`.
    function _assertNotOZRoleHolder(address _accessControlled, bytes32 _role, address _holder) internal view {
        if (_hasRole(_accessControlled, _role, _holder)) {
            revert UnexpectedOZAccessControlRoleHolder(_accessControlled, _role, _holder);
        }
    }

    /// @dev Reverts unless the circuit breaker's registered pauser for `_pausable` is `_expectedPauser`.
    function _assertCircuitBreakerPauser(address _circuitBreaker, address _pausable, address _expectedPauser)
        internal
        view
    {
        address actualPauser = ICircuitBreaker(_circuitBreaker).getPauser(_pausable);
        if (actualPauser != _expectedPauser) {
            revert InvalidCircuitBreakerPauser(_pausable, actualPauser, _expectedPauser);
        }
    }

    /// @dev Verifies the Identified DVT Cluster bond-curve deployment: the one-shot setup must
    ///      have executed and both the setup contract and the merkle gate must agree on the curve id.
    function _assertIdentifiedDVTClusterCurve(CSMUpgradeConfig memory _csm) internal view {
        IOneShotCurveSetup curveSetup = IOneShotCurveSetup(_csm.identifiedDVTClusterCurveSetup);
        if (!curveSetup.executed()) {
revert IdentifiedDVTClusterCurveSetupNotExecuted(_csm.identifiedDVTClusterCurveSetup);
        }

        uint256 expectedCurveId = _csm.identifiedDVTClusterBondCurveId;
        uint256 actualSetupCurveId = curveSetup.deployedCurveId();
        if (actualSetupCurveId != expectedCurveId) {
            revert InvalidIdentifiedDVTClusterCurveId(
                _csm.identifiedDVTClusterCurveSetup, actualSetupCurveId, expectedCurveId
            );
        }

        uint256 actualGateCurveId = IMerkleGate(_csm.identifiedDVTClusterGate).curveId();
        if (actualGateCurveId != expectedCurveId) {
            revert InvalidIdentifiedDVTClusterCurveId(_csm.identifiedDVTClusterGate, actualGateCurveId, expectedCurveId);
        }
    }

    /// @dev Reverts unless the OZ `getInitializedVersion()` of `_versioned` equals `_expectedVersion`.
    function _assertInitializedContractVersion(address _versioned, uint64 _expectedVersion) internal view {
        uint64 actualVersion = IInitializedVersionView(_versioned).getInitializedVersion();
        if (actualVersion != _expectedVersion) {
            revert InvalidInitializedContractVersion(_versioned, actualVersion, _expectedVersion);
        }
    }

    /// @dev Reverts unless `getContractVersion()` of `_versioned` equals `_expectedVersion`.
    ///      Fix: report the ACTUAL on-chain version in the error, matching the
    ///      `InvalidContractVersion(address, uint256 actualVersion)` declaration (the previous
    ///      code passed the expected version into the `actualVersion` slot).
    function _assertContractVersion(address _versioned, uint256 _expectedVersion) internal view {
        uint256 actualVersion = IVersioned(_versioned).getContractVersion();
        if (actualVersion != _expectedVersion) {
            revert InvalidContractVersion(_versioned, actualVersion);
        }
    }

    /// @dev Reverts unless the oracle's consensus version equals `_expectedVersion`.
    ///      Same fix as `_assertContractVersion`: the error carries the actual value.
    function _assertOracleConsensusVersion(address _oracle, uint256 _expectedVersion) internal view {
        uint256 actualVersion = IBaseOracle(_oracle).getConsensusVersion();
        if (actualVersion != _expectedVersion) {
            revert InvalidOracleConsensusVersion(_oracle, actualVersion);
        }
    }

    /// @dev Reverts unless the address recorded in the locator matches the expected app address.
    function _assertLocatorAddress(address _locatorAddress, address _appAddress) internal pure {
        if (_locatorAddress != _appAddress) {
            revert InvalidLocatorAppAddress(_locatorAddress, _appAddress);
        }
    }

    // OZ IAccessControlEnumerable wrappers
    function _hasRole(address _accessControlled, bytes32 _role, address _account) internal view returns (bool) {
        return IAccessControl(_accessControlled).hasRole(_role, _account);
    }

    function _getRoleMemberCount(address _accessControlled, bytes32 _role) internal view
returns (uint256) {
        return IAccessControlEnumerable(_accessControlled).getRoleMemberCount(_role);
    }

    function _getRoleMember(address _accessControlled, bytes32 _role, uint256 _index) internal view returns (address) {
        return IAccessControlEnumerable(_accessControlled).getRoleMember(_role, _index);
    }

    /// @dev Reads the transient-storage flag (EIP-1153 `tload`) set by the upgrade start;
    ///      true only within the same transaction as the `start` call.
    function _isStartCalledInThisTx() internal view returns (bool isStartCalledInThisTx) {
        assembly {
            isStartCalledInThisTx := tload(UPGRADE_STARTED_SLOT)
        }
    }

    // Upgrade lifecycle errors
    error OnlyAgentCanUpgrade();
    error StartAndFinishMustBeInSameTx();
    error StartAlreadyCalledInThisTx();
    error Expired();
    error UpgradeAlreadyStarted();
    error UpgradeAlreadyFinished();

    // Generic post-upgrade assertion errors
    error IncorrectProxyAdmin(address proxy);
    error IncorrectProxyImplementation(address proxy, address implementation);
    error InvalidContractVersion(address contractAddress, uint256 actualVersion);
    error InvalidOracleConsensusVersion(address oracle, uint256 actualVersion);
    error InvalidLocatorAppAddress(address locatorAddress, address appAddress);
    error MissingAragonPermissionHolder(address contractAddress, bytes32 role, address holder);
    error UnexpectedAragonPermissionManager(
        address contractAddress, bytes32 role, address actualManager, address expectedManager
    );
    error IncorrectOZAccessControlRoleHolders(address contractAddress, bytes32 role);
    error MissingOZAccessControlRoleHolder(address contractAddress, bytes32 role, address holder);
    error UnexpectedOZAccessControlRoleHolder(address contractAddress, bytes32 role, address holder);
    error NonZeroRoleHolders(address contractAddress, bytes32 role);
    error IncorrectAragonKernelImplementation(address kernel, address implementation);
    error IncorrectLinkedContractAddress(address contractAddress, address actualAddress, address expectedAddress);
    error InvalidHashConsensusInitialEpoch(address consensus, uint256 actualEpoch, uint256 expectedEpoch);
    error CMModuleIsPaused(address module);
    error
InvalidInitializedContractVersion(address contractAddress, uint64 actualVersion, uint64 expectedVersion);
    error InvalidCircuitBreakerPauser(address pausable, address actualPauser, address expectedPauser);
    error IdentifiedDVTClusterCurveSetupNotExecuted(address curveSetup);
    error InvalidIdentifiedDVTClusterCurveId(address contractAddress, uint256 actualCurveId, uint256 expectedCurveId);

    // Migration check errors, grouped per checked subsystem
    error LidoMigrationIncorrectBufferedEther();
    error LidoMigrationIncorrectDepositedValidators();
    error LidoMigrationIncorrectBeaconBalance();
    error LidoMigrationIncorrectDepositedSinceLastReport();

    error SRMigrationIncorrectAddStakingModule();
    error SRMigrationIncorrectModulesCount();
    error SRMigrationIncorrectWithdrawalCredentials();
    error SRMigrationIncorrectConsolidationMigratorTargetModuleId(uint256 newModuleId, uint256 targetModuleId);

    error DSMMigrationIncorrectOwner();
    error DSMMigrationIncorrectGuardianQuorum();
    error DSMMigrationIncorrectGuardians();
}
diff --git a/contracts/upgrade/UpgradeTemporaryAdmin.sol b/contracts/upgrade/UpgradeTemporaryAdmin.sol
new file mode 100644
index 0000000000..60c863d479
--- /dev/null
+++ b/contracts/upgrade/UpgradeTemporaryAdmin.sol
@@ -0,0 +1,126 @@
// SPDX-FileCopyrightText: 2025 Lido
// SPDX-License-Identifier: GPL-3.0

// See contracts/COMPILERS.md
pragma solidity 0.8.25;

import {IAccessControl} from "@openzeppelin/contracts-v5.2/access/IAccessControl.sol";
import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol";
import {IEasyTrack, IDepositSecurityModule} from "./UpgradeTypes.sol";

/**
 * @title UpgradeTemporaryAdmin
 * @notice Auxiliary contract that serves as temporary admin during upgrade deployment
 * @dev Used to perform intermediate admin tasks (like setting roles)
 * and then transfer admin role to the final agent, reducing deployer privileges
 */
contract UpgradeTemporaryAdmin {
    // OZ AccessControl admin role (bytes32(0))
    bytes32 public constant DEFAULT_ADMIN_ROLE = 0x00;

    // Role ids used on the contracts this temporary admin configures
    bytes32 internal constant
PAUSE_ROLE = keccak256("PAUSE_ROLE");
    bytes32 internal constant RESUME_ROLE = keccak256("RESUME_ROLE");
    bytes32 internal constant ALLOW_PAIR_ROLE = keccak256("ALLOW_PAIR_ROLE");
    bytes32 internal constant DISALLOW_PAIR_ROLE = keccak256("DISALLOW_PAIR_ROLE");
    bytes32 internal constant PUBLISH_ROLE = keccak256("PUBLISH_ROLE");
    bytes32 internal constant MANAGE_ROLE = keccak256("MANAGE_ROLE");
    bytes32 internal constant REMOVE_ROLE = keccak256("REMOVE_ROLE");
    bytes32 internal constant ADD_CONSOLIDATION_REQUEST_ROLE = keccak256("ADD_CONSOLIDATION_REQUEST_ROLE");
    bytes32 internal constant TOP_UP_ROLE = keccak256("TOP_UP_ROLE");

    // Final admin that receives DEFAULT_ADMIN_ROLE / DSM ownership after setup
    address public immutable AGENT;

    // One-shot latch: completeSetup can run only once
    bool public isSetupComplete;

    constructor(address _agent) {
        if (_agent == address(0)) revert ZeroAddress();
        AGENT = _agent;
    }

    /**
     * @notice Complete setup for all contracts - grants all roles and transfers admin to agent
     * @dev This is the main external function that should be called after deployment
     * NOTE(review): there is no caller restriction here — any account can invoke this once
     * with arbitrary addresses before the intended deployer does; confirm this is acceptable
     * in the deployment flow (e.g. deploy + completeSetup in one transaction).
     */
    function completeSetup(
        address _lidoLocatorImpl,
        address _easyTrack,
        address _resealManager,
        address _circuitBreaker,
        address _consolidationMigrator,
        address _consolidationMigratorCommittee,
        address _consolidationBus,
        address _topUpGatewayDepositor,
        address _oldDepositSecurityModule
    ) external {
        if (isSetupComplete) revert SetupAlreadyCompleted();
        if (_lidoLocatorImpl == address(0)) revert ZeroLidoLocator();
        if (_easyTrack == address(0)) revert ZeroEasyTrack();

        // Set the latch before external calls (checks-effects-interactions)
        isSetupComplete = true;

        ILidoLocator locator = ILidoLocator(_lidoLocatorImpl);
        address evmScriptExecutor = IEasyTrack(_easyTrack).evmScriptExecutor();
        address consolidationGateway = locator.consolidationGateway();
        address topUpGateway = locator.topUpGateway();
        address depositSecurityModule = locator.depositSecurityModule();

        _setupDSM(depositSecurityModule, _oldDepositSecurityModule);
        _setupConsolidationMigrator(_consolidationMigrator, evmScriptExecutor,
_consolidationMigratorCommittee);
        _setupConsolidationBus(_consolidationBus, _consolidationMigrator);
        _setupConsolidationGateway(consolidationGateway, _consolidationBus, _circuitBreaker, _resealManager);
        _setupTopUpGateway(topUpGateway, _topUpGatewayDepositor);

        emit SetupCompleted(_consolidationMigrator, _consolidationBus, consolidationGateway, topUpGateway);
    }

    /// @dev Copies the old DSM's guardian set and quorum to the new DSM, then hands ownership to the agent.
    function _setupDSM(address _dsm, address _oldDsm) private {
        IDepositSecurityModule dsm = IDepositSecurityModule(_dsm);
        IDepositSecurityModule oldDsm = IDepositSecurityModule(_oldDsm);

        dsm.addGuardians(oldDsm.getGuardians(), oldDsm.getGuardianQuorum());
        dsm.setOwner(AGENT);
    }

    /// @dev EasyTrack executor may allow pairs; the committee may disallow them.
    function _setupConsolidationMigrator(address _migrator, address _evmScriptExecutor, address _committee) private {
        IAccessControl(_migrator).grantRole(ALLOW_PAIR_ROLE, _evmScriptExecutor);
        IAccessControl(_migrator).grantRole(DISALLOW_PAIR_ROLE, _committee);

        _transferAdminToAgent(_migrator);
    }

    /// @dev Migrator becomes the sole publisher; this contract drops its own manage/remove roles.
    function _setupConsolidationBus(address _bus, address _migrator) private {
        IAccessControl(_bus).grantRole(PUBLISH_ROLE, _migrator);
        IAccessControl(_bus).renounceRole(MANAGE_ROLE, address(this));
        IAccessControl(_bus).renounceRole(REMOVE_ROLE, address(this));

        _transferAdminToAgent(_bus);
    }

    /// @dev Circuit breaker and reseal manager may pause; reseal manager may resume; the bus may add requests.
    function _setupConsolidationGateway(address _gateway, address _bus, address _cb, address _resealManager) private {
        IAccessControl(_gateway).grantRole(PAUSE_ROLE, _cb);
        IAccessControl(_gateway).grantRole(PAUSE_ROLE, _resealManager);
        IAccessControl(_gateway).grantRole(RESUME_ROLE, _resealManager);
        IAccessControl(_gateway).grantRole(ADD_CONSOLIDATION_REQUEST_ROLE, _bus);

        _transferAdminToAgent(_gateway);
    }

    /// @dev The designated depositor is the only account allowed to top up.
    function _setupTopUpGateway(address _gateway, address _depositor) private {
        IAccessControl(_gateway).grantRole(TOP_UP_ROLE, _depositor);

        _transferAdminToAgent(_gateway);
    }

    /// @dev Grant the agent admin first, then renounce our own admin — order matters,
    ///      renouncing first would leave the contract without any admin.
    function _transferAdminToAgent(address _contract) private {
        IAccessControl(_contract).grantRole(DEFAULT_ADMIN_ROLE, AGENT);
        IAccessControl(_contract).renounceRole(DEFAULT_ADMIN_ROLE, address(this));
    }

    error ZeroAddress();
    error ZeroLidoLocator();
    error ZeroEasyTrack();
    error SetupAlreadyCompleted();

    event SetupCompleted(
        address consolidationMigrator, address consolidationBus, address consolidationGateway, address topUpGateway
    );
}
diff --git a/contracts/upgrade/UpgradeTypes.sol b/contracts/upgrade/UpgradeTypes.sol
new file mode 100644
index 0000000000..75a57260e3
--- /dev/null
+++ b/contracts/upgrade/UpgradeTypes.sol
@@ -0,0 +1,492 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity 0.8.25;

import {IAccessControlEnumerable} from "@openzeppelin/contracts-v5.2/access/extensions/IAccessControlEnumerable.sol";
import {IAccessControlEnumerable as IAccessControlEnumerableV4} from "@openzeppelin/contracts-v4.4/access/IAccessControlEnumerable.sol";

import {IVersioned} from "contracts/common/interfaces/IVersioned.sol";
import {ILido} from "contracts/common/interfaces/ILido.sol";
import {ModuleStateConfig, StakingModuleConfig} from "contracts/0.8.25/sr/SRTypes.sol";

// ============================
// Interfaces
// ============================

interface IAragonKernel {
    function acl() external view returns (address); //IAragonACL
    function getApp(bytes32 _namespace, bytes32 _appId) external view returns (address);
    function setApp(bytes32 _namespace, bytes32 _appId, address _app) external;
    function APP_BASES_NAMESPACE() external view returns (bytes32);
    function APP_MANAGER_ROLE() external view returns (bytes32);
}

interface IAragonACL {
    function hasPermission(address _who, address _where, bytes32 _what) external view returns (bool);
    function getPermissionManager(address _app, bytes32 _role) external view returns (address);
    function createPermission(address _entity, address _app, bytes32 _role, address _manager) external;
    function grantPermission(address _entity, address
_app, bytes32 _role) external;
    function revokePermission(address _entity, address _app, bytes32 _role) external;
}

interface IAragonApp {
    function kernel() external view returns (address); //IAragonKernel
    function appId() external view returns (bytes32);
}

interface ITimeConstraints {
    function checkTimeAfterTimestampAndEmit(uint40 timestamp) external;
    function checkTimeBeforeTimestampAndEmit(uint40 timestamp) external;
    function checkTimeWithinDayTimeAndEmit(uint32 startDayTime, uint32 endDayTime) external;
}

interface IBaseOracle is IAccessControlEnumerableV4, IVersioned {
    function getConsensusContract() external view returns (address);
    function getConsensusVersion() external view returns (uint256);
}

interface IEasyTrack {
    function evmScriptExecutor() external view returns (address);
    function isEVMScriptFactory(address _maybeEVMScriptFactory) external view returns (bool);
    function getEVMScriptFactories() external view returns (address[] memory);
    function addEVMScriptFactory(address _evmScriptFactory, bytes memory _permissions) external;
    function removeEVMScriptFactory(address _evmScriptFactory) external;
}

interface IStakingRouterUpgrade is IAccessControlEnumerable {
    // existing roles

    function getWithdrawalCredentials() external view returns (bytes32);
    function finalizeUpgrade_v4() external;
    function updateModuleShares(uint256 _stakingModuleId, uint16 _stakeShareLimit, uint16 _priorityExitShareThreshold)
        external;

    function addStakingModule(
        string calldata _name,
        address _stakingModuleAddress,
        StakingModuleConfig calldata _stakingModuleConfig
    ) external;

    function getStakingModulesCount() external view returns (uint256);
    function getStakingModuleIds() external view returns (uint256[] memory);
    function getStakingModuleStateConfig(uint256 _stakingModuleId)
        external
        view
        returns (ModuleStateConfig memory stateConfig);

    function STAKING_MODULE_SHARE_MANAGE_ROLE() external view
returns (bytes32);
    function STAKING_MODULE_UNVETTING_ROLE() external view returns (bytes32);
}

interface IDepositSecurityModule {
    function getOwner() external view returns (address);
    function setOwner(address newValue) external;
    function isGuardian(address addr) external view returns (bool);
    function getGuardianQuorum() external view returns (uint256);
    function getGuardians() external view returns (address[] memory);
    function addGuardians(address[] memory addresses, uint256 newQuorum) external;
}

interface IConsolidationMigrator {
    function allowPair(uint256 sourceOperatorId, uint256 targetOperatorId, address submitter) external;
    function disallowPair(uint256 sourceOperatorId, uint256 targetOperatorId) external;
    function sourceModuleId() external view returns (uint256);
    function targetModuleId() external view returns (uint256);
}

interface IMerkleGate {
    function curveId() external view returns (uint256);
    function setTreeParams(bytes32 treeRoot, string calldata treeCid) external;
}

interface IOneShotCurveSetup {
    function executed() external view returns (bool);
    function deployedCurveId() external view returns (uint256);
    function execute() external returns (uint256 curveId);
}

interface ILidoUpgrade is ILido {
    function getBufferedEther() external view returns (uint256);
    function finalizeUpgrade_v4() external;
}

interface IAccountingOracleUpgrade is IBaseOracle {
    function finalizeUpgrade_v5(uint256 consensusVersion) external;
}

interface IValidatorsExitBusOracleUpgrade is IBaseOracle {
    function finalizeUpgrade_v3(
        uint256 maxValidatorsPerReport,
        uint256 maxExitBalanceEth,
        uint256 balancePerFrameEth,
        uint256 frameDurationInSec,
        uint256 consensusVersion
    ) external;
}

interface IWithdrawalVaultUpgrade {
    function finalizeUpgrade_v3() external;
}

// NOTE: this proxy uses single-underscore `proxy_*` methods, unlike IOssifiableProxy
interface IWithdrawalsManagerProxy {
    function proxy_getAdmin() external view returns (address);
    function implementation() external view
returns (address);
    function proxy_upgradeTo(address newImplementation, bytes memory setupCalldata) external;
}

struct WithdrawnValidatorInfo {
    uint256 nodeOperatorId;
    uint256 keyIndex;
    uint256 exitBalance;
    uint256 slashingPenalty;
    bool isSlashed;
}

interface IBaseModuleV3 {
    function LIDO_LOCATOR() external view returns (address);
    function PARAMETERS_REGISTRY() external view returns (address);
    function ACCOUNTING() external view returns (address);
    function EXIT_PENALTIES() external view returns (address);
    function FEE_DISTRIBUTOR() external view returns (address);
    function reportSlashedWithdrawnValidators(WithdrawnValidatorInfo[] calldata validatorInfos) external;
    function settleGeneralDelayedPenalty(uint256[] calldata nodeOperatorIds, uint256[] calldata maxAmounts) external;
}

interface ICuratedModule is IBaseModuleV3 {
    function META_REGISTRY() external view returns (address);
}

interface IOssifiableProxyV2 {
    function proxy__upgradeTo(address newImplementation_) external;
    function proxy__upgradeToAndCall(address newImplementation_, bytes calldata setupCalldata_) external;
}

interface ICSModuleV3 {
    function finalizeUpgradeV3() external;
    function resume() external;
}

interface IParametersRegistryV3 {
    function finalizeUpgradeV3() external;
}

interface IFeeOracleV3 {
    function finalizeUpgradeV3(uint256 consensusVersion) external;
    function STRIKES() external view returns (address);
    function getConsensusContract() external view returns (address);
}

interface IAccountingV3 {
    function finalizeUpgradeV3() external;
    function FEE_DISTRIBUTOR() external view returns (address);
}

interface IFeeDistributorV3 {
    function ORACLE() external view returns (address);
    function finalizeUpgradeV3() external;
}

interface IValidatorStrikesV3 {
    function ejector() external view returns (address);
    function setEjector(address newEjector) external;
}

interface IUpdateStakingModuleShareLimits {
    struct ModuleShareParams {
        uint16 currentStakeShareLimit;
        uint16 newStakeShareLimit;
        uint16 currentPriorityExitShareThreshold;
        uint16 newPriorityExitShareThreshold;
    }

    function validateParams(ModuleShareParams calldata params) external view;
}

interface ITriggerableWithdrawalsGatewayUpgrade is IAccessControlEnumerable {
    function setExitRequestLimit(uint256 maxExitRequestsLimit, uint256 exitsPerFrame, uint256 frameDurationInSec)
        external;
    function TW_EXIT_LIMIT_MANAGER_ROLE() external view returns (bytes32);
}

interface IHashConsensusV3 {
    function updateInitialEpoch(uint256 epoch) external;
}

interface IMetaRegistry {
    struct SubNodeOperator {
        uint64 nodeOperatorId;
        uint16 share;
    }

    struct ExternalOperator {
        bytes data;
    }

    struct OperatorGroup {
        SubNodeOperator[] subNodeOperators;
        ExternalOperator[] externalOperators;
    }

    function createOrUpdateOperatorGroup(uint256 groupId, OperatorGroup calldata groupInfo) external;
}

interface IInitializedVersionView {
    function getInitializedVersion() external view returns (uint64);
}

//
// ------ Template deploy configuration params ------
//

struct UpgradeParameters {
    // Existing contracts
    address locator;
    address agent;
    address voting;
    address dualGovernance;
    address resealManager;
    address circuitBreaker;
    address easyTrack;

    EasyTrackNewFactories newFactories;
    EasyTrackOldFactories oldFactories;
    // Upgrade config for protocol core
    CoreUpgradeParams coreUpgrade;

    // Upgrade config for CSM/CMv2
    CSMUpgradeParams csmUpgrade;
    CuratedModuleParams curatedModule;
}

struct EasyTrackNewFactories {
    // EasyTrack new factories
    address UpdateStakingModuleShareLimits;
    address AllowConsolidationPair;
    // CSM
    address SetMerkleGateTreeForCSM;
    address ReportWithdrawalsForSlashedValidatorsForCSM;
    address SettleGeneralDelayedPenaltyForCSM;
    // CM
    address SetMerkleGateTreeForCM;
    address
ReportWithdrawalsForSlashedValidatorsForCM;
    address SettleGeneralDelayedPenaltyForCM;
    address CreateOrUpdateOperatorGroupForCM;
}

struct EasyTrackOldFactories {
    address CSMSettleElStealingPenalty;
    address CSMSetVettedGateTree;
}

struct CoreUpgradeParams {
    // Old implementations
    address oldLocatorImpl;
    address oldLidoImpl;
    address oldAccountingImpl;
    address oldAccountingOracleImpl;
    address oldStakingRouterImpl;
    address oldWithdrawalVaultImpl;
    address oldValidatorsExitBusOracleImpl;

    // New implementations
    address newLocatorImpl;
    address newLidoImpl;
    address newAccountingImpl;
    address newAccountingOracleImpl;
    address newStakingRouterImpl;
    address newWithdrawalVaultImpl;
    address newValidatorsExitBusOracleImpl;
    address consolidationBusImpl;
    address consolidationMigratorImpl;
    address topUpGatewayImpl;

    // New fancy proxy and blueprint contracts
    address consolidationBus;
    address consolidationMigrator;
    address topUpGateway;

    // params
    uint256 lidoDepositsReserveTarget;
    address curatedModuleCommittee;
    address topUpGatewayDepositor;
    address consolidationGatewayPauser;

    // twGateway limits
    uint256 twMaxExitRequestsLimit;
    uint256 twExitsPerFrame;
    uint256 twFrameDurationInSec;

    // accounting oracle
    uint256 aoConsensusVersion;

    // validators exit bus oracle
    uint256 veboMaxValidatorsPerReport;
    uint256 veboMaxExitBalanceEth;
    uint256 veboBalancePerFrameEth;
    uint256 veboFrameDurationInSec;
    uint256 veboConsensusVersion;
}

struct CSMUpgradeParams {
    address csmProxy;
    address csmImpl;
    address vettedGateProxy;
    address identifiedDVTClusterGate;
    address identifiedDVTClusterCurveSetup;
    uint256 identifiedDVTClusterBondCurveId;
    address parametersRegistryImpl;
    address feeOracleImpl;
    uint256 feeOracleConsensusVersion;
    address vettedGateImpl;
    address accountingImpl;
    address feeDistributorImpl;
    address exitPenaltiesImpl;
    address strikesImpl;
    address
oldPermissionlessGate;
    address newPermissionlessGate;
    address oldVerifier;
    address newVerifier;
    address ejector;
    address csmCommittee;
}

struct CuratedModuleParams {
    address module;
    address[] curatedGates;
    address verifier;
    address circuitBreakerPauser;
    string moduleName;
    uint256 stakeShareLimit;
    uint256 priorityExitShareThreshold;
    uint256 stakingModuleFee;
    uint256 treasuryFee;
    uint256 maxDepositsPerBlock;
    uint256 minDepositBlockDistance;
    uint256 feeOracleConsensusVersion;
    uint256 hashConsensusInitialEpoch;
}

//
// ------ Shared configs for VotingScript ------
//

struct GlobalConfig {
    address agent;
    address lido;
    address burner;
    address resealManager;
    address circuitBreaker;
    address easyTrack;
    address easyTrackEVMScriptExecutor;
    address stakingRouter;
    address triggerableWithdrawalsGateway;
}

struct CoreUpgradeConfig {
    address kernel;
    address acl;
    bytes32 lidoAppId;

    address locator;

    address oldLocatorImpl;
    address oldLidoImpl;
    address oldAccountingImpl;
    address oldAccountingOracleImpl;
    address oldStakingRouterImpl;
    address oldWithdrawalVaultImpl;
    address oldValidatorsExitBusOracleImpl;
    address oldOracleReportSanityChecker;
    address oldDepositSecurityModule;

    address newLocatorImpl;
    address newLidoImpl;
    address newAccountingImpl;
    address newAccountingOracleImpl;
    address newStakingRouterImpl;
    address newWithdrawalVaultImpl;
    address newValidatorsExitBusOracleImpl;
    address newOracleReportSanityChecker;
    address newDepositSecurityModule;
    address consolidationBusImpl;
    address consolidationMigratorImpl;
    address topUpGatewayImpl;

    address accounting;
    address accountingOracle;
    address validatorsExitBusOracle;
    address withdrawalVault;
    address consolidationGateway;
    address consolidationBus;
    address consolidationMigrator;
    address topUpGateway;

    uint256 lidoDepositsReserveTarget;
    address curatedModuleCommittee;
    address
topUpGatewayDepositor;
    address consolidationGatewayPauser;

    uint256 twMaxExitRequestsLimit;
    uint256 twExitsPerFrame;
    uint256 twFrameDurationInSec;

    uint256 aoConsensusVersion;
    uint256 veboMaxValidatorsPerReport;
    uint256 veboMaxExitBalanceEth;
    uint256 veboBalancePerFrameEth;
    uint256 veboFrameDurationInSec;
    uint256 veboConsensusVersion;
}

struct CSMUpgradeConfig {
    address csm;
    address csmImpl;
    address parametersRegistry;
    address parametersRegistryImpl;
    address feeOracle;
    address feeOracleImpl;
    uint256 feeOracleConsensusVersion;
    address vettedGate;
    address identifiedDVTClusterGate;
    address identifiedDVTClusterCurveSetup;
    uint256 identifiedDVTClusterBondCurveId;
    address vettedGateImpl;
    address accounting;
    address accountingImpl;
    address feeDistributor;
    address feeDistributorImpl;
    address exitPenalties;
    address exitPenaltiesImpl;
    address strikes;
    address strikesImpl;
    address oldPermissionlessGate;
    address oldVerifier;
    address newVerifier;
    address newPermissionlessGate;
    address oldEjector;
    address ejector;
    address csmCommittee;
}

struct CuratedModuleConfig {
    address module;
    address[] curatedGates;
    address parametersRegistry;
    address accounting;
    address ejector;
    address verifier;
    address circuitBreakerPauser;
    address feeDistributor;
    address feeOracle;
    address hashConsensus;
    address strikes;
    string moduleName;
    uint256 stakeShareLimit;
    uint256 priorityExitShareThreshold;
    uint256 stakingModuleFee;
    uint256 treasuryFee;
    uint256 maxDepositsPerBlock;
    uint256 minDepositBlockDistance;
    uint256 feeOracleConsensusVersion;
    uint256 hashConsensusInitialEpoch;
    address metaRegistry;
}
diff --git a/contracts/upgrade/UpgradeVoteScript.sol b/contracts/upgrade/UpgradeVoteScript.sol
new file mode 100644
index 0000000000..4e671db302
--- /dev/null
+++ b/contracts/upgrade/UpgradeVoteScript.sol
@@ -0,0 +1,888 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity 0.8.25;
+ +import {Strings} from "@openzeppelin/contracts-v5.2/utils/Strings.sol"; +import {IAccessControl} from "@openzeppelin/contracts-v5.2/access/IAccessControl.sol"; +import {IOssifiableProxy} from "contracts/common/interfaces/IOssifiableProxy.sol"; +import {ICircuitBreaker} from "contracts/common/interfaces/ICircuitBreaker.sol"; +import {StakingModuleConfig} from "contracts/0.8.25/sr/SRTypes.sol"; +import {OmnibusBase} from "./utils/OmnibusBase.sol"; +import {UpgradeTemplate, UpgradeConfig} from "./UpgradeTemplate.sol"; +import {CallsScriptBuilder} from "./utils/CallScriptBuilder.sol"; +import {IForwarder} from "./interfaces/IForwarder.sol"; +import { + + // ITimeConstraints, + GlobalConfig, + EasyTrackNewFactories, + EasyTrackOldFactories, + CoreUpgradeConfig, + CSMUpgradeConfig, + CuratedModuleConfig, + IAragonKernel, + IAragonACL, + ILidoUpgrade, + IEasyTrack, + IStakingRouterUpgrade, + IAccountingOracleUpgrade, + IValidatorsExitBusOracleUpgrade, + IWithdrawalVaultUpgrade, + IConsolidationMigrator, + IWithdrawalsManagerProxy, + IOssifiableProxyV2, + ICSModuleV3, + IHashConsensusV3, + IParametersRegistryV3, + IFeeOracleV3, + IAccountingV3, + IFeeDistributorV3, + IValidatorStrikesV3, + IUpdateStakingModuleShareLimits, + IBaseModuleV3, + IMerkleGate, + IOneShotCurveSetup, + IMetaRegistry, + ITriggerableWithdrawalsGatewayUpgrade +} from "./UpgradeTypes.sol"; + +/// @title UpgradeVoteScript +/// @notice Script for upgrading Lido protocol components +contract UpgradeVoteScript is OmnibusBase { + using Strings for uint256; + using CallsScriptBuilder for CallsScriptBuilder.Context; + + error InvalidItemsCount(uint256 actual, uint256 expected); + error InvalidMerkleGateAddress(); + // + // Constants + // + // TODO set upon finish with items + uint256 public constant DG_ITEMS_COUNT = 65; + uint256 public constant VOTING_ITEMS_COUNT = 11; + + // Aragon Kernel APP_BASES_NAMESPACE + bytes32 internal constant KERNEL_APP_BASES_NAMESPACE = keccak256("base"); + bytes32 internal 
constant APP_MANAGER_ROLE = keccak256("APP_MANAGER_ROLE"); + bytes32 internal constant BUFFER_RESERVE_MANAGER_ROLE = keccak256("BUFFER_RESERVE_MANAGER_ROLE"); + bytes32 internal constant STAKING_MODULE_SHARE_MANAGE_ROLE = keccak256("STAKING_MODULE_SHARE_MANAGE_ROLE"); + bytes32 internal constant STAKING_MODULE_UNVETTING_ROLE = keccak256("STAKING_MODULE_UNVETTING_ROLE"); + + bytes32 internal constant REPORT_EL_REWARDS_STEALING_PENALTY_ROLE = + keccak256("REPORT_EL_REWARDS_STEALING_PENALTY_ROLE"); + bytes32 internal constant SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE = + keccak256("SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE"); + bytes32 internal constant REPORT_GENERAL_DELAYED_PENALTY_ROLE = keccak256("REPORT_GENERAL_DELAYED_PENALTY_ROLE"); + bytes32 internal constant SETTLE_GENERAL_DELAYED_PENALTY_ROLE = keccak256("SETTLE_GENERAL_DELAYED_PENALTY_ROLE"); + bytes32 internal constant REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE = + keccak256("REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE"); + bytes32 internal constant REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE = + keccak256("REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE"); + bytes32 internal constant START_REFERRAL_SEASON_ROLE = keccak256("START_REFERRAL_SEASON_ROLE"); + bytes32 internal constant END_REFERRAL_SEASON_ROLE = keccak256("END_REFERRAL_SEASON_ROLE"); + bytes32 internal constant ADD_FULL_WITHDRAWAL_REQUEST_ROLE = keccak256("ADD_FULL_WITHDRAWAL_REQUEST_ROLE"); + bytes32 internal constant CREATE_NODE_OPERATOR_ROLE = keccak256("CREATE_NODE_OPERATOR_ROLE"); + bytes32 internal constant SET_BOND_CURVE_ROLE = keccak256("SET_BOND_CURVE_ROLE"); + bytes32 internal constant MANAGE_BOND_CURVES_ROLE = keccak256("MANAGE_BOND_CURVES_ROLE"); + bytes32 internal constant MANAGE_CURVE_PARAMETERS_ROLE = keccak256("MANAGE_CURVE_PARAMETERS_ROLE"); + bytes32 internal constant MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE = + keccak256("MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE"); + bytes32 internal constant PAUSE_ROLE = keccak256("PAUSE_ROLE"); + bytes32 
internal constant RESUME_ROLE = keccak256("RESUME_ROLE"); + bytes32 internal constant REQUEST_BURN_MY_STETH_ROLE = keccak256("REQUEST_BURN_MY_STETH_ROLE"); + bytes32 internal constant REQUEST_BURN_SHARES_ROLE = keccak256("REQUEST_BURN_SHARES_ROLE"); + bytes32 internal constant VERIFIER_ROLE = keccak256("VERIFIER_ROLE"); + bytes32 internal constant SET_TREE_ROLE = keccak256("SET_TREE_ROLE"); + bytes32 internal constant MANAGE_OPERATOR_GROUPS_ROLE = keccak256("MANAGE_OPERATOR_GROUPS_ROLE"); + bytes32 internal constant TW_EXIT_LIMIT_MANAGER_ROLE = keccak256("TW_EXIT_LIMIT_MANAGER_ROLE"); + // + // Immutables + // + address public immutable TEMPLATE; + address public immutable CONFIG; + address public immutable TIME_CONSTRAINTS; + uint32 public immutable ENABLED_DAY_SPAN_START; // = 50400; // 14:00 UTC + uint32 public immutable ENABLED_DAY_SPAN_END; // = 82800; // 23:00 UTC + address internal immutable AGENT; + + struct ScriptParams { + address upgradeTemplate; + address timeConstraints; + uint32 enabledDaySpanStart; + uint32 enabledDaySpanEnd; + } + + constructor(ScriptParams memory _params) + OmnibusBase( + UpgradeConfig(UpgradeTemplate(_params.upgradeTemplate).CONFIG()).VOTING(), + UpgradeConfig(UpgradeTemplate(_params.upgradeTemplate).CONFIG()).DUAL_GOVERNANCE() + ) + { + UpgradeTemplate template = UpgradeTemplate(_params.upgradeTemplate); + UpgradeConfig config = UpgradeConfig(template.CONFIG()); + TEMPLATE = address(template); + CONFIG = address(config); + AGENT = config.AGENT(); + TIME_CONSTRAINTS = _params.timeConstraints; + ENABLED_DAY_SPAN_START = _params.enabledDaySpanStart; // e.g. 50400 = 14:00 UTC + ENABLED_DAY_SPAN_END = _params.enabledDaySpanEnd; // e.g. 
82800 = 23:00 UTC + } + + /// @dev Non DG voting items + function getVotingVoteItems() public view override returns (VoteItem[] memory) { + // start from `2` as `1` is reserved for DG submission item + return _wrapItemsNumber(_getVotingVoteItems(), 2); + } + + /// @dev DG voting items + function getVoteItemsRaw() external view returns (VoteItem[] memory) { + // set prefix to `1`, so all item's description will transform to `1.N. Description...` + return _wrapItemsPrefixNumber(_getVoteItems(), 1, 1); + } + + function getVoteItems() public view override returns (VoteItem[] memory) { + string memory description = "1. Submit a Dual Governance proposal"; + return _wrapItemsForwardPacked(_getVoteItems(), AGENT, description); + + /// @dev kept for future use + // set prefix to `1`, so all item's description will transform to `1.N. Description...` + // return _wrapItemsPrefixNumberForward(_getVoteItems(), AGENT, 1, 1); + } + + function _getVotingVoteItems() internal view returns (VoteItem[] memory items) { + items = new VoteItem[](VOTING_ITEMS_COUNT); + uint256 i = 0; + + UpgradeConfig config = UpgradeConfig(CONFIG); + GlobalConfig memory g = config.getGlobalConfig(); + address easyTrack = g.easyTrack; + + (EasyTrackNewFactories memory etn, EasyTrackOldFactories memory eto) = config.getEasyTrackConfig(); + + // + // Delete old EasyTrack Factories + // + items[i++] = _delETFactoryItem( + "Remove CSMSettleElStealingPenalty ET factory", easyTrack, eto.CSMSettleElStealingPenalty + ); + items[i++] = _delETFactoryItem("Remove CSMSetVettedGateTree ET factory", easyTrack, eto.CSMSetVettedGateTree); + + // + // Add new EasyTrack Factories + // + { + CoreUpgradeConfig memory c = config.getCoreUpgradeConfig(); + + items[i++] = _addETFactoryItem( + "Add UpdateStakingModuleShareLimits ET factory", + easyTrack, + etn.UpdateStakingModuleShareLimits, + bytes.concat( + bytes20(etn.UpdateStakingModuleShareLimits), + bytes4(IUpdateStakingModuleShareLimits.validateParams.selector), + 
bytes20(g.stakingRouter), + bytes4(IStakingRouterUpgrade.updateModuleShares.selector) + ) + ); + + items[i++] = _addETFactoryItem( + "Add AllowConsolidationPair ET factory", + easyTrack, + etn.AllowConsolidationPair, + bytes.concat(bytes20(c.consolidationMigrator), bytes4(IConsolidationMigrator.allowPair.selector)) + ); + } + + { + CSMUpgradeConfig memory c = config.getCSMUpgradeConfig(); + + address[] memory csmGates = new address[](2); + csmGates[0] = c.vettedGate; + csmGates[1] = c.identifiedDVTClusterGate; + + items[i++] = _addETFactoryItem( + "Add SetMerkleGateTree CSM ET factory", + easyTrack, + etn.SetMerkleGateTreeForCSM, + _setMerkleGateTreePermissions(csmGates) + ); + + items[i++] = _addETFactoryItem( + "Add ReportWithdrawalsForSlashedValidators CSM ET factory", + easyTrack, + etn.ReportWithdrawalsForSlashedValidatorsForCSM, + bytes.concat(bytes20(c.csm), bytes4(IBaseModuleV3.reportSlashedWithdrawnValidators.selector)) + ); + + items[i++] = _addETFactoryItem( + "Add SettleGeneralDelayedPenalty CSM ET factory", + easyTrack, + etn.SettleGeneralDelayedPenaltyForCSM, + bytes.concat(bytes20(c.csm), bytes4(IBaseModuleV3.settleGeneralDelayedPenalty.selector)) + ); + } + + { + CuratedModuleConfig memory c = config.getCuratedModuleConfig(); + + items[i++] = _addETFactoryItem( + "Add SetMerkleGateTree CM ET factory", + easyTrack, + etn.SetMerkleGateTreeForCM, + _setMerkleGateTreePermissions(c.curatedGates) + ); + + items[i++] = _addETFactoryItem( + "Add ReportWithdrawalsForSlashedValidators CM ET factory", + easyTrack, + etn.ReportWithdrawalsForSlashedValidatorsForCM, + bytes.concat(bytes20(c.module), bytes4(IBaseModuleV3.reportSlashedWithdrawnValidators.selector)) + ); + + items[i++] = _addETFactoryItem( + "Add SettleGeneralDelayedPenalty CM ET factory", + easyTrack, + etn.SettleGeneralDelayedPenaltyForCM, + bytes.concat(bytes20(c.module), bytes4(IBaseModuleV3.settleGeneralDelayedPenalty.selector)) + ); + + items[i++] = _addETFactoryItem( + "Add 
CreateOrUpdateOperatorGroup CM ET factory", + easyTrack, + etn.CreateOrUpdateOperatorGroupForCM, + bytes.concat(bytes20(c.metaRegistry), bytes4(IMetaRegistry.createOrUpdateOperatorGroup.selector)) + ); + } + if (i != VOTING_ITEMS_COUNT) revert InvalidItemsCount(i, VOTING_ITEMS_COUNT); + } + + function _getVoteItems() internal view returns (VoteItem[] memory items) { + items = new VoteItem[](DG_ITEMS_COUNT); + uint256 i = 0; + + UpgradeConfig config = UpgradeConfig(CONFIG); + GlobalConfig memory g = config.getGlobalConfig(); + address agent = g.agent; + address evmScriptExecutor = g.easyTrackEVMScriptExecutor; + address stakingRouter = g.stakingRouter; + + // TODO time constraints are not relevant on Hoodi testnet, but can be re-vised on mainnet + // items[i++] = _item({ + // description: "Ensure DG proposal execution is within defined time window", + // to: TIME_CONSTRAINTS, + // data: abi.encodeCall( + // ITimeConstraints.checkTimeWithinDayTimeAndEmit, (ENABLED_DAY_SPAN_START, ENABLED_DAY_SPAN_END) + // ) + // }); + + items[i++] = _item({ + description: "Call UpgradeTemplate.startUpgrade", + to: TEMPLATE, + data: abi.encodeCall(UpgradeTemplate.startUpgrade, ()) + }); + + // + // Core upgrade + // + { + CoreUpgradeConfig memory c = config.getCoreUpgradeConfig(); + + items[i++] = _proxyUpgradeToItem({ + description: "Upgrade LidoLocator implementation", to: c.locator, impl: c.newLocatorImpl + }); + + /// @notice updating StakingRouter implementation and call finalizeUpgrade_v4 + items[i++] = _proxyUpgradeToAndCallItem({ + description: "Upgrade StakingRouter implementation", + to: stakingRouter, + impl: c.newStakingRouterImpl, + data: abi.encodeCall(IStakingRouterUpgrade.finalizeUpgrade_v4, ()) + }); + + /// @notice updating AccountingOracle implementation and call finalizeUpgrade_v5 + items[i++] = _proxyUpgradeToAndCallItem({ + description: "Upgrade AccountingOracle implementation", + to: c.accountingOracle, + impl: c.newAccountingOracleImpl, + data: 
abi.encodeCall(IAccountingOracleUpgrade.finalizeUpgrade_v5, (c.aoConsensusVersion)) + }); + + /// @notice updating ValidatorsExitBusOracle implementation and call finalizeUpgrade_v3 + items[i++] = _proxyUpgradeToAndCallItem({ + description: "Upgrade ValidatorsExitBusOracle implementation", + to: c.validatorsExitBusOracle, + impl: c.newValidatorsExitBusOracleImpl, + data: abi.encodeCall( + IValidatorsExitBusOracleUpgrade.finalizeUpgrade_v3, + ( + c.veboMaxValidatorsPerReport, + c.veboMaxExitBalanceEth, + c.veboBalancePerFrameEth, + c.veboFrameDurationInSec, + c.veboConsensusVersion + ) + ) + }); + + /// @notice updating Accounting implementation (no finalizeUpgrade) + items[i++] = _proxyUpgradeToItem({ + description: "Upgrade Accounting implementation", to: c.accounting, impl: c.newAccountingImpl + }); + + /// @notice updating WithdrawalVault implementation and call finalizeUpgrade_v3 + items[i++] = _item({ + description: "Upgrade WithdrawalVault implementation", + to: c.withdrawalVault, + data: abi.encodeCall( + IWithdrawalsManagerProxy.proxy_upgradeTo, + (c.newWithdrawalVaultImpl, abi.encodeCall(IWithdrawalVaultUpgrade.finalizeUpgrade_v3, ())) + ) + }); + + items[i++] = _item({ + description: "Grant Aragon APP_MANAGER_ROLE to the AGENT", + to: c.acl, + data: abi.encodeCall(IAragonACL.grantPermission, (agent, c.kernel, APP_MANAGER_ROLE)) + }); + + items[i++] = _item({ + description: "Set Lido implementation in Kernel", + to: c.kernel, + data: abi.encodeCall(IAragonKernel.setApp, (KERNEL_APP_BASES_NAMESPACE, c.lidoAppId, c.newLidoImpl)) + }); + + items[i++] = _item({ + description: "Revoke Aragon APP_MANAGER_ROLE from the AGENT", + to: c.acl, + data: abi.encodeCall(IAragonACL.revokePermission, (agent, c.kernel, APP_MANAGER_ROLE)) + }); + + items[i++] = _item({ + description: "Create and grant Aragon BUFFER_RESERVE_MANAGER_ROLE to the AGENT", + to: c.acl, + data: abi.encodeCall(IAragonACL.createPermission, (agent, g.lido, BUFFER_RESERVE_MANAGER_ROLE, agent)) + }); + 
+ items[i++] = _item({ + description: "Call finalizeUpgrade_v4 on Lido", + to: g.lido, + data: abi.encodeCall(ILidoUpgrade.finalizeUpgrade_v4, ()) + }); + + /// @notice grant STAKING_MODULE_SHARE_MANAGE_ROLE to EasyTrack executor + items[i++] = _ozGrantRoleItem({ + description: "Grant STAKING_MODULE_SHARE_MANAGE_ROLE to EasyTrack executor", + to: stakingRouter, + role: STAKING_MODULE_SHARE_MANAGE_ROLE, + account: evmScriptExecutor + }); + + /// @notice revoke STAKING_MODULE_UNVETTING_ROLE from old DSM + items[i++] = _ozRevokeRoleItem({ + description: "Revoke STAKING_MODULE_UNVETTING_ROLE from old DSM", + to: stakingRouter, + role: STAKING_MODULE_UNVETTING_ROLE, + account: c.oldDepositSecurityModule + }); + + /// @notice grant STAKING_MODULE_UNVETTING_ROLE to new DSM + items[i++] = _ozGrantRoleItem({ + description: "Grant STAKING_MODULE_UNVETTING_ROLE to new DSM", + to: stakingRouter, + role: STAKING_MODULE_UNVETTING_ROLE, + account: c.newDepositSecurityModule + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant TW_EXIT_LIMIT_MANAGER_ROLE to Agent on TWGateway", + to: g.triggerableWithdrawalsGateway, + role: TW_EXIT_LIMIT_MANAGER_ROLE, + account: agent + }); + + items[i++] = _item({ + description: "Set TWGateway exit request limits", + to: g.triggerableWithdrawalsGateway, + data: abi.encodeCall( + ITriggerableWithdrawalsGatewayUpgrade.setExitRequestLimit, + (c.twMaxExitRequestsLimit, c.twExitsPerFrame, c.twFrameDurationInSec) + ) + }); + + items[i++] = _registerCircuitBreakerPauserItem( + "ConsolidationGateway", g.circuitBreaker, c.consolidationGateway, c.consolidationGatewayPauser + ); + } + + // + // CSM Upgrade items + // + { + CSMUpgradeConfig memory c = config.getCSMUpgradeConfig(); + address csm = c.csm; + address vettedGate = c.vettedGate; + // --- Proxy upgrades --- + + items[i++] = _proxyUpgradeToAndCallV2Item({ + description: "Upgrade and finalize CSM v3", + to: csm, + impl: c.csmImpl, + data: abi.encodeCall(ICSModuleV3.finalizeUpgradeV3, ()) + 
}); + + items[i++] = _proxyUpgradeToAndCallV2Item({ + description: "Upgrade and finalize ParametersRegistry v3", + to: c.parametersRegistry, + impl: c.parametersRegistryImpl, + data: abi.encodeCall(IParametersRegistryV3.finalizeUpgradeV3, ()) + }); + + items[i++] = _proxyUpgradeToAndCallV2Item({ + description: "Upgrade and finalize FeeOracle v3", + to: c.feeOracle, + impl: c.feeOracleImpl, + data: abi.encodeCall(IFeeOracleV3.finalizeUpgradeV3, (c.feeOracleConsensusVersion)) + }); + + items[i++] = _proxyUpgradeToItem({ + description: "Upgrade VettedGate implementation", to: vettedGate, impl: c.vettedGateImpl + }); + + items[i++] = _proxyUpgradeToAndCallV2Item({ + description: "Upgrade and finalize Accounting v3", + to: c.accounting, + impl: c.accountingImpl, + data: abi.encodeCall(IAccountingV3.finalizeUpgradeV3, ()) + }); + + items[i++] = _proxyUpgradeToAndCallV2Item({ + description: "Upgrade and finalize FeeDistributor v3", + to: c.feeDistributor, + impl: c.feeDistributorImpl, + data: abi.encodeCall(IFeeDistributorV3.finalizeUpgradeV3, ()) + }); + + items[i++] = _proxyUpgradeToItem({ + description: "Upgrade ExitPenalties implementation", to: c.exitPenalties, impl: c.exitPenaltiesImpl + }); + + items[i++] = _proxyUpgradeToItem({ + description: "Upgrade ValidatorStrikes implementation", to: c.strikes, impl: c.strikesImpl + }); + + // --- Role & permission updates --- + + items[i++] = _item({ + description: "Point ValidatorStrikes to the new Ejector", + to: c.strikes, + data: abi.encodeCall(IValidatorStrikesV3.setEjector, (c.ejector)) + }); + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke REPORT_EL_REWARDS_STEALING_PENALTY_ROLE", + to: csm, + role: REPORT_EL_REWARDS_STEALING_PENALTY_ROLE, + account: c.csmCommittee + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant REPORT_GENERAL_DELAYED_PENALTY_ROLE", + to: csm, + role: REPORT_GENERAL_DELAYED_PENALTY_ROLE, + account: c.csmCommittee + }); + + items[i++] = _ozRevokeRoleItem({ + description: 
"Revoke SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE", + to: csm, + role: SETTLE_EL_REWARDS_STEALING_PENALTY_ROLE, + account: evmScriptExecutor + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant SETTLE_GENERAL_DELAYED_PENALTY_ROLE", + to: csm, + role: SETTLE_GENERAL_DELAYED_PENALTY_ROLE, + account: evmScriptExecutor + }); + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke VERIFIER_ROLE from old verifier", + to: csm, + role: VERIFIER_ROLE, + account: c.oldVerifier + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant VERIFIER_ROLE to new verifier", to: csm, role: VERIFIER_ROLE, account: c.newVerifier + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE to VerifierV3", + to: csm, + role: REPORT_REGULAR_WITHDRAWN_VALIDATORS_ROLE, + account: c.newVerifier + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE to Easy Track", + to: csm, + role: REPORT_SLASHED_WITHDRAWN_VALIDATORS_ROLE, + account: evmScriptExecutor + }); + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke CREATE_NODE_OPERATOR_ROLE from old PermissionlessGate", + to: csm, + role: CREATE_NODE_OPERATOR_ROLE, + account: c.oldPermissionlessGate + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant CREATE_NODE_OPERATOR_ROLE to new PermissionlessGate", + to: csm, + role: CREATE_NODE_OPERATOR_ROLE, + account: c.newPermissionlessGate + }); + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke START_REFERRAL_SEASON_ROLE", + to: vettedGate, + role: START_REFERRAL_SEASON_ROLE, + account: agent + }); + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke END_REFERRAL_SEASON_ROLE", + to: vettedGate, + role: END_REFERRAL_SEASON_ROLE, + account: c.csmCommittee + }); + + items[i++] = + _registerCircuitBreakerPauserItem("CSM new verifier", g.circuitBreaker, c.newVerifier, c.csmCommittee); + items[i++] = _registerCircuitBreakerPauserItem("CSM Ejector", 
g.circuitBreaker, c.ejector, c.csmCommittee); + items[i++] = _registerCircuitBreakerPauserItem( + "CSM identified DVT cluster gate", g.circuitBreaker, c.identifiedDVTClusterGate, c.csmCommittee + ); + + // --- New IDVTC type --- + items[i++] = _ozGrantRoleItem({ + description: "Grant CREATE_NODE_OPERATOR_ROLE to identified DVT cluster gate", + to: csm, + role: CREATE_NODE_OPERATOR_ROLE, + account: c.identifiedDVTClusterGate + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant SET_BOND_CURVE_ROLE to identified DVT cluster gate", + to: c.accounting, + role: SET_BOND_CURVE_ROLE, + account: c.identifiedDVTClusterGate + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant MANAGE_BOND_CURVES_ROLE to identified DVT cluster curve setup", + to: c.accounting, + role: MANAGE_BOND_CURVES_ROLE, + account: c.identifiedDVTClusterCurveSetup + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant MANAGE_CURVE_PARAMETERS_ROLE to identified DVT cluster curve setup", + to: c.parametersRegistry, + role: MANAGE_CURVE_PARAMETERS_ROLE, + account: c.identifiedDVTClusterCurveSetup + }); + + // The setup contract renounces its temporary Accounting/Registry roles during execute(). 
+ items[i++] = _item({ + description: "Execute identified DVT cluster curve setup", + to: c.identifiedDVTClusterCurveSetup, + data: abi.encodeCall(IOneShotCurveSetup.execute, ()) + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE to CSM Committee", + to: c.parametersRegistry, + role: MANAGE_GENERAL_PENALTIES_AND_CHARGES_ROLE, + account: c.csmCommittee + }); + + // --- Burner role migration --- + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke REQUEST_BURN_SHARES_ROLE from CSM Accounting", + to: g.burner, + role: REQUEST_BURN_SHARES_ROLE, + account: c.accounting + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant REQUEST_BURN_MY_STETH_ROLE to CSM Accounting", + to: g.burner, + role: REQUEST_BURN_MY_STETH_ROLE, + account: c.accounting + }); + + // --- TWG role migration --- + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke TWG full-withdrawal role from old Ejector", + to: g.triggerableWithdrawalsGateway, + role: ADD_FULL_WITHDRAWAL_REQUEST_ROLE, + account: c.oldEjector + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant TWG full-withdrawal role to new Ejector", + to: g.triggerableWithdrawalsGateway, + role: ADD_FULL_WITHDRAWAL_REQUEST_ROLE, + account: c.ejector + }); + } + + // + // Curated Module items + // + { + CuratedModuleConfig memory c = config.getCuratedModuleConfig(); + + items[i++] = _item({ + description: "Add Curated module to StakingRouter", + to: stakingRouter, + data: abi.encodeCall( + IStakingRouterUpgrade.addStakingModule, + ( + c.moduleName, + c.module, + StakingModuleConfig({ + stakeShareLimit: c.stakeShareLimit, + priorityExitShareThreshold: c.priorityExitShareThreshold, + stakingModuleFee: c.stakingModuleFee, + treasuryFee: c.treasuryFee, + maxDepositsPerBlock: c.maxDepositsPerBlock, + minDepositBlockDistance: c.minDepositBlockDistance, + withdrawalCredentialsType: 0x02 + }) + ) + ) + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant 
REQUEST_BURN_MY_STETH_ROLE to Curated Accounting", + to: g.burner, + role: REQUEST_BURN_MY_STETH_ROLE, + account: c.accounting + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant TWG full-withdrawal role to Curated Ejector", + to: g.triggerableWithdrawalsGateway, + role: ADD_FULL_WITHDRAWAL_REQUEST_ROLE, + account: c.ejector + }); + + items[i++] = _ozGrantRoleItem({ + description: "Grant RESUME_ROLE to agent on Curated module", + to: c.module, + role: RESUME_ROLE, + account: agent + }); + + items[i++] = _item({ + description: "Resume Curated module", to: c.module, data: abi.encodeCall(ICSModuleV3.resume, ()) + }); + + items[i++] = _ozRevokeRoleItem({ + description: "Revoke RESUME_ROLE from agent on Curated module", + to: c.module, + role: RESUME_ROLE, + account: agent + }); + + items[i++] = _item({ + description: "Update Curated HashConsensus initial epoch", + to: c.hashConsensus, + data: abi.encodeCall(IHashConsensusV3.updateInitialEpoch, (c.hashConsensusInitialEpoch)) + }); + + items[i++] = + _registerCircuitBreakerPauserItem("CuratedModule", g.circuitBreaker, c.module, c.circuitBreakerPauser); + items[i++] = _registerCircuitBreakerPauserItem( + "Curated Accounting", g.circuitBreaker, c.accounting, c.circuitBreakerPauser + ); + items[i++] = _registerCircuitBreakerPauserItem( + "Curated FeeOracle", g.circuitBreaker, c.feeOracle, c.circuitBreakerPauser + ); + items[i++] = _registerCircuitBreakerPauserItem( + "Curated Verifier", g.circuitBreaker, c.verifier, c.circuitBreakerPauser + ); + items[i++] = _registerCircuitBreakerPauserItem( + "Curated Ejector", g.circuitBreaker, c.ejector, c.circuitBreakerPauser + ); + } + + // + // Template: finish upgrade + // + + items[i++] = _item({ + description: "Call UpgradeTemplate.finishUpgrade", + to: TEMPLATE, + data: abi.encodeCall(UpgradeTemplate.finishUpgrade, ()) + }); + + if (i != DG_ITEMS_COUNT) revert InvalidItemsCount(i, DG_ITEMS_COUNT); + } + + // + // Helpers + // + + function _addNumber(string memory s, 
uint256 n) internal pure returns (string memory) { + return string.concat(n.toString(), ". ", s); + } + + function _addPrefixedNumber(string memory s, string memory p, uint256 n) internal pure returns (string memory) { + return string.concat(p, n.toString(), ". ", s); + } + + function _wrapItemsNumber(VoteItem[] memory items, uint256 startNum) internal pure returns (VoteItem[] memory) { + for (uint256 i = 0; i < items.length; ++i) { + uint256 num = i + startNum; + items[i].description = _addNumber(items[i].description, num); + } + return items; + } + + function _wrapItemsPrefixNumber(VoteItem[] memory items, uint256 prefixNum, uint256 startNum) + internal + pure + returns (VoteItem[] memory) + { + string memory prefix = string.concat(prefixNum.toString(), "."); + for (uint256 i = 0; i < items.length; ++i) { + uint256 num = i + startNum; + items[i].description = _addPrefixedNumber(items[i].description, prefix, num); + } + + return items; + } + + /// @dev Wrap item with prefix, add number and forwarded `forwarder` + function _wrapItemsPrefixNumberForward( + VoteItem[] memory items, + address forwarder, + uint256 prefixNum, + uint256 startNum + ) internal pure returns (VoteItem[] memory) { + string memory prefix = string.concat(prefixNum.toString(), "."); + for (uint256 i = 0; i < items.length; ++i) { + uint256 num = i + startNum; + items[i].description = _addPrefixedNumber(items[i].description, prefix, num); + items[i].call = _forwardCall(forwarder, items[i].call.to, items[i].call.data); + } + + return items; + } + + function _wrapItemsForwardPacked(VoteItem[] memory items, address forwarder, string memory description) + internal + pure + returns (VoteItem[] memory) + { + VoteItem[] memory itemsPacked = new VoteItem[](1); + CallsScriptBuilder.Context memory scriptBuilder = CallsScriptBuilder.create(); + for (uint256 i = 0; i < items.length; ++i) { + // slither-disable-next-line unused-return + scriptBuilder.addCall(items[i].call.to, items[i].call.data); + } + + 
itemsPacked[0].description = description; + itemsPacked[0].call = _votingCall(forwarder, abi.encodeCall(IForwarder.forward, scriptBuilder.getResult())); + + return itemsPacked; + } + + function _item(string memory description, address to, bytes memory data) internal pure returns (VoteItem memory) { + return VoteItem({description: description, call: _votingCall(to, data)}); + } + + function _registerCircuitBreakerPauserItem( + string memory label, + address circuitBreaker, + address pausable, + address pauser + ) private pure returns (VoteItem memory) { + return _item({ + description: string.concat("Register CircuitBreaker pauser for ", label), + to: circuitBreaker, + data: abi.encodeCall(ICircuitBreaker.registerPauser, (pausable, pauser)) + }); + } + + function _setMerkleGateTreePermissions(address[] memory gates) private pure returns (bytes memory permissions) { + for (uint256 i = 0; i < gates.length; ++i) { + if (gates[i] == address(0)) revert InvalidMerkleGateAddress(); + permissions = bytes.concat(permissions, bytes20(gates[i]), bytes4(IMerkleGate.setTreeParams.selector)); + } + } + + function _addETFactoryItem(string memory description, address easyTrack, address factory, bytes memory permissions) + private + pure + returns (VoteItem memory) + { + return _item(description, easyTrack, abi.encodeCall(IEasyTrack.addEVMScriptFactory, (factory, permissions))); + } + + function _delETFactoryItem(string memory description, address easyTrack, address factory) + private + pure + returns (VoteItem memory) + { + return _item(description, easyTrack, abi.encodeCall(IEasyTrack.removeEVMScriptFactory, (factory))); + } + + function _ozGrantRoleItem(string memory description, address to, bytes32 role, address account) + internal + pure + returns (VoteItem memory) + { + return _item(description, to, abi.encodeCall(IAccessControl.grantRole, (role, account))); + } + + function _ozRevokeRoleItem(string memory description, address to, bytes32 role, address account) + internal + 
pure + returns (VoteItem memory) + { + return _item(description, to, abi.encodeCall(IAccessControl.revokeRole, (role, account))); + } + + function _proxyUpgradeToItem(string memory description, address to, address impl) + internal + pure + returns (VoteItem memory) + { + return _item(description, to, abi.encodeCall(IOssifiableProxy.proxy__upgradeTo, (impl))); + } + + /// @dev wraps call to the IOssifiableProxy.proxy__upgradeToAndCall + function _proxyUpgradeToAndCallItem(string memory description, address to, address impl, bytes memory data) + internal + pure + returns (VoteItem memory) + { + return _item(description, to, abi.encodeCall(IOssifiableProxy.proxy__upgradeToAndCall, (impl, data, false))); + } + + /// @dev wraps call to the modified IOssifiableProxyV2.proxy__upgradeToAndCall (used in CSM/CM) + function _proxyUpgradeToAndCallV2Item(string memory description, address to, address impl, bytes memory data) + internal + pure + returns (VoteItem memory) + { + return _item(description, to, abi.encodeCall(IOssifiableProxyV2.proxy__upgradeToAndCall, (impl, data))); + } +} diff --git a/contracts/upgrade/V3Addresses.sol b/contracts/upgrade/V3Addresses.sol deleted file mode 100644 index 4021b654fc..0000000000 --- a/contracts/upgrade/V3Addresses.sol +++ /dev/null @@ -1,280 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0 -pragma solidity 0.8.25; - -import {IAccessControlEnumerable} from "@openzeppelin/contracts-v4.4/access/AccessControlEnumerable.sol"; -import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; - -interface IVaultsAdapter { - function evmScriptExecutor() external view returns (address); -} - -interface IStakingRouter is IAccessControlEnumerable { - struct StakingModule { - uint24 id; - address stakingModuleAddress; - uint16 stakingModuleFee; - uint16 treasuryFee; - uint16 stakeShareLimit; - uint8 status; - string name; - uint64 lastDepositAt; - uint256 lastDepositBlock; - uint256 exitedValidatorsCount; - uint16 priorityExitShareThreshold; 
- uint64 maxDepositsPerBlock; - uint64 minDepositBlockDistance; - } - - function getStakingModules() external view returns (StakingModule[] memory res); -} - -interface ICSModule { - function accounting() external view returns (address); -} - -/** - * @title V3UpgradeAddresses - * @notice Stores immutable addresses required for the V3 upgrade process. - * This contract centralizes address management for V3Template and V3VoteScript. - */ -contract V3Addresses { - - struct V3AddressesParams { - // Old implementations - address oldLocatorImpl; - address oldLidoImpl; - address oldAccountingOracleImpl; - address oldTokenRateNotifier; - - // New implementations - address newLocatorImpl; - address newLidoImpl; - address newAccountingOracleImpl; - address newTokenRateNotifier; - - // New fancy proxy and blueprint contracts - address upgradeableBeacon; - address stakingVaultImpl; - address dashboardImpl; - address gateSealForVaults; - - // Existing proxies and contracts - address kernel; - bytes32 lidoAppId; - address agent; - address locator; - address voting; - address dualGovernance; - address acl; - address resealManager; - - // EasyTrack addresses - address easyTrack; - address vaultsAdapter; - - // EasyTrack new factories - address etfAlterTiersInOperatorGrid; - address etfRegisterGroupsInOperatorGrid; - address etfRegisterTiersInOperatorGrid; - address etfUpdateGroupsShareLimitInOperatorGrid; - address etfSetJailStatusInOperatorGrid; - address etfUpdateVaultsFeesInOperatorGrid; - address etfForceValidatorExitsInVaultHub; - address etfSocializeBadDebtInVaultHub; - } - - string public constant CURATED_MODULE_NAME = "curated-onchain-v1"; - string public constant SIMPLE_DVT_MODULE_NAME = "SimpleDVT"; - string public constant CSM_MODULE_NAME = "Community Staking"; - - // - // -------- Pre-upgrade old contracts -------- - // - address public immutable OLD_LOCATOR_IMPL; - address public immutable OLD_BURNER; - address public immutable OLD_ACCOUNTING_ORACLE_IMPL; - address 
public immutable OLD_LIDO_IMPL; - address public immutable OLD_TOKEN_RATE_NOTIFIER; - - // - // -------- Upgraded contracts -------- - // - address public immutable LOCATOR; - address public immutable NEW_LOCATOR_IMPL; - address public immutable LIDO; - address public immutable ACCOUNTING_ORACLE; - address public immutable BURNER; - address public immutable ORACLE_REPORT_SANITY_CHECKER; - address public immutable NEW_LIDO_IMPL; - address public immutable NEW_ACCOUNTING_ORACLE_IMPL; - address public immutable NEW_TOKEN_RATE_NOTIFIER; - - // - // -------- New V3 contracts -------- - // - address public immutable ACCOUNTING; - address payable public immutable VAULT_HUB; - address public immutable PREDEPOSIT_GUARANTEE; - address public immutable OPERATOR_GRID; - address public immutable LAZY_ORACLE; - address public immutable VAULT_FACTORY; - address public immutable UPGRADEABLE_BEACON; - address public immutable STAKING_VAULT_IMPL; - address public immutable DASHBOARD_IMPL; - address public immutable GATE_SEAL; - - // - // -------- EasyTrack addresses -------- - // - - address public immutable EASY_TRACK; - - address public immutable EVM_SCRIPT_EXECUTOR; - - address public immutable VAULTS_ADAPTER; - - // ETF = EasyTrack Factory - address public immutable ETF_ALTER_TIERS_IN_OPERATOR_GRID; - address public immutable ETF_REGISTER_GROUPS_IN_OPERATOR_GRID; - address public immutable ETF_REGISTER_TIERS_IN_OPERATOR_GRID; - address public immutable ETF_SET_JAIL_STATUS_IN_OPERATOR_GRID; - address public immutable ETF_SOCIALIZE_BAD_DEBT_IN_VAULT_HUB; - address public immutable ETF_UPDATE_GROUPS_SHARE_LIMIT_IN_OPERATOR_GRID; - address public immutable ETF_UPDATE_VAULTS_FEES_IN_OPERATOR_GRID; - address public immutable ETF_FORCE_VALIDATOR_EXITS_IN_VAULT_HUB; - - // - // -------- Unchanged contracts -------- - // - address public immutable KERNEL; - bytes32 public immutable LIDO_APP_ID; - address public immutable AGENT; - address public immutable VOTING; - address public 
immutable DUAL_GOVERNANCE; - address public immutable ACL; - address public immutable EL_REWARDS_VAULT; - address public immutable STAKING_ROUTER; - address public immutable VALIDATORS_EXIT_BUS_ORACLE; - address public immutable WITHDRAWAL_QUEUE; - address public immutable WSTETH; - address public immutable NODE_OPERATORS_REGISTRY; - address public immutable SIMPLE_DVT; - address public immutable CSM_ACCOUNTING; - address public immutable ORACLE_DAEMON_CONFIG; - address public immutable RESEAL_MANAGER; - - constructor( - V3AddressesParams memory params - ) { - if (params.newLocatorImpl == params.oldLocatorImpl) { - revert NewAndOldLocatorImplementationsMustBeDifferent(); - } - - if (params.oldTokenRateNotifier == params.newTokenRateNotifier) { - revert OldAndNewTokenRateNotifiersMustBeDifferent(); - } - - // - // Set directly from passed parameters - // - - ILidoLocator newLocatorImpl = ILidoLocator(params.newLocatorImpl); - OLD_LOCATOR_IMPL = params.oldLocatorImpl; - OLD_ACCOUNTING_ORACLE_IMPL = params.oldAccountingOracleImpl; - OLD_LIDO_IMPL = params.oldLidoImpl; - OLD_TOKEN_RATE_NOTIFIER = params.oldTokenRateNotifier; - LOCATOR = params.locator; - NEW_LOCATOR_IMPL = params.newLocatorImpl; - NEW_LIDO_IMPL = params.newLidoImpl; - NEW_ACCOUNTING_ORACLE_IMPL = params.newAccountingOracleImpl; - NEW_TOKEN_RATE_NOTIFIER = params.newTokenRateNotifier; - KERNEL = params.kernel; - LIDO_APP_ID = params.lidoAppId; - AGENT = params.agent; - VOTING = params.voting; - DUAL_GOVERNANCE = params.dualGovernance; - ACL = params.acl; - UPGRADEABLE_BEACON = params.upgradeableBeacon; - STAKING_VAULT_IMPL = params.stakingVaultImpl; - DASHBOARD_IMPL = params.dashboardImpl; - GATE_SEAL = params.gateSealForVaults; - EVM_SCRIPT_EXECUTOR = IVaultsAdapter(params.vaultsAdapter).evmScriptExecutor(); - - EASY_TRACK = params.easyTrack; - VAULTS_ADAPTER = params.vaultsAdapter; - ETF_ALTER_TIERS_IN_OPERATOR_GRID = params.etfAlterTiersInOperatorGrid; - ETF_REGISTER_GROUPS_IN_OPERATOR_GRID = 
params.etfRegisterGroupsInOperatorGrid; - ETF_REGISTER_TIERS_IN_OPERATOR_GRID = params.etfRegisterTiersInOperatorGrid; - ETF_SET_JAIL_STATUS_IN_OPERATOR_GRID = params.etfSetJailStatusInOperatorGrid; - ETF_SOCIALIZE_BAD_DEBT_IN_VAULT_HUB = params.etfSocializeBadDebtInVaultHub; - ETF_UPDATE_GROUPS_SHARE_LIMIT_IN_OPERATOR_GRID = params.etfUpdateGroupsShareLimitInOperatorGrid; - ETF_UPDATE_VAULTS_FEES_IN_OPERATOR_GRID = params.etfUpdateVaultsFeesInOperatorGrid; - ETF_FORCE_VALIDATOR_EXITS_IN_VAULT_HUB = params.etfForceValidatorExitsInVaultHub; - - // - // Discovered via other contracts - // - - OLD_BURNER = ILidoLocator(params.oldLocatorImpl).burner(); - - LIDO = newLocatorImpl.lido(); - ACCOUNTING_ORACLE = newLocatorImpl.accountingOracle(); - BURNER = newLocatorImpl.burner(); - ORACLE_REPORT_SANITY_CHECKER = newLocatorImpl.oracleReportSanityChecker(); - - ACCOUNTING = newLocatorImpl.accounting(); - VAULT_HUB = payable(newLocatorImpl.vaultHub()); - VAULT_FACTORY = newLocatorImpl.vaultFactory(); - PREDEPOSIT_GUARANTEE = newLocatorImpl.predepositGuarantee(); - OPERATOR_GRID = newLocatorImpl.operatorGrid(); - LAZY_ORACLE = newLocatorImpl.lazyOracle(); - - EL_REWARDS_VAULT = newLocatorImpl.elRewardsVault(); - STAKING_ROUTER = newLocatorImpl.stakingRouter(); - VALIDATORS_EXIT_BUS_ORACLE = newLocatorImpl.validatorsExitBusOracle(); - WITHDRAWAL_QUEUE = newLocatorImpl.withdrawalQueue(); - WSTETH = newLocatorImpl.wstETH(); - ORACLE_DAEMON_CONFIG = newLocatorImpl.oracleDaemonConfig(); - RESEAL_MANAGER = params.resealManager; - - { - // Retrieve contracts with burner allowances to migrate: NOR, SDVT and CSM ACCOUNTING - bytes32 curatedHash = _hash(CURATED_MODULE_NAME); - bytes32 simpleDvtHash = _hash(SIMPLE_DVT_MODULE_NAME); - bytes32 csmHash = _hash(CSM_MODULE_NAME); - - address nodeOperatorsRegistry; - address simpleDvt; - address csmAccounting; - - IStakingRouter.StakingModule[] memory stakingModules = IStakingRouter(STAKING_ROUTER).getStakingModules(); - - for (uint256 i = 0; 
i < stakingModules.length; i++) { - bytes32 nameHash = _hash(stakingModules[i].name); - if (nameHash == curatedHash) { - nodeOperatorsRegistry = stakingModules[i].stakingModuleAddress; - } else if (nameHash == simpleDvtHash) { - simpleDvt = stakingModules[i].stakingModuleAddress; - } else if (nameHash == csmHash) { - csmAccounting = ICSModule(stakingModules[i].stakingModuleAddress).accounting(); - } - } - - if (nodeOperatorsRegistry == address(0)) revert StakingModuleNotFound(CURATED_MODULE_NAME); - if (simpleDvt == address(0)) revert StakingModuleNotFound(SIMPLE_DVT_MODULE_NAME); - if (csmAccounting == address(0)) revert StakingModuleNotFound(CSM_MODULE_NAME); - - NODE_OPERATORS_REGISTRY = nodeOperatorsRegistry; - SIMPLE_DVT = simpleDvt; - CSM_ACCOUNTING = csmAccounting; - } - } - - function _hash(string memory input) internal pure returns (bytes32) { - return keccak256(bytes(input)); - } - - error NewAndOldLocatorImplementationsMustBeDifferent(); - error OldAndNewTokenRateNotifiersMustBeDifferent(); - error StakingModuleNotFound(string moduleName); -} diff --git a/contracts/upgrade/V3Template.sol b/contracts/upgrade/V3Template.sol deleted file mode 100644 index f6d549d054..0000000000 --- a/contracts/upgrade/V3Template.sol +++ /dev/null @@ -1,514 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: GPL-3.0 - -// See contracts/COMPILERS.md -pragma solidity 0.8.25; - -import {IAccessControlEnumerable} from "@openzeppelin/contracts-v4.4/access/AccessControlEnumerable.sol"; -import {UpgradeableBeacon} from "@openzeppelin/contracts-v5.2/proxy/beacon/UpgradeableBeacon.sol"; - -import {IBurner as IBurnerWithoutAccessControl} from "contracts/common/interfaces/IBurner.sol"; -import {IVersioned} from "contracts/common/interfaces/IVersioned.sol"; -import {IOssifiableProxy} from "contracts/common/interfaces/IOssifiableProxy.sol"; -import {ILido} from "contracts/common/interfaces/ILido.sol"; - -import {VaultHub} from 
"contracts/0.8.25/vaults/VaultHub.sol"; -import {VaultFactory} from "contracts/0.8.25/vaults/VaultFactory.sol"; -import {OperatorGrid} from "contracts/0.8.25/vaults/OperatorGrid.sol"; -import {PausableUntilWithRoles} from "contracts/0.8.25/utils/PausableUntilWithRoles.sol"; - -import {V3Addresses} from "./V3Addresses.sol"; - -interface IBaseOracle is IAccessControlEnumerable, IVersioned { - function getConsensusContract() external view returns (address); -} - -interface IEasyTrack { - function getEVMScriptFactories() external view returns (address[] memory); -} - -interface IStakingRouter is IAccessControlEnumerable { - function REPORT_REWARDS_MINTED_ROLE() external view returns (bytes32); -} - -interface IBurner is IBurnerWithoutAccessControl, IAccessControlEnumerable { - function REQUEST_BURN_SHARES_ROLE() external view returns (bytes32); - function isMigrationAllowed() external view returns (bool); -} - -interface ILidoWithFinalizeUpgrade is ILido { - function finalizeUpgrade_v3(address _oldBurner, address[] calldata _contractsWithBurnerAllowances, uint256 _initialMaxExternalRatioBP) external; -} - -interface IAccountingOracle is IBaseOracle { - function finalizeUpgrade_v4(uint256 consensusVersion) external; -} - -interface IAragonKernel { - function getApp(bytes32 _namespace, bytes32 _appId) external view returns (address); - function APP_BASES_NAMESPACE() external view returns (bytes32); -} - -interface IWithdrawalsManagerProxy { - function proxy_getAdmin() external view returns (address); - function implementation() external view returns (address); -} - -interface IOracleReportSanityChecker is IAccessControlEnumerable { - function ALL_LIMITS_MANAGER_ROLE() external view returns (bytes32); - function EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE() external view returns (bytes32); - function APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE() external view returns (bytes32); - function ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE() external view returns (bytes32); - 
function SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE() external view returns (bytes32); - function MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE() external view returns (bytes32); - function MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE() external view returns (bytes32); - function MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE() external view returns (bytes32); - function REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE() external view returns (bytes32); - function MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE() external view returns (bytes32); - function SECOND_OPINION_MANAGER_ROLE() external view returns (bytes32); - function INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE() external view returns (bytes32); -} - -interface ITokenRateNotifier { - function owner() external view returns (address); - function observers(uint256 index) external view returns (address); - function observersLength() external view returns (uint256); -} - -interface ILazyOracle { - function UPDATE_SANITY_PARAMS_ROLE() external view returns (bytes32); -} - - -/** -* @title Lido V3 Upgrade Template -* -* @dev Must be used by means of two calls: -* - `startUpgrade()` before upgrading LidoLocator and before everything else -* - `finishUpgrade()` as the last step of the upgrade -*/ -contract V3Template is V3Addresses { - // - // Events - // - - event UpgradeStarted(); - event UpgradeFinished(); - - // - // -------- Constants -------- - // - - uint256 public constant EXPECTED_FINAL_LIDO_VERSION = 3; - uint256 public constant EXPECTED_FINAL_ACCOUNTING_ORACLE_VERSION = 4; - uint256 public constant EXPECTED_FINAL_ACCOUNTING_ORACLE_CONSENSUS_VERSION = 5; - - bytes32 public constant DEFAULT_ADMIN_ROLE = 0x00; - - // Timestamp since which startUpgrade() - // This behavior is introduced to disarm the template if the upgrade voting creation or enactment - // didn't happen in proper time period - uint256 public immutable EXPIRE_SINCE_INCLUSIVE; - - // Initial value of upgradeBlockNumber storage variable - uint256 public constant 
UPGRADE_NOT_STARTED = 0; - - uint256 public constant INFINITE_ALLOWANCE = type(uint256).max; - - // - // Structured storage - // - - uint256 public upgradeBlockNumber = UPGRADE_NOT_STARTED; - bool public isUpgradeFinished; - uint256 public initialOldBurnerStethSharesBalance; - uint256 public initialTotalShares; - uint256 public initialTotalPooledEther; - address[] public contractsWithBurnerAllowances; - uint256 public immutable INITIAL_MAX_EXTERNAL_RATIO_BP; - - // - // Slots for transient storage - // - - // Slot for the upgrade started flag - // keccak256("V3Template.upgradeStartedFlag") - bytes32 public constant UPGRADE_STARTED_SLOT = - 0x058d69f67a3d86c424c516d23a070ff8bed34431617274caa2049bd702675e3f; - - - /// @param _params Params required to initialize the addresses contract - /// @param _expireSinceInclusive Unix timestamp after which upgrade actions revert - /// @param _initialMaxExternalRatioBP Initial maximum external ratio in basis points - constructor(V3AddressesParams memory _params, uint256 _expireSinceInclusive, uint256 _initialMaxExternalRatioBP) V3Addresses(_params) { - EXPIRE_SINCE_INCLUSIVE = _expireSinceInclusive; - INITIAL_MAX_EXTERNAL_RATIO_BP = _initialMaxExternalRatioBP; - contractsWithBurnerAllowances.push(WITHDRAWAL_QUEUE); - // NB: NOR and SIMPLE_DVT allowances are set to 0 in TW upgrade, so they are not migrated - contractsWithBurnerAllowances.push(CSM_ACCOUNTING); - } - - /// @notice Must be called before LidoLocator is upgraded - function startUpgrade() external { - if (msg.sender != AGENT) revert OnlyAgentCanUpgrade(); - if (block.timestamp >= EXPIRE_SINCE_INCLUSIVE) revert Expired(); - if (isUpgradeFinished) revert UpgradeAlreadyFinished(); - if (_isStartCalledInThisTx()) revert StartAlreadyCalledInThisTx(); - if (upgradeBlockNumber != UPGRADE_NOT_STARTED) revert UpgradeAlreadyStarted(); - - assembly { tstore(UPGRADE_STARTED_SLOT, 1) } - upgradeBlockNumber = block.number; - - initialTotalShares = 
ILidoWithFinalizeUpgrade(LIDO).getTotalShares(); - initialTotalPooledEther = ILidoWithFinalizeUpgrade(LIDO).getTotalPooledEther(); - - _assertPreUpgradeState(); - - // Save initial state for the check after burner migration - initialOldBurnerStethSharesBalance = ILidoWithFinalizeUpgrade(LIDO).sharesOf(OLD_BURNER); - - emit UpgradeStarted(); - } - - function finishUpgrade() external { - if (msg.sender != AGENT) revert OnlyAgentCanUpgrade(); - if (isUpgradeFinished) revert UpgradeAlreadyFinished(); - if (!_isStartCalledInThisTx()) revert StartAndFinishMustBeInSameTx(); - - isUpgradeFinished = true; - - ILidoWithFinalizeUpgrade(LIDO).finalizeUpgrade_v3(OLD_BURNER, contractsWithBurnerAllowances, INITIAL_MAX_EXTERNAL_RATIO_BP); - - IAccountingOracle(ACCOUNTING_ORACLE).finalizeUpgrade_v4(EXPECTED_FINAL_ACCOUNTING_ORACLE_CONSENSUS_VERSION); - - _assertPostUpgradeState(); - - emit UpgradeFinished(); - } - - function _assertPreUpgradeState() internal view { - // Check initial implementations of the proxies to be upgraded - _assertProxyImplementation(IOssifiableProxy(LOCATOR), OLD_LOCATOR_IMPL); - _assertProxyImplementation(IOssifiableProxy(ACCOUNTING_ORACLE), OLD_ACCOUNTING_ORACLE_IMPL); - _assertAragonKernelImplementation(IAragonKernel(KERNEL), OLD_LIDO_IMPL); - - // Check allowances of the old burner - address[] memory contractsWithBurnerAllowances_ = contractsWithBurnerAllowances; - for (uint256 i = 0; i < contractsWithBurnerAllowances_.length; ++i) { - if (ILidoWithFinalizeUpgrade(LIDO).allowance(contractsWithBurnerAllowances_[i], OLD_BURNER) != INFINITE_ALLOWANCE) { - revert IncorrectBurnerAllowance(contractsWithBurnerAllowances_[i], OLD_BURNER); - } - } - if (ILidoWithFinalizeUpgrade(LIDO).allowance(NODE_OPERATORS_REGISTRY, OLD_BURNER) != 0) { - revert IncorrectBurnerAllowance(NODE_OPERATORS_REGISTRY, OLD_BURNER); - } - if (ILidoWithFinalizeUpgrade(LIDO).allowance(SIMPLE_DVT, OLD_BURNER) != 0) { - revert IncorrectBurnerAllowance(SIMPLE_DVT, OLD_BURNER); - } - - if 
(!IBurner(BURNER).isMigrationAllowed()) revert BurnerMigrationNotAllowed(); - } - - function _assertPostUpgradeState() internal view { - if ( - ILidoWithFinalizeUpgrade(LIDO).getTotalShares() != initialTotalShares || - ILidoWithFinalizeUpgrade(LIDO).getTotalPooledEther() != initialTotalPooledEther - ) { - revert TotalSharesOrPooledEtherChanged(); - } - - _assertProxyImplementation(IOssifiableProxy(LOCATOR), NEW_LOCATOR_IMPL); - _assertProxyImplementation(IOssifiableProxy(ACCOUNTING_ORACLE), NEW_ACCOUNTING_ORACLE_IMPL); - - _assertAragonKernelImplementation(IAragonKernel(KERNEL), NEW_LIDO_IMPL); - - _assertContractVersion(IVersioned(LIDO), EXPECTED_FINAL_LIDO_VERSION); - _assertContractVersion(IVersioned(ACCOUNTING_ORACLE), EXPECTED_FINAL_ACCOUNTING_ORACLE_VERSION); - - _assertFinalACL(); - - _checkTokenRateNotifierMigratedCorrectly(); - _checkBurnerMigratedCorrectly(); - - if (VaultFactory(VAULT_FACTORY).BEACON() != UPGRADEABLE_BEACON) { - revert IncorrectVaultFactoryBeacon(VAULT_FACTORY, UPGRADEABLE_BEACON); - } - if (VaultFactory(VAULT_FACTORY).DASHBOARD_IMPL() != DASHBOARD_IMPL) { - revert IncorrectVaultFactoryDashboardImplementation(VAULT_FACTORY, DASHBOARD_IMPL); - } - if (UpgradeableBeacon(UPGRADEABLE_BEACON).owner() != AGENT) { - revert IncorrectUpgradeableBeaconOwner(UPGRADEABLE_BEACON, AGENT); - } - if (UpgradeableBeacon(UPGRADEABLE_BEACON).implementation() != STAKING_VAULT_IMPL) { - revert IncorrectUpgradeableBeaconImplementation(UPGRADEABLE_BEACON, STAKING_VAULT_IMPL); - } - } - - function _assertFinalACL() internal view { - // Burner - bytes32 requestBurnSharesRole = IBurner(BURNER).REQUEST_BURN_SHARES_ROLE(); - _assertZeroOZRoleHolders(OLD_BURNER, requestBurnSharesRole); - - _assertProxyAdmin(IOssifiableProxy(BURNER), AGENT); - _assertSingleOZRoleHolder(BURNER, DEFAULT_ADMIN_ROLE, AGENT); - { - address[] memory holders = new address[](2); - holders[0] = ACCOUNTING; - holders[1] = CSM_ACCOUNTING; - _assertOZRoleHolders(BURNER, requestBurnSharesRole, 
holders); - } - - // VaultHub - _assertProxyAdmin(IOssifiableProxy(VAULT_HUB), AGENT); - _assertSingleOZRoleHolder(VAULT_HUB, DEFAULT_ADMIN_ROLE, AGENT); - - _assertSingleOZRoleHolder(VAULT_HUB, VaultHub(VAULT_HUB).VALIDATOR_EXIT_ROLE(), VAULTS_ADAPTER); - _assertSingleOZRoleHolder(VAULT_HUB, VaultHub(VAULT_HUB).BAD_DEBT_MASTER_ROLE(), VAULTS_ADAPTER); - _assertZeroOZRoleHolders(VAULT_HUB, VaultHub(VAULT_HUB).REDEMPTION_MASTER_ROLE()); - _assertZeroOZRoleHolders(VAULT_HUB, VaultHub(VAULT_HUB).VAULT_MASTER_ROLE()); - _assertTwoOZRoleHolders(VAULT_HUB, PausableUntilWithRoles(VAULT_HUB).PAUSE_ROLE(), GATE_SEAL, RESEAL_MANAGER); - _assertSingleOZRoleHolder(VAULT_HUB, PausableUntilWithRoles(VAULT_HUB).RESUME_ROLE(), RESEAL_MANAGER); - - // OperatorGrid - _assertProxyAdmin(IOssifiableProxy(OPERATOR_GRID), AGENT); - _assertSingleOZRoleHolder(OPERATOR_GRID, DEFAULT_ADMIN_ROLE, AGENT); - _assertTwoOZRoleHolders(OPERATOR_GRID, OperatorGrid(OPERATOR_GRID).REGISTRY_ROLE(), EVM_SCRIPT_EXECUTOR, VAULTS_ADAPTER); - - // LazyOracle - _assertProxyAdmin(IOssifiableProxy(LAZY_ORACLE), AGENT); - _assertSingleOZRoleHolder(LAZY_ORACLE, DEFAULT_ADMIN_ROLE, AGENT); - _assertZeroOZRoleHolders(LAZY_ORACLE, ILazyOracle(LAZY_ORACLE).UPDATE_SANITY_PARAMS_ROLE()); - - // AccountingOracle - _assertProxyAdmin(IOssifiableProxy(ACCOUNTING_ORACLE), AGENT); - _assertSingleOZRoleHolder(ACCOUNTING_ORACLE, DEFAULT_ADMIN_ROLE, AGENT); - - // OracleReportSanityChecker - IOracleReportSanityChecker checker = IOracleReportSanityChecker(ORACLE_REPORT_SANITY_CHECKER); - _assertSingleOZRoleHolder(ORACLE_REPORT_SANITY_CHECKER, DEFAULT_ADMIN_ROLE, AGENT); - bytes32[12] memory roles = [ - checker.ALL_LIMITS_MANAGER_ROLE(), - checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), - checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), - checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), - checker.SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE(), - checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), - 
checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), - checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), - checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), - checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), - checker.SECOND_OPINION_MANAGER_ROLE(), - checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE() - ]; - for (uint256 i = 0; i < roles.length; ++i) { - _assertZeroOZRoleHolders(ORACLE_REPORT_SANITY_CHECKER, roles[i]); - } - - // Accounting - _assertProxyAdmin(IOssifiableProxy(ACCOUNTING), AGENT); - - // PredepositGuarantee - _assertProxyAdmin(IOssifiableProxy(PREDEPOSIT_GUARANTEE), AGENT); - _assertSingleOZRoleHolder(PREDEPOSIT_GUARANTEE, DEFAULT_ADMIN_ROLE, AGENT); - _assertTwoOZRoleHolders(PREDEPOSIT_GUARANTEE, PausableUntilWithRoles(PREDEPOSIT_GUARANTEE).PAUSE_ROLE(), GATE_SEAL, RESEAL_MANAGER); - _assertSingleOZRoleHolder(PREDEPOSIT_GUARANTEE, PausableUntilWithRoles(PREDEPOSIT_GUARANTEE).RESUME_ROLE(), RESEAL_MANAGER); - - // StakingRouter - bytes32 reportRewardsMintedRole = IStakingRouter(STAKING_ROUTER).REPORT_REWARDS_MINTED_ROLE(); - _assertSingleOZRoleHolder(STAKING_ROUTER, reportRewardsMintedRole, ACCOUNTING); - - _assertEasyTrackFactoriesAdded(); - } - - function _assertEasyTrackFactoriesAdded() internal view { - IEasyTrack easyTrack = IEasyTrack(EASY_TRACK); - address[] memory factories = easyTrack.getEVMScriptFactories(); - - // The expected order of the last 8 EasyTrack factories - address[8] memory expectedFactories = [ - ETF_ALTER_TIERS_IN_OPERATOR_GRID, - ETF_REGISTER_GROUPS_IN_OPERATOR_GRID, - ETF_REGISTER_TIERS_IN_OPERATOR_GRID, - ETF_UPDATE_GROUPS_SHARE_LIMIT_IN_OPERATOR_GRID, - ETF_SET_JAIL_STATUS_IN_OPERATOR_GRID, - ETF_UPDATE_VAULTS_FEES_IN_OPERATOR_GRID, - ETF_FORCE_VALIDATOR_EXITS_IN_VAULT_HUB, - ETF_SOCIALIZE_BAD_DEBT_IN_VAULT_HUB - ]; - - uint256 numFactories = factories.length; - if (numFactories < expectedFactories.length) { - revert UnexpectedEasyTrackFactories(); - } - - for (uint256 i = 0; i < expectedFactories.length; ++i) { 
- if (factories[numFactories - expectedFactories.length + i] != expectedFactories[i]) { - revert UnexpectedEasyTrackFactories(); - } - } - } - - function _checkTokenRateNotifierMigratedCorrectly() internal view { - ITokenRateNotifier oldNotifier = ITokenRateNotifier(OLD_TOKEN_RATE_NOTIFIER); - ITokenRateNotifier newNotifier = ITokenRateNotifier(NEW_TOKEN_RATE_NOTIFIER); - - if (newNotifier.owner() != AGENT) { - revert IncorrectTokenRateNotifierOwnerMigration(NEW_TOKEN_RATE_NOTIFIER, AGENT); - } - - if (oldNotifier.observersLength() != newNotifier.observersLength()) { - revert IncorrectTokenRateNotifierObserversLengthMigration(); - } - - for (uint256 i = 0; i < oldNotifier.observersLength(); i++) { - if (oldNotifier.observers(i) != newNotifier.observers(i)) { - revert IncorrectTokenRateNotifierObserversMigration(); - } - } - } - - function _checkBurnerMigratedCorrectly() internal view { - if (IBurner(OLD_BURNER).getCoverSharesBurnt() != IBurner(BURNER).getCoverSharesBurnt()) { - revert IncorrectBurnerSharesMigration("Cover shares burnt mismatch"); - } - - if (IBurner(OLD_BURNER).getNonCoverSharesBurnt() != IBurner(BURNER).getNonCoverSharesBurnt()) { - revert IncorrectBurnerSharesMigration("Non-cover shares burnt mismatch"); - } - - (uint256 oldCoverShares, uint256 oldNonCoverShares) = IBurner(OLD_BURNER).getSharesRequestedToBurn(); - (uint256 newCoverShares, uint256 newNonCoverShares) = IBurner(BURNER).getSharesRequestedToBurn(); - if (oldCoverShares != newCoverShares) { - revert IncorrectBurnerSharesMigration("Cover shares requested to burn mismatch"); - } - - if (oldNonCoverShares != newNonCoverShares) { - revert IncorrectBurnerSharesMigration("Non-cover shares requested to burn mismatch"); - } - - if (ILidoWithFinalizeUpgrade(LIDO).balanceOf(OLD_BURNER) != 0) { - revert IncorrectBurnerSharesMigration("Old burner stETH balance is not zero"); - } - - if (ILidoWithFinalizeUpgrade(LIDO).sharesOf(BURNER) != initialOldBurnerStethSharesBalance) { - revert 
IncorrectBurnerSharesMigration("New burner stETH balance mismatch"); - } - - if (IBurner(BURNER).isMigrationAllowed()) { - revert IncorrectBurnerSharesMigration("Burner migration is still allowed"); - } - - address[] memory contractsWithBurnerAllowances_ = contractsWithBurnerAllowances; - for (uint256 i = 0; i < contractsWithBurnerAllowances_.length; i++) { - if (ILidoWithFinalizeUpgrade(LIDO).allowance(contractsWithBurnerAllowances_[i], OLD_BURNER) != 0) { - revert IncorrectBurnerAllowance(contractsWithBurnerAllowances_[i], OLD_BURNER); - } - if (ILidoWithFinalizeUpgrade(LIDO).allowance(contractsWithBurnerAllowances_[i], BURNER) != INFINITE_ALLOWANCE) { - revert IncorrectBurnerAllowance(contractsWithBurnerAllowances_[i], BURNER); - } - } - - // NO and SimpleDVT new Burner allowances are to be zero the same as old Burner on pre upgrade state - if (ILidoWithFinalizeUpgrade(LIDO).allowance(NODE_OPERATORS_REGISTRY, BURNER) != 0) { - revert IncorrectBurnerAllowance(NODE_OPERATORS_REGISTRY, BURNER); - } - if (ILidoWithFinalizeUpgrade(LIDO).allowance(SIMPLE_DVT, BURNER) != 0) { - revert IncorrectBurnerAllowance(SIMPLE_DVT, BURNER); - } - } - - function _assertProxyAdmin(IOssifiableProxy _proxy, address _admin) internal view { - if (_proxy.proxy__getAdmin() != _admin) revert IncorrectProxyAdmin(address(_proxy)); - } - - function _assertProxyImplementation(IOssifiableProxy _proxy, address _implementation) internal view { - address actualImplementation = _proxy.proxy__getImplementation(); - if (actualImplementation != _implementation) { - revert IncorrectProxyImplementation(address(_proxy), actualImplementation); - } - } - - function _assertZeroOZRoleHolders(address _accessControlled, bytes32 _role) internal view { - IAccessControlEnumerable accessControlled = IAccessControlEnumerable(_accessControlled); - if (accessControlled.getRoleMemberCount(_role) != 0) { - revert NonZeroRoleHolders(address(accessControlled), _role); - } - } - - function _assertSingleOZRoleHolder( - 
address _accessControlled, bytes32 _role, address _holder - ) internal view { - IAccessControlEnumerable accessControlled = IAccessControlEnumerable(_accessControlled); - if (accessControlled.getRoleMemberCount(_role) != 1 - || accessControlled.getRoleMember(_role, 0) != _holder - ) { - revert IncorrectOZAccessControlRoleHolders(address(accessControlled), _role); - } - } - - function _assertTwoOZRoleHolders( - address _accessControlled, bytes32 _role, address _holder1, address _holder2 - ) internal view { - address[] memory holders = new address[](2); - holders[0] = _holder1; - holders[1] = _holder2; - _assertOZRoleHolders(_accessControlled, _role, holders); - } - - function _assertOZRoleHolders( - address _accessControlled, bytes32 _role, address[] memory _holders - ) internal view { - IAccessControlEnumerable accessControlled = IAccessControlEnumerable(_accessControlled); - if (accessControlled.getRoleMemberCount(_role) != _holders.length) { - revert IncorrectOZAccessControlRoleHolders(address(accessControlled), _role); - } - for (uint256 i = 0; i < _holders.length; i++) { - if (accessControlled.getRoleMember(_role, i) != _holders[i]) { - revert IncorrectOZAccessControlRoleHolders(address(accessControlled), _role); - } - } - } - - function _assertAragonKernelImplementation(IAragonKernel _kernel, address _implementation) internal view { - if (_kernel.getApp(_kernel.APP_BASES_NAMESPACE(), LIDO_APP_ID) != _implementation) { - revert IncorrectAragonKernelImplementation(address(_kernel), _implementation); - } - } - - function _assertContractVersion(IVersioned _versioned, uint256 _expectedVersion) internal view { - if (_versioned.getContractVersion() != _expectedVersion) { - revert InvalidContractVersion(address(_versioned), _expectedVersion); - } - } - - function _isStartCalledInThisTx() internal view returns (bool isStartCalledInThisTx) { - assembly { - isStartCalledInThisTx := tload(UPGRADE_STARTED_SLOT) - } - } - - error OnlyAgentCanUpgrade(); - error 
UpgradeAlreadyStarted(); - error UpgradeAlreadyFinished(); - error IncorrectProxyAdmin(address proxy); - error IncorrectProxyImplementation(address proxy, address implementation); - error InvalidContractVersion(address contractAddress, uint256 actualVersion); - error IncorrectOZAccessControlRoleHolders(address contractAddress, bytes32 role); - error NonZeroRoleHolders(address contractAddress, bytes32 role); - error IncorrectAragonKernelImplementation(address kernel, address implementation); - error StartAndFinishMustBeInSameTx(); - error StartAlreadyCalledInThisTx(); - error Expired(); - error IncorrectBurnerSharesMigration(string reason); - error IncorrectBurnerAllowance(address contractAddress, address burner); - error BurnerMigrationNotAllowed(); - error IncorrectVaultFactoryBeacon(address factory, address beacon); - error IncorrectVaultFactoryDashboardImplementation(address factory, address delegation); - error IncorrectUpgradeableBeaconOwner(address beacon, address owner); - error IncorrectUpgradeableBeaconImplementation(address beacon, address implementation); - error TotalSharesOrPooledEtherChanged(); - error UnexpectedEasyTrackFactories(); - error IncorrectTokenRateNotifierOwnerMigration(address notifier, address owner); - error IncorrectTokenRateNotifierObserversLengthMigration(); - error IncorrectTokenRateNotifierObserversMigration(); -} diff --git a/contracts/upgrade/V3TemporaryAdmin.sol b/contracts/upgrade/V3TemporaryAdmin.sol deleted file mode 100644 index bf2c5ac507..0000000000 --- a/contracts/upgrade/V3TemporaryAdmin.sol +++ /dev/null @@ -1,271 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: GPL-3.0 - -// See contracts/COMPILERS.md -pragma solidity 0.8.25; - -import {IAccessControl} from "@openzeppelin/contracts-v4.4/access/AccessControl.sol"; - -interface IVaultHub { - function VAULT_MASTER_ROLE() external view returns (bytes32); - function VALIDATOR_EXIT_ROLE() external view returns (bytes32); - function 
BAD_DEBT_MASTER_ROLE() external view returns (bytes32); -} - -interface IPausableUntilWithRoles { - function PAUSE_ROLE() external view returns (bytes32); - function RESUME_ROLE() external view returns (bytes32); -} - -interface IOperatorGrid { - function REGISTRY_ROLE() external view returns (bytes32); -} - -interface IBurner { - function REQUEST_BURN_SHARES_ROLE() external view returns (bytes32); -} - -interface IStakingRouter { - struct StakingModule { - uint24 id; - address stakingModuleAddress; - uint16 stakingModuleFee; - uint16 treasuryFee; - uint16 stakeShareLimit; - uint8 status; - string name; - uint64 lastDepositAt; - uint256 lastDepositBlock; - uint256 exitedValidatorsCount; - uint16 priorityExitShareThreshold; - uint64 maxDepositsPerBlock; - uint64 minDepositBlockDistance; - } - - function getStakingModules() external view returns (StakingModule[] memory res); -} - -interface ICSModule { - function accounting() external view returns (address); -} - -interface IVaultsAdapter { - function evmScriptExecutor() external view returns (address); -} - -interface ITokenRateNotifier { - function observers(uint256 index) external view returns (address); - function observersLength() external view returns (uint256); - function addObserver(address observer) external; - function transferOwnership(address newOwner) external; -} - -interface ILidoLocator { - function vaultHub() external view returns (address); - function predepositGuarantee() external view returns (address); - function lazyOracle() external view returns (address); - function operatorGrid() external view returns (address); - function burner() external view returns (address); - function accounting() external view returns (address); - function stakingRouter() external view returns (address); - function vaultFactory() external view returns (address); - function postTokenRebaseReceiver() external view returns (address); -} - -/** - * @title V3TemporaryAdmin - * @notice Auxiliary contract that serves as 
temporary admin during deployment - * @dev Used to perform intermediate admin tasks (like setting PAUSE_ROLE for gateSeal) - * and then transfer admin role to the final agent, reducing deployer privileges - */ -contract V3TemporaryAdmin { - bytes32 public constant DEFAULT_ADMIN_ROLE = 0x00; - string public constant CSM_MODULE_NAME = "Community Staking"; - - address public immutable AGENT; - - bool public isSetupComplete; - - constructor(address _agent) { - if (_agent == address(0)) revert ZeroAddress(); - AGENT = _agent; - } - - /** - * @notice Get the CSM accounting address from the staking router - * @param _stakingRouter The StakingRouter contract address - * @return The address of the CSM accounting contract - */ - function getCsmAccountingAddress(address _stakingRouter) public view returns (address) { - if (_stakingRouter == address(0)) revert ZeroStakingRouter(); - IStakingRouter.StakingModule[] memory stakingModules = IStakingRouter(_stakingRouter).getStakingModules(); - - bytes32 csmModuleNameHash = keccak256(bytes(CSM_MODULE_NAME)); - for (uint256 i = 0; i < stakingModules.length; i++) { - if (keccak256(bytes(stakingModules[i].name)) == csmModuleNameHash) { - return ICSModule(stakingModules[i].stakingModuleAddress).accounting(); - } - } - - revert CsmModuleNotFound(); - } - - /** - * @notice Complete setup for all contracts - grants all roles and transfers admin to agent - * @dev This is the main external function that should be called after deployment - * @param _lidoLocatorImpl The new LidoLocator implementation address - * @param _vaultsAdapter The vaults' adapter address for EasyTrack - * @param _gateSeal The GateSeal contract address - * @param _resealManager The ResealManager for extra pause/resume roles - * @param _oldTokenRateNotifier The old TokenRateNotifier contract address - */ - function completeSetup( - address _lidoLocatorImpl, - address _vaultsAdapter, - address _gateSeal, - address _resealManager, - address _oldTokenRateNotifier - ) 
external { - if (isSetupComplete) revert SetupAlreadyCompleted(); - if (_lidoLocatorImpl == address(0)) revert ZeroLidoLocator(); - if (_vaultsAdapter == address(0)) revert ZeroVaultsAdapter(); - - isSetupComplete = true; - - ILidoLocator locator = ILidoLocator(_lidoLocatorImpl); - - address csmAccounting = getCsmAccountingAddress(locator.stakingRouter()); - - address vaultHub = locator.vaultHub(); - address operatorGrid = locator.operatorGrid(); - address burner = locator.burner(); - address predepositGuarantee = locator.predepositGuarantee(); - address tokenRateNotifier = locator.postTokenRebaseReceiver(); - - _setupPredepositGuarantee(predepositGuarantee, _gateSeal, _resealManager); - _setupOperatorGrid(operatorGrid, IVaultsAdapter(_vaultsAdapter).evmScriptExecutor(), _vaultsAdapter); - _setupBurner(burner, locator.accounting(), csmAccounting); - _setupVaultHub(vaultHub, _vaultsAdapter, _gateSeal, _resealManager); - _migrateTokenRateNotifier(_oldTokenRateNotifier, tokenRateNotifier); - - emit SetupCompleted(vaultHub, operatorGrid, burner, predepositGuarantee, tokenRateNotifier); - } - - /** - * @notice Setup VaultHub with all required roles and transfer admin to agent - * @param _vaultHub The VaultHub contract address - * @param _vaultsAdapter The vaults' adapter address - * @param _gateSeal The GateSeal contract address - * @param _resealManager The ResealManager contract address that can pause and resume - */ - function _setupVaultHub( - address _vaultHub, - address _vaultsAdapter, - address _gateSeal, - address _resealManager - ) private { - // Get roles from the contract - bytes32 pauseRole = IPausableUntilWithRoles(_vaultHub).PAUSE_ROLE(); - bytes32 resumeRole = IPausableUntilWithRoles(_vaultHub).RESUME_ROLE(); - bytes32 validatorExitRole = IVaultHub(_vaultHub).VALIDATOR_EXIT_ROLE(); - bytes32 badDebtMasterRole = IVaultHub(_vaultHub).BAD_DEBT_MASTER_ROLE(); - - IAccessControl(_vaultHub).grantRole(pauseRole, _gateSeal); - 
IAccessControl(_vaultHub).grantRole(pauseRole, _resealManager); - IAccessControl(_vaultHub).grantRole(resumeRole, _resealManager); - - IAccessControl(_vaultHub).grantRole(validatorExitRole, _vaultsAdapter); - IAccessControl(_vaultHub).grantRole(badDebtMasterRole, _vaultsAdapter); - - _transferAdminToAgent(_vaultHub); - } - - /** - * @notice Setup PredepositGuarantee with PAUSE_ROLE for gateSeal and transfer admin to agent - * @param _predepositGuarantee The PredepositGuarantee contract address - * @param _gateSeal The GateSeal contract address - * @param _resealManager The ResealManager contract address that can pause and resume - */ - function _setupPredepositGuarantee( - address _predepositGuarantee, - address _gateSeal, - address _resealManager - ) private { - bytes32 pauseRole = IPausableUntilWithRoles(_predepositGuarantee).PAUSE_ROLE(); - bytes32 resumeRole = IPausableUntilWithRoles(_predepositGuarantee).RESUME_ROLE(); - - IAccessControl(_predepositGuarantee).grantRole(pauseRole, _gateSeal); - IAccessControl(_predepositGuarantee).grantRole(pauseRole, _resealManager); - IAccessControl(_predepositGuarantee).grantRole(resumeRole, _resealManager); - - _transferAdminToAgent(_predepositGuarantee); - } - - /** - * @notice Setup OperatorGrid with required roles and transfer admin to agent - * @param _operatorGrid The OperatorGrid contract address - * @param _evmScriptExecutor The EVM script executor address - * @param _vaultsAdapter The vaults' adapter address - */ - function _setupOperatorGrid(address _operatorGrid, address _evmScriptExecutor, address _vaultsAdapter) private { - bytes32 registryRole = IOperatorGrid(_operatorGrid).REGISTRY_ROLE(); - IAccessControl(_operatorGrid).grantRole(registryRole, _evmScriptExecutor); - IAccessControl(_operatorGrid).grantRole(registryRole, _vaultsAdapter); - _transferAdminToAgent(_operatorGrid); - } - - /** - * @notice Setup Burner with required roles and transfer admin to agent - * @param _burner The Burner contract address - * 
@param _accounting The Accounting contract address - * @param _csmAccounting The CSM Accounting contract address - */ - function _setupBurner( - address _burner, - address _accounting, - address _csmAccounting - ) private { - // Get role from the contract - bytes32 requestBurnSharesRole = IBurner(_burner).REQUEST_BURN_SHARES_ROLE(); - - IAccessControl(_burner).grantRole(requestBurnSharesRole, _accounting); - IAccessControl(_burner).grantRole(requestBurnSharesRole, _csmAccounting); - - _transferAdminToAgent(_burner); - } - - function _migrateTokenRateNotifier(address _oldTokenRateNotifier, address _newTokenRateNotifier) private { - ITokenRateNotifier oldNotifier = ITokenRateNotifier(_oldTokenRateNotifier); - ITokenRateNotifier newNotifier = ITokenRateNotifier(_newTokenRateNotifier); - - assert(newNotifier.observersLength() == 0); - uint256 observersLength = oldNotifier.observersLength(); - - for (uint256 i = 0; i < observersLength; i++) { - address observer = oldNotifier.observers(i); - newNotifier.addObserver(observer); - } - - newNotifier.transferOwnership(AGENT); - } - - function _transferAdminToAgent(address _contract) private { - IAccessControl(_contract).grantRole(DEFAULT_ADMIN_ROLE, AGENT); - IAccessControl(_contract).renounceRole(DEFAULT_ADMIN_ROLE, address(this)); - } - - error ZeroAddress(); - error ZeroLidoLocator(); - error ZeroStakingRouter(); - error ZeroVaultsAdapter(); - error CsmModuleNotFound(); - error SetupAlreadyCompleted(); - - event SetupCompleted( - address vaultHub, - address operatorGrid, - address burner, - address predepositGuarantee, - address newTokenRateNotifier - ); -} diff --git a/contracts/upgrade/V3VoteScript.sol b/contracts/upgrade/V3VoteScript.sol deleted file mode 100644 index 84cea64886..0000000000 --- a/contracts/upgrade/V3VoteScript.sol +++ /dev/null @@ -1,421 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0 -pragma solidity 0.8.25; - -import {IAccessControl} from "@openzeppelin/contracts-v5.2/access/IAccessControl.sol"; - 
-import {IBurner} from "contracts/common/interfaces/IBurner.sol"; -import {IOssifiableProxy} from "contracts/common/interfaces/IOssifiableProxy.sol"; - -import {OmnibusBase} from "./utils/OmnibusBase.sol"; -import {V3Template} from "./V3Template.sol"; - -import {OperatorGrid} from "contracts/0.8.25/vaults/OperatorGrid.sol"; - -interface ITimeConstraints { - function checkTimeAfterTimestampAndEmit(uint40 timestamp) external; - function checkTimeBeforeTimestampAndEmit(uint40 timestamp) external; - function checkTimeWithinDayTimeAndEmit(uint32 startDayTime, uint32 endDayTime) external; -} - -interface IEasyTrack { - function addEVMScriptFactory(address _evmScriptFactory, bytes memory _permissions) external; -} - -interface IKernel { - function setApp(bytes32 _namespace, bytes32 _appId, address _app) external; - function APP_BASES_NAMESPACE() external view returns (bytes32); -} - -interface IOracleDaemonConfig { - function CONFIG_MANAGER_ROLE() external view returns (bytes32); - function set(string calldata _key, bytes calldata _value) external; -} - -interface IStakingRouter { - function REPORT_REWARDS_MINTED_ROLE() external view returns (bytes32); -} - -interface IVaultsAdapter { - function setVaultJailStatus(address _vault, bool _isInJail) external; - function updateVaultFees(address _vault, uint256 _infrastructureFeeBP, uint256 _liquidityFeeBP, uint256 _reservationFeeBP) external; - function forceValidatorExit(address _vault, bytes calldata _pubkeys) external payable; - function socializeBadDebt(address _debtVault, address _acceptorVault, uint256 _shares) external; -} - -interface IPausableUntilWithRoles { - function PAUSE_ROLE() external view returns (bytes32); - function pauseFor(uint256 _duration) external; -} - -/// @title V3VoteScript -/// @notice Script for upgrading Lido protocol components -contract V3VoteScript is OmnibusBase { - - struct ScriptParams { - address upgradeTemplate; - address timeConstraints; - uint256 odcSlashingReserveWeRightShiftEpochs; - 
uint256 odcSlashingReserveWeLeftShiftEpochs; - } - - // - // Execution window - // - uint32 public constant ENABLED_DAY_SPAN_START = 50400; // 14:00 - uint32 public constant ENABLED_DAY_SPAN_END = 82800; // 23:00 - - // - // Constants - // - uint256 public constant DG_ITEMS_COUNT = 21; - uint256 public constant VOTING_ITEMS_COUNT = 8; - - // - // Immutables - // - V3Template public immutable TEMPLATE; - - // - // Structured storage - // - ScriptParams public params; - - constructor( - ScriptParams memory _params - ) OmnibusBase(V3Template(_params.upgradeTemplate).VOTING(), V3Template(_params.upgradeTemplate).DUAL_GOVERNANCE()) { - TEMPLATE = V3Template(_params.upgradeTemplate); - - params = _params; - } - - function getVotingVoteItems() public view override returns (VoteItem[] memory votingVoteItems) { - votingVoteItems = new VoteItem[](VOTING_ITEMS_COUNT); - address easyTrack = TEMPLATE.EASY_TRACK(); - address operatorGrid = TEMPLATE.OPERATOR_GRID(); - address vaultsAdapter = TEMPLATE.VAULTS_ADAPTER(); - uint256 index = 0; - - votingVoteItems[index++] = VoteItem({ - description: "2. Add AlterTiersInOperatorGrid factory to Easy Track (permissions: operatorGrid, alterTiers)", // 1 is reserved for DG submission item - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_ALTER_TIERS_IN_OPERATOR_GRID(), - bytes.concat( - bytes20(operatorGrid), - bytes4(OperatorGrid.alterTiers.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "3. 
Add RegisterGroupsInOperatorGrid factory to Easy Track (permissions: operatorGrid, registerGroup + registerTiers)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_REGISTER_GROUPS_IN_OPERATOR_GRID(), - bytes.concat( - bytes20(operatorGrid), - bytes4(OperatorGrid.registerGroup.selector), - bytes20(operatorGrid), - bytes4(OperatorGrid.registerTiers.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "4. Add RegisterTiersInOperatorGrid factory to Easy Track (permissions: operatorGrid, registerTiers)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_REGISTER_TIERS_IN_OPERATOR_GRID(), - bytes.concat( - bytes20(operatorGrid), - bytes4(OperatorGrid.registerTiers.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "5. Add UpdateGroupsShareLimitInOperatorGrid factory to Easy Track (permissions: operatorGrid, updateGroupShareLimit)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_UPDATE_GROUPS_SHARE_LIMIT_IN_OPERATOR_GRID(), - bytes.concat( - bytes20(operatorGrid), - bytes4(OperatorGrid.updateGroupShareLimit.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "6. Add SetJailStatusInOperatorGrid factory to Easy Track (permissions: vaultsAdapter, setVaultJailStatus)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_SET_JAIL_STATUS_IN_OPERATOR_GRID(), - bytes.concat( - bytes20(vaultsAdapter), - bytes4(IVaultsAdapter.setVaultJailStatus.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "7. 
Add UpdateVaultsFeesInOperatorGrid factory to Easy Track (permissions: vaultsAdapter, updateVaultFees)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_UPDATE_VAULTS_FEES_IN_OPERATOR_GRID(), - bytes.concat( - bytes20(vaultsAdapter), - bytes4(IVaultsAdapter.updateVaultFees.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "8. Add ForceValidatorExitsInVaultHub factory to Easy Track (permissions: vaultsAdapter, forceValidatorExit)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_FORCE_VALIDATOR_EXITS_IN_VAULT_HUB(), - bytes.concat( - bytes20(vaultsAdapter), - bytes4(IVaultsAdapter.forceValidatorExit.selector) - ) - )) - }) - }); - - votingVoteItems[index++] = VoteItem({ - description: "9. Add SocializeBadDebtInVaultHub factory to Easy Track (permissions: vaultsAdapter, socializeBadDebt)", - call: ScriptCall({ - to: easyTrack, - data: abi.encodeCall(IEasyTrack.addEVMScriptFactory, ( - TEMPLATE.ETF_SOCIALIZE_BAD_DEBT_IN_VAULT_HUB(), - bytes.concat( - bytes20(vaultsAdapter), - bytes4(IVaultsAdapter.socializeBadDebt.selector) - ) - )) - }) - }); - - assert(index == VOTING_ITEMS_COUNT); - } - - function getVoteItems() public view override returns (VoteItem[] memory voteItems) { - voteItems = new VoteItem[](DG_ITEMS_COUNT); - uint256 index = 0; - - voteItems[index++] = VoteItem({ - description: "1.1. Ensure DG proposal execution is within daily time window (14:00 UTC - 23:00 UTC)", - call: ScriptCall({ - to: params.timeConstraints, - data: abi.encodeCall( - ITimeConstraints.checkTimeWithinDayTimeAndEmit, - ( - ENABLED_DAY_SPAN_START, - ENABLED_DAY_SPAN_END - ) - ) - }) - }); - - voteItems[index++] = VoteItem({ - description: "1.2. 
Call V3Template.startUpgrade", - call: _forwardCall(TEMPLATE.AGENT(), params.upgradeTemplate, abi.encodeCall(V3Template.startUpgrade, ())) - }); - - voteItems[index++] = VoteItem({ - description: "1.3. Upgrade LidoLocator implementation", - call: _forwardCall(TEMPLATE.AGENT(), TEMPLATE.LOCATOR(), abi.encodeCall(IOssifiableProxy.proxy__upgradeTo, (TEMPLATE.NEW_LOCATOR_IMPL()))) - }); - - voteItems[index++] = VoteItem({ - description: "1.4. Grant Aragon APP_MANAGER_ROLE to the AGENT", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ACL(), - abi.encodeWithSignature( - "grantPermission(address,address,bytes32)", - TEMPLATE.AGENT(), - TEMPLATE.KERNEL(), - keccak256("APP_MANAGER_ROLE") - ) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.5. Set Lido implementation in Kernel", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.KERNEL(), - abi.encodeCall(IKernel.setApp, (IKernel(TEMPLATE.KERNEL()).APP_BASES_NAMESPACE(), TEMPLATE.LIDO_APP_ID(), TEMPLATE.NEW_LIDO_IMPL())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.6. Revoke Aragon APP_MANAGER_ROLE from the AGENT", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ACL(), - abi.encodeWithSignature( - "revokePermission(address,address,bytes32)", - TEMPLATE.AGENT(), - TEMPLATE.KERNEL(), - keccak256("APP_MANAGER_ROLE") - ) - ) - }); - - bytes32 requestBurnSharesRole = IBurner(TEMPLATE.OLD_BURNER()).REQUEST_BURN_SHARES_ROLE(); - voteItems[index++] = VoteItem({ - description: "1.7. Revoke REQUEST_BURN_SHARES_ROLE from Lido", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.OLD_BURNER(), - abi.encodeCall(IAccessControl.revokeRole, (requestBurnSharesRole, TEMPLATE.LIDO())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.8. 
Revoke REQUEST_BURN_SHARES_ROLE from Curated staking module", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.OLD_BURNER(), - abi.encodeCall(IAccessControl.revokeRole, (requestBurnSharesRole, TEMPLATE.NODE_OPERATORS_REGISTRY())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.9. Revoke REQUEST_BURN_SHARES_ROLE from SimpleDVT", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.OLD_BURNER(), - abi.encodeCall(IAccessControl.revokeRole, (requestBurnSharesRole, TEMPLATE.SIMPLE_DVT())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.10. Revoke REQUEST_BURN_SHARES_ROLE from Community Staking Accounting", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.OLD_BURNER(), - abi.encodeCall(IAccessControl.revokeRole, (requestBurnSharesRole, TEMPLATE.CSM_ACCOUNTING())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.11. Upgrade AccountingOracle implementation", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ACCOUNTING_ORACLE(), - abi.encodeCall(IOssifiableProxy.proxy__upgradeTo, (TEMPLATE.NEW_ACCOUNTING_ORACLE_IMPL())) - ) - }); - - bytes32 reportRewardsMintedRole = IStakingRouter(TEMPLATE.STAKING_ROUTER()).REPORT_REWARDS_MINTED_ROLE(); - voteItems[index++] = VoteItem({ - description: "1.12. Revoke REPORT_REWARDS_MINTED_ROLE from Lido", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.STAKING_ROUTER(), - abi.encodeCall(IAccessControl.revokeRole, (reportRewardsMintedRole, TEMPLATE.LIDO())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.13. Grant REPORT_REWARDS_MINTED_ROLE to Accounting", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.STAKING_ROUTER(), - abi.encodeCall(IAccessControl.grantRole, (reportRewardsMintedRole, TEMPLATE.ACCOUNTING())) - ) - }); - - bytes32 configManagerRole = IOracleDaemonConfig(TEMPLATE.ORACLE_DAEMON_CONFIG()).CONFIG_MANAGER_ROLE(); - - voteItems[index++] = VoteItem({ - description: "1.14. 
Grant OracleDaemonConfig's CONFIG_MANAGER_ROLE to Agent", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ORACLE_DAEMON_CONFIG(), - abi.encodeCall(IAccessControl.grantRole, (configManagerRole, TEMPLATE.AGENT())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.15. Set SLASHING_RESERVE_WE_RIGHT_SHIFT at OracleDaemonConfig", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ORACLE_DAEMON_CONFIG(), - abi.encodeCall(IOracleDaemonConfig.set, ("SLASHING_RESERVE_WE_RIGHT_SHIFT", abi.encode(params.odcSlashingReserveWeRightShiftEpochs))) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.16. Set SLASHING_RESERVE_WE_LEFT_SHIFT at OracleDaemonConfig", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ORACLE_DAEMON_CONFIG(), - abi.encodeCall(IOracleDaemonConfig.set, ("SLASHING_RESERVE_WE_LEFT_SHIFT", abi.encode(params.odcSlashingReserveWeLeftShiftEpochs))) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.17. Revoke OracleDaemonConfig's CONFIG_MANAGER_ROLE from Agent", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.ORACLE_DAEMON_CONFIG(), - abi.encodeCall(IAccessControl.revokeRole, (configManagerRole, TEMPLATE.AGENT())) - ) - }); - - bytes32 pdgPauseRole = IPausableUntilWithRoles(TEMPLATE.PREDEPOSIT_GUARANTEE()).PAUSE_ROLE(); - - voteItems[index++] = VoteItem({ - description: "1.18. Grant PredepositGuarantee's PAUSE_ROLE to Agent", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.PREDEPOSIT_GUARANTEE(), - abi.encodeCall(IAccessControl.grantRole, (pdgPauseRole, TEMPLATE.AGENT())) - ) - }); - - uint256 PAUSE_INFINITELY = type(uint256).max; - - voteItems[index++] = VoteItem({ - description: "1.19. Pause PredepositGuarantee", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.PREDEPOSIT_GUARANTEE(), - abi.encodeCall(IPausableUntilWithRoles.pauseFor, (PAUSE_INFINITELY)) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.20. 
Revoke PredepositGuarantee's PAUSE_ROLE from Agent", - call: _forwardCall( - TEMPLATE.AGENT(), - TEMPLATE.PREDEPOSIT_GUARANTEE(), - abi.encodeCall(IAccessControl.revokeRole, (pdgPauseRole, TEMPLATE.AGENT())) - ) - }); - - voteItems[index++] = VoteItem({ - description: "1.21. Call V3Template.finishUpgrade", - call: _forwardCall(TEMPLATE.AGENT(), params.upgradeTemplate, abi.encodeCall(V3Template.finishUpgrade, ())) - }); - - assert(index == DG_ITEMS_COUNT); - } -} diff --git a/contracts/upgrade/interfaces/IDualGovernance.sol b/contracts/upgrade/interfaces/IDualGovernance.sol index e47e23acdf..377f88ba53 100644 --- a/contracts/upgrade/interfaces/IDualGovernance.sol +++ b/contracts/upgrade/interfaces/IDualGovernance.sol @@ -1,7 +1,6 @@ // SPDX-FileCopyrightText: 2025 Lido // SPDX-License-Identifier: UNLICENSED - // See contracts/COMPILERS.md // solhint-disable-next-line lido/fixed-compiler-version pragma solidity ^0.8.25; @@ -36,5 +35,5 @@ interface IDualGovernance { /// @return proposers An array of `Proposer` structs containing the data of all registered proposers. 
function getProposers() external view returns (Proposer[] memory proposers); - event ProposalSubmitted(uint256 indexed id, address indexed executor, ExternalCall[] calls); + event ProposalSubmitted(address indexed proposerAccount, uint256 indexed proposalId, string metadata); } diff --git a/contracts/upgrade/interfaces/IEmergencyProtectedTimelock.sol b/contracts/upgrade/interfaces/IEmergencyProtectedTimelock.sol deleted file mode 100644 index 396e1f54f1..0000000000 --- a/contracts/upgrade/interfaces/IEmergencyProtectedTimelock.sol +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: UNLICENSED - -// See contracts/COMPILERS.md -// solhint-disable-next-line lido/fixed-compiler-version -pragma solidity ^0.8.25; - -type Duration is uint32; - -interface IEmergencyProtectedTimelock { - function getAfterSubmitDelay() external view returns (Duration); - - function getAfterScheduleDelay() external view returns (Duration); - - function execute(uint256 proposalId) external; -} diff --git a/contracts/upgrade/interfaces/IOracleReportSanityChecker_preV4.sol b/contracts/upgrade/interfaces/IOracleReportSanityChecker_preV4.sol new file mode 100644 index 0000000000..ec7012f343 --- /dev/null +++ b/contracts/upgrade/interfaces/IOracleReportSanityChecker_preV4.sol @@ -0,0 +1,27 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.4.24 <0.9.0; + +struct LimitsList { + uint256 exitedValidatorsPerDayLimit; + uint256 appearedValidatorsPerDayLimit; + uint256 annualBalanceIncreaseBPLimit; + uint256 simulatedShareRateDeviationBPLimit; + uint256 maxValidatorExitRequestsPerReport; + uint256 maxItemsPerExtraDataTransaction; + uint256 maxNodeOperatorsPerExtraDataItem; + uint256 requestTimestampMargin; + uint256 maxPositiveTokenRebase; + uint256 initialSlashingAmountPWei; + uint256 inactivityPenaltiesAmountPWei; + 
uint256 clBalanceOraclesErrorUpperBPLimit; +} + +// solhint-disable contract-name-capwords +interface IOracleReportSanityChecker_preV4 { + /// @notice Returns the limits list for the Lido's oracle report sanity checks + function getOracleReportLimits() external view returns (LimitsList memory); +} diff --git a/contracts/upgrade/interfaces/ITimelock.sol b/contracts/upgrade/interfaces/ITimelock.sol new file mode 100644 index 0000000000..056fad1caa --- /dev/null +++ b/contracts/upgrade/interfaces/ITimelock.sol @@ -0,0 +1,40 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: UNLICENSED + +// See contracts/COMPILERS.md +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity ^0.8.25; + +type Duration is uint32; +type Timestamp is uint40; + +enum ProposalStatus { + NotExist, + Submitted, + Scheduled, + Executed, + Cancelled +} + +struct ExternalCall { + address target; + uint96 value; + bytes payload; +} + +interface ITimelock { + struct ProposalDetails { + uint256 id; + address executor; + Timestamp submittedAt; + Timestamp scheduledAt; + ProposalStatus status; + } + function getProposalDetails(uint256 proposalId) external view returns (ProposalDetails memory proposalDetails); + function getProposalsCount() external view returns (uint256 count); + function getAfterSubmitDelay() external view returns (Duration); + function getAfterScheduleDelay() external view returns (Duration); + function canSchedule(uint256 proposalId) external view returns (bool); + function canExecute(uint256 proposalId) external view returns (bool); + function execute(uint256 proposalId) external; +} diff --git a/contracts/upgrade/interfaces/IUpgradeConfig.sol b/contracts/upgrade/interfaces/IUpgradeConfig.sol new file mode 100644 index 0000000000..529fbea1b7 --- /dev/null +++ b/contracts/upgrade/interfaces/IUpgradeConfig.sol @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: UNLICENSED + +// See contracts/COMPILERS.md +// 
solhint-disable-next-line lido/fixed-compiler-version +pragma solidity ^0.8.25; + +import {GlobalConfig, CoreUpgradeConfig, CuratedModuleConfig, CSMUpgradeConfig} from "../UpgradeTypes.sol"; + +interface IUpgradeConfig { + function LOCATOR() external view returns (address); + function AGENT() external view returns (address); + function VOTING() external view returns (address); + function DUAL_GOVERNANCE() external view returns (address); + + function getGlobalConfig() external view returns (GlobalConfig memory); + function getCoreUpgradeConfig() external view returns (CoreUpgradeConfig memory); + function getCSMUpgradeConfig() external view returns (CSMUpgradeConfig memory); + function getCuratedModuleConfig() external view returns (CuratedModuleConfig memory); +} diff --git a/contracts/upgrade/interfaces/IUpgradeTemplate.sol b/contracts/upgrade/interfaces/IUpgradeTemplate.sol new file mode 100644 index 0000000000..31160b79cf --- /dev/null +++ b/contracts/upgrade/interfaces/IUpgradeTemplate.sol @@ -0,0 +1,11 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: UNLICENSED + +// See contracts/COMPILERS.md +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity ^0.8.25; + +interface IUpgradeTemplate { + function CONFIG() external view returns (address); + function isUpgradeFinished() external view returns (bool); +} diff --git a/contracts/upgrade/mocks/CircuitBreakerMock.sol b/contracts/upgrade/mocks/CircuitBreakerMock.sol new file mode 100644 index 0000000000..8ffad90d36 --- /dev/null +++ b/contracts/upgrade/mocks/CircuitBreakerMock.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {ICircuitBreaker} from "contracts/common/interfaces/ICircuitBreaker.sol"; +import {IPausableUntil} from "contracts/common/interfaces/IPausableUntil.sol"; + +contract CircuitBreakerMock is ICircuitBreaker { + error SenderNotPauser(); + error PauseFailed(); + + uint256 internal immutable PAUSE_DURATION; + 
mapping(address pausable => address pauser) private _pausers; + + constructor(uint256 _duration) { + PAUSE_DURATION = _duration; + } + + function pause(address _pausable) external { + if (msg.sender != _pausers[_pausable]) revert SenderNotPauser(); + + _pausers[_pausable] = address(0); + IPausableUntil pausable = IPausableUntil(_pausable); + pausable.pauseFor(PAUSE_DURATION); + if (!pausable.isPaused()) revert PauseFailed(); + } + + function registerPauser(address _pausable, address _newPauser) external { + _pausers[_pausable] = _newPauser; + } + + function getPauser(address _pausable) external view returns (address) { + return _pausers[_pausable]; + } +} diff --git a/contracts/upgrade/mocks/EasyTrackFactoryMock.sol b/contracts/upgrade/mocks/EasyTrackFactoryMock.sol new file mode 100644 index 0000000000..096e5b6048 --- /dev/null +++ b/contracts/upgrade/mocks/EasyTrackFactoryMock.sol @@ -0,0 +1,13 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// See contracts/COMPILERS.md +// solhint-disable-next-line +pragma solidity >=0.4.24 <0.9.0; + +/// @notice Minimal stub for IEVMScriptFactory. 
+contract EasyTrackFactoryMock { + function createEVMScript(address, bytes memory _evmScriptCallData) external returns (bytes memory) { + return _evmScriptCallData; + } +} diff --git a/deployed-devnet1.json b/deployed-devnet1.json new file mode 100644 index 0000000000..fa90584e45 --- /dev/null +++ b/deployed-devnet1.json @@ -0,0 +1,1142 @@ +{ + "accounting": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x4004e893E863ECA264ffA895566F0587B5F53b80", + "constructorArgs": [ + "0x6B3342821680031732Bc7d4E88A6528478aF9E38", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.9/Accounting.sol", + "address": "0xbEDe43672Ef2A96F7426F91C1F4F6D4618C3E688", + "constructorArgs": ["0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46"] + } + }, + "accountingOracle": { + "deployParameters": { + "consensusVersion": 5 + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x9D2ea2038CF6009F1Bc57E32818204726DfA63Cd", + "constructorArgs": [ + "0xc6F76D133052002abdBAda02ba35dB8b7414FcAa", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.9/oracle/AccountingOracle.sol", + "address": "0x8603eb738b381BCcbC264c4a4568650D99aec1e3", + "constructorArgs": ["0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", 12, 1775658511] + } + }, + "apmRegistryFactory": { + "contract": "@aragon/os/contracts/factory/APMRegistryFactory.sol", + "address": "0x63e6DDE6763C3466C7b45Be880f7eE5dC2ca3E25", + "constructorArgs": [ + "0x8F0342A7060e76dfc7F6e9dEbfAD9b9eC919952c", + "0x72ae2643518179cF01bcA3278a37ceAD408DE8b2", + "0x00c042C4D5D913277CE16611a2ce6e9003554aD5", + "0x9fCF7D13d10dEdF17d0f24C62f0cf4ED462f65b7", + "0xc3d8108FC7f92B936552d658fc3dBC834193f344", + "0x0000000000000000000000000000000000000000" + ] + }, + "app:aragon-agent": { + "implementation": { + "contract": 
"@aragon/apps-agent/contracts/Agent.sol", + "address": "0xB965D10739e19a9158e7f713720B0145D996E370", + "constructorArgs": [] + }, + "aragonApp": { + "name": "aragon-agent", + "fullName": "aragon-agent.lidopm.eth", + "id": "0x701a4fd1f5174d12a0f1d9ad2c88d0ad11ab6aad0ac72b7d9ce621815f8016a9", + "repo": { + "proxy": { + "address": "0x932928cD34f984c36C0424bEEec5491c305D77dc" + } + } + }, + "proxy": { + "address": "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0x701a4fd1f5174d12a0f1d9ad2c88d0ad11ab6aad0ac72b7d9ce621815f8016a9", + "0x8129fc1c" + ] + } + }, + "app:aragon-finance": { + "implementation": { + "contract": "@aragon/apps-finance/contracts/Finance.sol", + "address": "0x3A8C1bd531b5C1aeFBB9ebc3e021C1251cF4Ccb1", + "constructorArgs": [] + }, + "aragonApp": { + "name": "aragon-finance", + "fullName": "aragon-finance.lidopm.eth", + "id": "0x5c9918c99c4081ca9459c178381be71d9da40e49e151687da55099c49a4237f1", + "repo": { + "proxy": { + "address": "0xb7f3C9Bbd240A8CC1A472D8Ac0b281f975858CaF" + } + } + }, + "proxy": { + "address": "0x9284d9624609caa6550734Cb92FAD0c4Edf7B7A3", + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0x5c9918c99c4081ca9459c178381be71d9da40e49e151687da55099c49a4237f1", + "0x1798de81000000000000000000000000105031feea0162de118c5edeed626e08bd55b2560000000000000000000000000000000000000000000000000000000000278d00" + ] + } + }, + "app:aragon-token-manager": { + "implementation": { + "contract": "@aragon/apps-lido/apps/token-manager/contracts/TokenManager.sol", + "address": "0x38435Ac0E0e9Bd8737c476F8F39a24b0735e00dc", + "constructorArgs": [] + }, + "aragonApp": { + "name": "aragon-token-manager", + "fullName": "aragon-token-manager.lidopm.eth", + "id": "0xcd567bdf93dd0f6acc3bc7f2155f83244d56a65abbfbefb763e015420102c67b", + 
"repo": { + "proxy": { + "address": "0x525901a80DE8476ab22713c87519F84d49771a38" + } + } + }, + "proxy": { + "address": "0x261fA5F46465c206635851d12958C4Cd5A6DD307", + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0xcd567bdf93dd0f6acc3bc7f2155f83244d56a65abbfbefb763e015420102c67b", + "0x" + ] + } + }, + "app:aragon-voting": { + "implementation": { + "contract": "@aragon/apps-lido/apps/voting/contracts/Voting.sol", + "address": "0x1430c9c2143F97aaE765197e744BaBa7e78acaf0", + "constructorArgs": [] + }, + "aragonApp": { + "name": "aragon-voting", + "fullName": "aragon-voting.lidopm.eth", + "id": "0x0abcd104777321a82b010357f20887d61247493d89d2e987ff57bcecbde00e1e", + "repo": { + "proxy": { + "address": "0x5Ec7f51B2cd7e2aB9a59E1aa4c193848fF9165e0" + } + } + }, + "proxy": { + "address": "0xff396Fe634ca149492999280F4967683C5Ab4680", + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0x0abcd104777321a82b010357f20887d61247493d89d2e987ff57bcecbde00e1e", + "0x13e09453000000000000000000000000ffb97f23ce70522eb854d3dae9be951789318b5100000000000000000000000000000000000000000000000006f05b59d3b2000000000000000000000000000000000000000000000000000000b1a2bc2ec50000000000000000000000000000000000000000000000000000000000000000012c0000000000000000000000000000000000000000000000000000000000000005" + ] + } + }, + "app:lido": { + "implementation": { + "contract": "contracts/0.4.24/Lido.sol", + "address": "0xa923c38662D9a0120629156a36c5029bA273d43f", + "constructorArgs": [] + }, + "aragonApp": { + "name": "lido", + "fullName": "lido.lidopm.eth", + "id": "0x3ca7c3e38968823ccb4c78ea688df41356f182ae1d159e4ee608d30d68cef320", + "repo": { + "proxy": { + "address": "0x9FF1641e8d4ec764E2F4562Ba9FCB4433cC4E191" + } + } + }, + "proxy": { + "address": "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "contract": 
"@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0x3ca7c3e38968823ccb4c78ea688df41356f182ae1d159e4ee608d30d68cef320", + "0x" + ] + } + }, + "app:node-operators-registry": { + "implementation": { + "contract": "contracts/0.4.24/nos/NodeOperatorsRegistry.sol", + "address": "0x2A3365C575a5Fc8fD2842B82D29f8035E7f71CeC", + "constructorArgs": [] + }, + "aragonApp": { + "name": "node-operators-registry", + "fullName": "node-operators-registry.lidopm.eth", + "id": "0x7071f283424072341f856ac9e947e7ec0eb68719f757a7e785979b6b8717579d", + "repo": { + "proxy": { + "address": "0x8C5E2C6d50f3EfA1B616A294eC22A89D72E2A56F" + } + } + }, + "proxy": { + "address": "0x2AA77A8837ee41a2635307590Ee540248FBFE236", + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0x7071f283424072341f856ac9e947e7ec0eb68719f757a7e785979b6b8717579d", + "0x" + ] + } + }, + "app:simple-dvt": { + "aragonApp": { + "name": "simple-dvt", + "fullName": "simple-dvt.lidopm.eth", + "id": "0xe1635b63b5f7b5e545f2a637558a4029dea7905361a2f0fc28c66e9136cf86a4" + }, + "proxy": { + "address": "0xe9410845D15D9217eB583Cf03a6e398fd813C6bb", + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0xe1635b63b5f7b5e545f2a637558a4029dea7905361a2f0fc28c66e9136cf86a4", + "0x" + ] + } + }, + "aragon-acl": { + "implementation": { + "contract": "@aragon/os/contracts/acl/ACL.sol", + "address": "0x0643D39D47CF0ea95Dbea69Bf11a7F8C4Bc34968", + "constructorArgs": [] + }, + "proxy": { + "address": "0x91510008D61106185441f914B942A744f41aBD46", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0xe3262375f45a6e2026b7e7b18c2b807434f2508fe1a2a3dfb493c7df8f4aad6a", + "0x00" + ], + "contract": "@aragon/os/contracts/apps/AppProxyUpgradeable.sol" + }, + "aragonApp": { + "name": 
"aragon-acl", + "id": "0xe3262375f45a6e2026b7e7b18c2b807434f2508fe1a2a3dfb493c7df8f4aad6a" + } + }, + "aragon-apm-registry": { + "implementation": { + "contract": "@aragon/os/contracts/apm/APMRegistry.sol", + "address": "0x72ae2643518179cF01bcA3278a37ceAD408DE8b2", + "constructorArgs": [] + }, + "proxy": { + "address": "0x499361fB1C5bA3cec6326a3EEfa4D170f8C02e9E", + "contract": "@aragon/os/contracts/apm/APMRegistry.sol" + } + }, + "aragon-evm-script-registry": { + "proxy": { + "address": "0xA855a45231F95Bc2f1374Dc1B138DADA41fE37DC", + "constructorArgs": [ + "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "0xddbcfd564f642ab5627cf68b9b7d374fb4f8a36e941a75d89c87998cef03bd61", + "0x8129fc1c" + ], + "contract": "@aragon/os/contracts/apps/AppProxyPinned.sol" + }, + "aragonApp": { + "name": "aragon-evm-script-registry", + "id": "0xddbcfd564f642ab5627cf68b9b7d374fb4f8a36e941a75d89c87998cef03bd61" + }, + "implementation": { + "address": "0x9Cc15D879F6D6e7c0EB9558a9d5fBca6553fF680", + "contract": "@aragon/os/contracts/evmscript/EVMScriptRegistry.sol", + "constructorArgs": [] + } + }, + "aragon-kernel": { + "implementation": { + "contract": "@aragon/os/contracts/kernel/Kernel.sol", + "address": "0x422A3492e218383753D8006C7Bfa97815B44373F", + "constructorArgs": [true] + }, + "proxy": { + "address": "0xb8966836EfFa491B5Fe4b4F0cc4Ff48cFab0c269", + "contract": "@aragon/os/contracts/kernel/KernelProxy.sol", + "constructorArgs": ["0x422A3492e218383753D8006C7Bfa97815B44373F"] + } + }, + "aragon-repo-base": { + "contract": "@aragon/os/contracts/apm/Repo.sol", + "address": "0x00c042C4D5D913277CE16611a2ce6e9003554aD5", + "constructorArgs": [] + }, + "aragonEnsLabelName": "aragonpm", + "aragonID": { + "address": "0xEE0fCB8E5cCAD0b4197BAabd633333886f5C364d", + "contract": "@aragon/id/contracts/FIFSResolvingRegistrar.sol", + "constructorArgs": [ + "0xc3d8108FC7f92B936552d658fc3dBC834193f344", + "0x0DB1c3b45e7870dDAf77F37cAf3a3B2dD81D77C3", + 
"0x7e74a86b6e146964fb965db04dc2590516da77f720bb6759337bf5632415fd86" + ] + }, + "beaconChainDepositor": { + "contract": "contracts/0.8.25/lib/BeaconChainDepositor.sol", + "address": "0x5eb256232f2a78607Fab70B4f4026684d0a6d0d7", + "constructorArgs": [] + }, + "burner": { + "deployParameters": { + "totalCoverSharesBurnt": "0", + "totalNonCoverSharesBurnt": "0" + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x9CA67e0547a42051227A7170e79b2C594411A1e2", + "constructorArgs": [ + "0xCd9f348aeBF4649BFF58654da5C7A008b9008e7d", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.9/Burner.sol", + "address": "0xCd9f348aeBF4649BFF58654da5C7A008b9008e7d", + "constructorArgs": ["0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46"] + } + }, + "callsScript": { + "address": "0xE656cf6D6A75A8459a4aF9770b435BDBB2CA652F", + "contract": "@aragon/os/contracts/evmscript/executors/CallsScript.sol", + "constructorArgs": [] + }, + "chainId": 32382, + "chainSpec": { + "slotsPerEpoch": 32, + "secondsPerSlot": 12, + "genesisTime": 1775658511, + "depositContract": "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "genesisForkVersion": "0x00000000" + }, + "circuitBreaker": { + "address": "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "note": "The circuit breaker contract is not deployed, the address is set to the deployer address as a placeholder." 
+ }, + "consolidationBus": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0xAd9cfEB98bD1D4769C45Bd2Ec551197096e26e64", + "constructorArgs": [ + "0x6BDfDD18391507eFc8534752328b8cbAbeBfc480", + "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "0x4ec81af1000000000000000000000000dd427698b14932dd5b88822ce9a9d98c35ccdd58000000000000000000000000000000000000000000000000000000000000015e000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/consolidation/ConsolidationBus.sol", + "address": "0x6BDfDD18391507eFc8534752328b8cbAbeBfc480", + "constructorArgs": ["0x91abAc209511A6b73A0b4722DC9a227a54944e7C"] + } + }, + "consolidationGateway": { + "contract": "contracts/0.8.25/consolidation/ConsolidationGateway.sol", + "address": "0x91abAc209511A6b73A0b4722DC9a227a54944e7C", + "constructorArgs": [ + "0xDD427698B14932DD5b88822cE9a9D98c35CcdD58", + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + 2900, + 1, + 36, + "0x0000000000000000000000000000000000000000000000000096000000000028", + "0x0000000000000000000000000000000000000000000000000096000000000028", + 0 + ] + }, + "consolidationMigrator": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x36dD4b2B46a27A33b1caAe819F53E4E7a0cE785b", + "constructorArgs": [ + "0xA08Ff9f3262A0E42e241cBDeBFBED1D3d601A12a", + "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "0xc4d66de8000000000000000000000000dd427698b14932dd5b88822ce9a9d98c35ccdd58" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/consolidation/ConsolidationMigrator.sol", + "address": "0xA08Ff9f3262A0E42e241cBDeBFBED1D3d601A12a", + "constructorArgs": [ + "0xb239879B1f9Cd1B72b16Bab768D29Ea3293282Af", + "0xAd9cfEB98bD1D4769C45Bd2Ec551197096e26e64", + 1, + 4 + ] + } + }, + "createAppReposTx": "0x33a2f40de9197365c9b529fd229a4583bfd7bad2d859cb8176d520b485870d9e", + 
"daoAragonId": "lido-dao", + "daoFactory": { + "address": "0x8F0342A7060e76dfc7F6e9dEbfAD9b9eC919952c", + "contract": "@aragon/os/contracts/factory/DAOFactory.sol", + "constructorArgs": [ + "0x422A3492e218383753D8006C7Bfa97815B44373F", + "0x0643D39D47CF0ea95Dbea69Bf11a7F8C4Bc34968", + "0x9f9F5Fd89ad648f2C000C954d8d9C87743243eC5" + ] + }, + "daoInitialSettings": { + "voting": { + "minSupportRequired": "500000000000000000", + "minAcceptanceQuorum": "50000000000000000", + "voteDuration": 300, + "objectionPhaseDuration": 5 + }, + "fee": { + "totalPercent": 10, + "treasuryPercent": 50, + "nodeOperatorsPercent": 50 + }, + "token": { + "name": "TEST Lido DAO Token", + "symbol": "TLDO" + } + }, + "dashboardImpl": { + "contract": "contracts/0.8.25/vaults/dashboard/Dashboard.sol", + "address": "0xc5092f6c1f30C8970dc835F0F754057f79b89CC6", + "constructorArgs": [ + "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "0xeB804d271b58d9405CD94e294504E56d55B6E35c", + "0xb34CcfE453E4D31A1A7B5CdDd79020eA669A2a03", + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9" + ] + }, + "deployer": "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "depositSecurityModule": { + "deployParameters": { + "maxOperatorsPerUnvetting": 200, + "pauseIntentValidityPeriodBlocks": 6646, + "usePredefinedAddressInstead": null + }, + "contract": "contracts/0.8.9/DepositSecurityModule.sol", + "address": "0x9ec0E29F835207963010E48bEB6A066A6a2f3E1d", + "constructorArgs": [ + "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "0xb239879B1f9Cd1B72b16Bab768D29Ea3293282Af", + 6646, + 200 + ] + }, + "dg:dualGovernance": { + "proxy": { + "address": "0x8943545177806ED17B9F23F0a21ee5948eCaa776" + }, + "note": "The dual governance contract is not deployed, the address is set to the deployer address as a placeholder." 
+ }, + "dg:emergencyProtectedTimelock": { + "proxy": { + "address": "0x8943545177806ED17B9F23F0a21ee5948eCaa776" + }, + "note": "The dual governance contract is not deployed, the address is set to the deployer address as a placeholder." + }, + "dummyEmptyContract": { + "contract": "contracts/0.8.9/utils/DummyEmptyContract.sol", + "address": "0x2694cfcc47aAbC5C042dea104b6ac37e9924a4B1", + "constructorArgs": [] + }, + "easyTrack": { + "address": "0x88E0e2E7C06D24E3746035eFA183bBD83F0373DE" + }, + "easyTrackEVMScriptExecutor": { + "address": "0x9c457ac681092D4Fb4e8C9Ac0Bdf5e59efBbE016" + }, + "eip712StETH": { + "contract": "contracts/0.8.9/EIP712StETH.sol", + "address": "0xa3c616dd54F6BB35a736cD6968c8EF7176faCACc", + "constructorArgs": ["0x8101cbB371bCFF480552B6B1CF847dF606c4AE46"] + }, + "ens": { + "address": "0xc3d8108FC7f92B936552d658fc3dBC834193f344", + "constructorArgs": [], + "contract": "@aragon/os/contracts/lib/ens/ENS.sol" + }, + "ensFactory": { + "contract": "@aragon/os/contracts/factory/ENSFactory.sol", + "address": "0x17435ccE3d1B4fA2e5f8A08eD921D57C6762A180", + "constructorArgs": [] + }, + "ensNode": { + "nodeName": "aragonpm.eth", + "nodeIs": "0x9065c3e7f7b7ef1ef4e53d2d0b8e0cef02874ab020c1ece79d5f0d3d0111c0ba" + }, + "ensSubdomainRegistrar": { + "implementation": { + "contract": "@aragon/os/contracts/ens/ENSSubdomainRegistrar.sol", + "address": "0x9fCF7D13d10dEdF17d0f24C62f0cf4ED462f65b7", + "constructorArgs": [] + } + }, + "evmScriptRegistryFactory": { + "contract": "@aragon/os/contracts/factory/EVMScriptRegistryFactory.sol", + "address": "0x9f9F5Fd89ad648f2C000C954d8d9C87743243eC5", + "constructorArgs": [] + }, + "executionLayerRewardsVault": { + "contract": "contracts/0.8.9/LidoExecutionLayerRewardsVault.sol", + "address": "0x57E5d642648F54973e504f10D21Ea06360151cAf", + "constructorArgs": ["0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", "0x105031FEEa0162de118C5EdEEd626E08BD55B256"] + }, + "gateSeal": { + "address": null, + "factoryAddress": null, + 
"sealDuration": 518400, + "expiryTimestamp": 1714521600, + "sealingCommittee": [] + }, + "hashConsensusForAccountingOracle": { + "deployParameters": { + "fastLaneLengthSlots": 10, + "epochsPerFrame": 8 + }, + "contract": "contracts/0.8.9/oracle/HashConsensus.sol", + "address": "0x1912A7496314854fB890B1B88C0f1Ced653C1830", + "constructorArgs": [ + 32, + 12, + 1775658511, + 8, + 10, + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x9D2ea2038CF6009F1Bc57E32818204726DfA63Cd" + ] + }, + "hashConsensusForValidatorsExitBusOracle": { + "deployParameters": { + "fastLaneLengthSlots": 10, + "epochsPerFrame": 8 + }, + "contract": "contracts/0.8.9/oracle/HashConsensus.sol", + "address": "0x556994566b825bECb7DB89c345Fc994F27e4BA09", + "constructorArgs": [ + 32, + 12, + 1775658511, + 8, + 10, + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x05cE5c99898b2a4376381C3c39217833ED0E15a4" + ] + }, + "lazyOracle": { + "deployParameters": { + "quarantinePeriod": 259200, + "maxRewardRatioBP": 350, + "maxLidoFeeRatePerSecond": "180000000000000000" + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x57718C9cE2CcC1D7a4a136ebECA892c8Dd9F67c7", + "constructorArgs": [ + "0xD9BFe39BA99503baA8cBA3DF08e3C9421889Fd44", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/vaults/LazyOracle.sol", + "address": "0xD9BFe39BA99503baA8cBA3DF08e3C9421889Fd44", + "constructorArgs": ["0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9"] + } + }, + "ldo": { + "address": "0xfFB97F23Ce70522eb854D3DAE9be951789318B51", + "contract": "@aragon/minime/contracts/MiniMeToken.sol", + "constructorArgs": [ + "0x72bCbB3f339aF622c28a26488Eed9097a2977404", + "0x0000000000000000000000000000000000000000", + 0, + "TEST Lido DAO Token", + 18, + "TLDO", + true + ] + }, + "lidoApm": { + "deployArguments": [ + "0x93cdeb708b7545dc668eb9280176169d1c33cfd8ed6f04690a0bcc88a93fc4ae", + 
"0x90a9580abeb24937fc658e497221c81ce8553b560304f9525821f32b17dbdaec" + ], + "deployTx": "0x1aa5934ace6a55702692894e13b88d7158e38e98a338d1d4d2ac615e2d7e9e34", + "address": "0x049E287ac6C2Bdd17b3Ec1E607822581E21Fb495" + }, + "lidoApmEnsName": "lidopm.eth", + "lidoApmEnsRegDurationSec": 94608000, + "lidoLocator": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "constructorArgs": [ + "0x2694cfcc47aAbC5C042dea104b6ac37e9924a4B1", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.9/LidoLocator.sol", + "address": "0xc50f6b8AcBc6C5852A44371ef31b52122F43c349", + "constructorArgs": [ + { + "accountingOracle": "0x9D2ea2038CF6009F1Bc57E32818204726DfA63Cd", + "depositSecurityModule": "0x9ec0E29F835207963010E48bEB6A066A6a2f3E1d", + "elRewardsVault": "0x57E5d642648F54973e504f10D21Ea06360151cAf", + "lido": "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "oracleReportSanityChecker": "0x12cD817dcaaBA5Ee101bb30bBBeccA859B999571", + "postTokenRebaseReceiver": "0x4b0Ead5b3CB32D243176c73dA6be0dECe837C36F", + "burner": "0x9CA67e0547a42051227A7170e79b2C594411A1e2", + "stakingRouter": "0xb239879B1f9Cd1B72b16Bab768D29Ea3293282Af", + "treasury": "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "validatorsExitBusOracle": "0x05cE5c99898b2a4376381C3c39217833ED0E15a4", + "withdrawalQueue": "0x86A0679C7987B5BA9600affA994B78D0660088ff", + "withdrawalVault": "0xbFaC9e95F250952630Eef4ef62E602d0D37844fe", + "oracleDaemonConfig": "0x3c0e871bB7337D5e6A18FDD73c4D9e7567961Ad3", + "validatorExitDelayVerifier": "0xA2C4Ef228de6BA701660e75Cb06f1c9b29E53069", + "triggerableWithdrawalsGateway": "0x2af486b3C64D73B03A01Ee8aBD5A94287a5BFD49", + "consolidationGateway": "0x91abAc209511A6b73A0b4722DC9a227a54944e7C", + "accounting": "0x4004e893E863ECA264ffA895566F0587B5F53b80", + "predepositGuarantee": "0x3754cb88Ff9e3A2916Eaa0C6430BB9b299DfB658", + "wstETH": 
"0xeB804d271b58d9405CD94e294504E56d55B6E35c", + "vaultHub": "0xb34CcfE453E4D31A1A7B5CdDd79020eA669A2a03", + "vaultFactory": "0x0cA0DC8810361829363AB17196A747fA1387D202", + "lazyOracle": "0x57718C9cE2CcC1D7a4a136ebECA892c8Dd9F67c7", + "operatorGrid": "0x00CfaC4fF61D52771eF27d07c5b6f1263C2994A1", + "topUpGateway": "0x6fBC704F5a3B7Cc7C1Cf6991483508EdeddBf169" + } + ] + } + }, + "lidoTemplate": { + "contract": "contracts/0.4.24/template/LidoTemplate.sol", + "address": "0x9ECB6f04D47FA2599449AaA523bF84476f7aD80f", + "constructorArgs": [ + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x8F0342A7060e76dfc7F6e9dEbfAD9b9eC919952c", + "0xc3d8108FC7f92B936552d658fc3dBC834193f344", + "0x72bCbB3f339aF622c28a26488Eed9097a2977404", + "0xEE0fCB8E5cCAD0b4197BAabd633333886f5C364d", + "0x63e6DDE6763C3466C7b45Be880f7eE5dC2ca3E25" + ], + "deployBlock": 38 + }, + "lidoTemplateCreateStdAppReposTx": "0x93adb22a33c7d4a42883ab176c91f20811a811f0638458f796ac85b3bb52a64d", + "lidoTemplateNewDaoTx": "0x955f082190614d205ba33ea4b44ab3d57257347d877da55b905462e357a6bcf8", + "minFirstAllocationStrategy": { + "contract": "contracts/common/lib/MinFirstAllocationStrategy.sol", + "address": "0xD99D7faDfA6c51262B87e645B5912947A8C8707B", + "constructorArgs": [] + }, + "miniMeTokenFactory": { + "address": "0x72bCbB3f339aF622c28a26488Eed9097a2977404", + "contract": "@aragon/minime/contracts/MiniMeToken.sol", + "constructorArgs": [], + "contractName": "MiniMeTokenFactory" + }, + "networkId": 32382, + "nodeOperatorsRegistry": { + "deployParameters": { + "stakingModuleName": "Curated", + "stakingModuleTypeId": "curated-onchain-v1", + "stuckPenaltyDelay": 172800 + } + }, + "operatorGrid": { + "deployParameters": { + "defaultTierParams": { + "shareLimitInEther": "0", + "reserveRatioBP": 5000, + "forcedRebalanceThresholdBP": 4975, + "infraFeeBP": 100, + "liquidityFeeBP": 650, + "reservationFeeBP": 0 + } + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": 
"0x00CfaC4fF61D52771eF27d07c5b6f1263C2994A1", + "constructorArgs": [ + "0xD7d1678E63Ec1d3609F404659Ff6C4d6C590825b", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/vaults/OperatorGrid.sol", + "address": "0xD7d1678E63Ec1d3609F404659Ff6C4d6C590825b", + "constructorArgs": ["0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9"] + } + }, + "oracleDaemonConfig": { + "deployParameters": { + "NORMALIZED_CL_REWARD_PER_EPOCH": 64, + "NORMALIZED_CL_REWARD_MISTAKE_RATE_BP": 1000, + "REBASE_CHECK_NEAREST_EPOCH_DISTANCE": 1, + "REBASE_CHECK_DISTANT_EPOCH_DISTANCE": 2, + "VALIDATOR_DELAYED_TIMEOUT_IN_SLOTS": 7200, + "VALIDATOR_DELINQUENT_TIMEOUT_IN_SLOTS": 28800, + "NODE_OPERATOR_NETWORK_PENETRATION_THRESHOLD_BP": 100, + "PREDICTION_DURATION_IN_SLOTS": 50400, + "FINALIZATION_MAX_NEGATIVE_REBASE_EPOCH_SHIFT": 1350, + "EXIT_EVENTS_LOOKBACK_WINDOW_IN_SLOTS": 7200 + }, + "contract": "contracts/0.8.9/OracleDaemonConfig.sol", + "address": "0x3c0e871bB7337D5e6A18FDD73c4D9e7567961Ad3", + "constructorArgs": ["0x8943545177806ED17B9F23F0a21ee5948eCaa776", []] + }, + "oracleReportSanityChecker": { + "deployParameters": { + "exitedValidatorsPerDayLimit": 1500, + "appearedValidatorsPerDayLimit": 1500, + "annualBalanceIncreaseBPLimit": 1000, + "simulatedShareRateDeviationBPLimit": 250, + "maxValidatorExitRequestsPerReport": 2000, + "maxItemsPerExtraDataTransaction": 8, + "maxNodeOperatorsPerExtraDataItem": 24, + "requestTimestampMargin": 128, + "maxPositiveTokenRebase": 5000000, + "initialSlashingAmountPWei": 1000, + "inactivityPenaltiesAmountPWei": 101, + "clBalanceOraclesErrorUpperBPLimit": 50 + }, + "contract": "contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol", + "address": "0x12cD817dcaaBA5Ee101bb30bBBeccA859B999571", + "constructorArgs": [ + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "0x4004e893E863ECA264ffA895566F0587B5F53b80", + "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + { + "exitedEthAmountPerDayLimit": 
57600, + "appearedEthAmountPerDayLimit": 57600, + "annualBalanceIncreaseBPLimit": "1000", + "simulatedShareRateDeviationBPLimit": "250", + "maxBalanceExitRequestedPerReportInEth": 19200, + "maxEffectiveBalanceWeightWCType01": 32, + "maxEffectiveBalanceWeightWCType02": 2048, + "maxItemsPerExtraDataTransaction": "8", + "maxNodeOperatorsPerExtraDataItem": "24", + "requestTimestampMargin": "128", + "maxPositiveTokenRebase": "5000000", + "maxCLBalanceDecreaseBP": 360, + "clBalanceOraclesErrorUpperBPLimit": "50", + "consolidationEthAmountPerDayLimit": 93375, + "exitedValidatorEthAmountLimit": 32 + } + ] + }, + "predepositGuarantee": { + "deployParameters": { + "gIndex": "0x0000000000000000000000000000000000000000000000000096000000000028", + "gIndexAfterChange": "0x0000000000000000000000000000000000000000000000000096000000000028", + "changeSlot": 0 + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x3754cb88Ff9e3A2916Eaa0C6430BB9b299DfB658", + "constructorArgs": [ + "0x482AF59820C30d13C9b06AdC75c741912e599D29", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/vaults/predeposit_guarantee/PredepositGuarantee.sol", + "address": "0x482AF59820C30d13C9b06AdC75c741912e599D29", + "constructorArgs": [ + "0x00000000", + "0x0000000000000000000000000000000000000000000000000096000000000028", + "0x0000000000000000000000000000000000000000000000000096000000000028", + 0 + ] + } + }, + "resealManager": { + "address": "0x0000000000000000000000000000000000000111", + "note": "The reseal manager contract is not deployed, the address is set to the deployer address as a placeholder." 
+ }, + "scratchDeployGasUsed": "483719081", + "simpleDvt": { + "deployParameters": { + "stakingModuleName": "SimpleDVT", + "stakingModuleTypeId": "curated-onchain-v1", + "stuckPenaltyDelay": 432000 + } + }, + "srLib": { + "contract": "contracts/0.8.25/sr/SRLib.sol", + "address": "0x0a2909E98b207e48559245837a307Bb3f39cCBaf", + "constructorArgs": [] + }, + "stakingRouter": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0xb239879B1f9Cd1B72b16Bab768D29Ea3293282Af", + "constructorArgs": [ + "0x6fDA176cb71b4f2b85c17E398b58803797f721e4", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/sr/StakingRouter.sol", + "address": "0x0A26f3D2c486e09d6D1D53De27a2CED2bDD17B34", + "constructorArgs": [ + "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "32000000000000000000", + "2048000000000000000000" + ] + } + }, + "stakingVaultBeacon": { + "contract": "@openzeppelin/contracts-v5.2/proxy/beacon/UpgradeableBeacon.sol", + "address": "0xF02a43985ab5011af94F6d4dAd454C5E305A3e42", + "constructorArgs": ["0x389De1637Fb8866A9741Cb46Aa10bF6a38646D9a", "0x8943545177806ED17B9F23F0a21ee5948eCaa776"] + }, + "stakingVaultFactory": { + "contract": "contracts/0.8.25/vaults/VaultFactory.sol", + "address": "0x0cA0DC8810361829363AB17196A747fA1387D202", + "constructorArgs": [ + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "0xF02a43985ab5011af94F6d4dAd454C5E305A3e42", + "0xc5092f6c1f30C8970dc835F0F754057f79b89CC6", + "0x0000000000000000000000000000000000000000" + ] + }, + "stakingVaultImplementation": { + "contract": "contracts/0.8.25/vaults/StakingVault.sol", + "address": "0x389De1637Fb8866A9741Cb46Aa10bF6a38646D9a", + "constructorArgs": ["0x00000000219ab540356cBB839Cbe05303d7705Fa"] + }, + "tokenRebaseNotifier": { + "contract": "contracts/0.8.9/TokenRateNotifier.sol", + "address": 
"0x4b0Ead5b3CB32D243176c73dA6be0dECe837C36F", + "constructorArgs": ["0x105031FEEa0162de118C5EdEEd626E08BD55B256", "0x4004e893E863ECA264ffA895566F0587B5F53b80"] + }, + "topUpGateway": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x6fBC704F5a3B7Cc7C1Cf6991483508EdeddBf169", + "constructorArgs": [ + "0x8aD9e7419e0954606f955F8A8d5864a244F2e432", + "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "0x45ff4c80000000000000000000000000dd427698b14932dd5b88822ce9a9d98c35ccdd5800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000012c000000000000000000000000000000000000000000000000000001dc8bce8380000000000000000000000000000000000000000000000000000000003b9aca00" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/TopUpGateway.sol", + "address": "0x8aD9e7419e0954606f955F8A8d5864a244F2e432", + "constructorArgs": [ + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "0x0000000000000000000000000000000000000000000000000096000000000028", + "0x0000000000000000000000000000000000000000000000000096000000000028", + 0, + 32 + ] + } + }, + "triggerableWithdrawalsGateway": { + "deployParameters": { + "maxExitRequestsLimit": 13000, + "exitsPerFrame": 1, + "frameDurationInSec": 48 + }, + "contract": "contracts/0.8.9/TriggerableWithdrawalsGateway.sol", + "address": "0x2af486b3C64D73B03A01Ee8aBD5A94287a5BFD49", + "constructorArgs": [ + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + 13000, + 1, + 48 + ] + }, + "upgradeTemplate": { + "contract": "contracts/upgrade/UpgradeTemplate.sol", + "address": "0x4869a340FEe68f625bb16bbeBa5aE56cb9eBCac0", + "constructorArgs": [ + { + "locator": "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "agent": "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "voting": "0xff396Fe634ca149492999280F4967683C5Ab4680", + "dualGovernance": 
"0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "resealManager": "0x0000000000000000000000000000000000000111", + "easyTrack": "0x88E0e2E7C06D24E3746035eFA183bBD83F0373DE", + "circuitBreaker": "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "newFactories": { + "UpdateStakingModuleShareLimits": "0x7ed96eEfB57E1e4C6d64D19818F3A78664318829", + "AllowConsolidationPair": "0xD6aC302915790074c7291c3bD64ddD5aeC5A7708", + "AllowedMerkleGatesRegistryForCSM": "0xF7740F7a819825152383f12182d1F0cFdB1e19c7", + "SetMerkleGateTreeForCSM": "0xF417BBcc9682B6d5B8312e8F8588Cd6489C5B85F", + "ReportWithdrawalsForSlashedValidatorsForCSM": "0x8418692bf1390F1a9f2E3996475612514F746a77", + "SettleGeneralDelayedPenaltyForCSM": "0xF3865826309f55cD3E104a22Ab9d04724d2D9020", + "AllowedMerkleGatesRegistryForCM": "0x92420b96A6ce5B4F00849A944e0489c58B9759F6", + "SetMerkleGateTreeForCM": "0x9BC48d538C9d8dd7a559f4460CCb2327fF39C1f4", + "ReportWithdrawalsForSlashedValidatorsForCM": "0x612d23ba265ad1fDc62B2e3A08722668E1D0ECa3", + "SettleGeneralDelayedPenaltyForCM": "0xd51c0d3C9BF00FF5690f901BBB8E85e976374BAE", + "CreateOrUpdateOperatorGroupForCM": "0x29D272Db50946716466cf00bC7d4bc435d8Ea44E" + }, + "oldFactories": { + "CSMSettleElStealingPenalty": "0x00000000000000000000000000000000000eee07", + "CSMSetVettedGateTree": "0x00000000000000000000000000000000000eee16" + }, + "coreUpgrade": { + "oldLocatorImpl": "0xF8140B6286DbED6be8448eed7ef81E69E4b5EAd4", + "oldLidoImpl": "0x80741a37E3644612F0465145C9709a90B6D77Ee3", + "oldAccountingImpl": "0x6B3342821680031732Bc7d4E88A6528478aF9E38", + "oldAccountingOracleImpl": "0xc6F76D133052002abdBAda02ba35dB8b7414FcAa", + "oldStakingRouterImpl": "0x6fDA176cb71b4f2b85c17E398b58803797f721e4", + "oldWithdrawalVaultImpl": "0x97b20dE03D78Db78fB59C9c92769ECabb183d1bA", + "oldValidatorsExitBusOracleImpl": "0x8A28E51A50CB8b532457486443a4ab56000D0811", + "newLocatorImpl": "0xc50f6b8AcBc6C5852A44371ef31b52122F43c349", + "newLidoImpl": "0xa923c38662D9a0120629156a36c5029bA273d43f", 
+ "newAccountingImpl": "0xbEDe43672Ef2A96F7426F91C1F4F6D4618C3E688", + "newAccountingOracleImpl": "0x8603eb738b381BCcbC264c4a4568650D99aec1e3", + "newStakingRouterImpl": "0x0A26f3D2c486e09d6D1D53De27a2CED2bDD17B34", + "newWithdrawalVaultImpl": "0x325cC427128Ae28Cd547D5D112cabD65EE79719A", + "newValidatorsExitBusOracleImpl": "0x0dbbEDA8D62db7bdB8929da9f8db3318b0299067", + "consolidationBusImpl": "0x6BDfDD18391507eFc8534752328b8cbAbeBfc480", + "consolidationMigratorImpl": "0xA08Ff9f3262A0E42e241cBDeBFBED1D3d601A12a", + "topUpGatewayImpl": "0x8aD9e7419e0954606f955F8A8d5864a244F2e432", + "topUpGateway": "0x6fBC704F5a3B7Cc7C1Cf6991483508EdeddBf169", + "topUpGatewayDepositor": "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "twMaxExitRequestsLimit": 250, + "twExitsPerFrame": 1, + "twFrameDurationInSec": 240, + "aoConsensusVersion": 6, + "veboMaxValidatorsPerReport": 600, + "veboMaxExitBalanceEth": 416000, + "veboBalancePerFrameEth": 32, + "veboFrameDurationInSec": 48, + "veboConsensusVersion": 5, + "consolidationBus": "0xAd9cfEB98bD1D4769C45Bd2Ec551197096e26e64", + "consolidationMigrator": "0x36dD4b2B46a27A33b1caAe819F53E4E7a0cE785b", + "curatedModuleCommittee": "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "lidoDepositsReserveTarget": "1000000000000000000000" + }, + "csmUpgrade": { + "csmProxy": "0x531C17C4E2Eef4cE6aA72CCA0aacf41FD06bcA84", + "csmImpl": "0xc8249cbe40477bAFfB7c3944A54e8Bcc04442E61", + "vettedGateProxy": "0xd04E46D444cD12eD1136Db229d9eB7357b0D4685", + "parametersRegistryImpl": "0x21a846DB6B1fde38874Da0175195ed257bBee656", + "feeOracleImpl": "0x5Ab1E1A3F85EA5b350ff524bC6a2E74d212E09D8", + "feeOracleConsensusVersion": 4, + "vettedGateImpl": "0x2FbCe6cF3DA01Fc80A67491574e04396379d8C21", + "accountingImpl": "0xE50288Ddfe4bCDfC9c47CAE59C55A525921Cf6f8", + "feeDistributorImpl": "0xCf7f0285Df627aFDcFAA3f8FcAa9EC4fDC892815", + "exitPenaltiesImpl": "0x386C57D258383052D2ee23F01081ecd3b0bD8cd7", + "strikesImpl": "0x948c0D6d635010314e010DcEB86C72Bfc10b69bB", + 
"oldPermissionlessGate": "0x5676715F8537DCbC5a13003Bf1bb7EF6a3196dB9", + "verifier": "0x9De74784543D1428Fa73AAFF05B3E334769c7C50", + "verifierV3": "0x50d6EDfE6669e91dcab40503E5Aa1f49A6933F19", + "permissionlessGate": "0xD29438F352dF79e948B9F79f273e3Fda528C5C06", + "ejector": "0xE9D7a318F531AB07abb7E5560aa55E23365a9515", + "gateSeal": "0xE25583099BA105D9ec0A67f5Ae86D90e50036425", + "identifiedCommunityStakersGateManager": "0xE25583099BA105D9ec0A67f5Ae86D90e50036425", + "generalDelayedPenaltyReporter": "0xE25583099BA105D9ec0A67f5Ae86D90e50036425", + "penaltiesManager": "0xE25583099BA105D9ec0A67f5Ae86D90e50036425" + }, + "curatedModule": { + "module": "0x1c5A849640229610800c9E27F84039fCb6912f23", + "moduleName": "curated-onchain-v1", + "stakeShareLimit": 2000, + "priorityExitShareThreshold": 2500, + "stakingModuleFee": 800, + "treasuryFee": 200, + "maxDepositsPerBlock": 30, + "minDepositBlockDistance": 25, + "hashConsensusInitialEpoch": 2694 + } + }, + 1780272000 + ] + }, + "upgradeTemporaryAdmin": { + "contract": "contracts/upgrade/UpgradeTemporaryAdmin.sol", + "address": "0xDD427698B14932DD5b88822cE9a9D98c35CcdD58", + "constructorArgs": ["0x105031FEEa0162de118C5EdEEd626E08BD55B256"] + }, + "upgradeVoteScript": { + "contract": "contracts/upgrade/UpgradeVoteScript.sol", + "address": "0xDD51dfACb742c5f97104328DeCEeceB7c560dF2c", + "constructorArgs": [ + ["0x4869a340FEe68f625bb16bbeBa5aE56cb9eBCac0", "0x2a30F5aC03187674553024296bed35Aa49749DDa", 0, 86400] + ] + }, + "validatorConsolidationRequests": { + "contract": "contracts/0.8.25/vaults/ValidatorConsolidationRequests.sol", + "address": "0xa5CD4eC37DC16d03C7EDC706449aaAAA6d592DD5", + "constructorArgs": ["0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9"], + "validatorConsolidationRequests": "0xa5CD4eC37DC16d03C7EDC706449aaAAA6d592DD5" + }, + "validatorExitDelayVerifier": { + "deployParameters": { + "gIFirstValidatorPrev": "0x0000000000000000000000000000000000000000000000000096000000000028", + "gIFirstValidatorCurr": 
"0x0000000000000000000000000000000000000000000000000096000000000028", + "gIFirstHistoricalSummaryPrev": "0x000000000000000000000000000000000000000000000000000000b600000018", + "gIFirstHistoricalSummaryCurr": "0x000000000000000000000000000000000000000000000000000000b600000018", + "gIFirstBlockRootInSummaryPrev": "0x000000000000000000000000000000000000000000000000000000000040000d", + "gIFirstBlockRootInSummaryCurr": "0x000000000000000000000000000000000000000000000000000000000040000d", + "firstSupportedSlot": 11649024, + "pivotSlot": 11649024, + "capellaSlot": 6209536, + "slotsPerHistoricalRoot": 8192, + "shardCommitteePeriodInSeconds": 98304 + }, + "contract": "contracts/0.8.25/ValidatorExitDelayVerifier.sol", + "address": "0xA2C4Ef228de6BA701660e75Cb06f1c9b29E53069", + "constructorArgs": [ + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + { + "gIFirstValidatorPrev": "0x0000000000000000000000000000000000000000000000000096000000000028", + "gIFirstValidatorCurr": "0x0000000000000000000000000000000000000000000000000096000000000028", + "gIFirstHistoricalSummaryPrev": "0x000000000000000000000000000000000000000000000000000000b600000018", + "gIFirstHistoricalSummaryCurr": "0x000000000000000000000000000000000000000000000000000000b600000018", + "gIFirstBlockRootInSummaryPrev": "0x000000000000000000000000000000000000000000000000000000000040000d", + "gIFirstBlockRootInSummaryCurr": "0x000000000000000000000000000000000000000000000000000000000040000d" + }, + 11649024, + 11649024, + 6209536, + 8192, + 32, + 12, + 1775658511, + 98304 + ] + }, + "validatorsExitBusOracle": { + "deployParameters": { + "consensusVersion": 4, + "maxValidatorsPerRequest": 600, + "maxExitRequestsLimit": 13000, + "exitsPerFrame": 1, + "frameDurationInSec": 48 + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x05cE5c99898b2a4376381C3c39217833ED0E15a4", + "constructorArgs": [ + "0x8A28E51A50CB8b532457486443a4ab56000D0811", + 
"0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol", + "address": "0x0dbbEDA8D62db7bdB8929da9f8db3318b0299067", + "constructorArgs": [12, 1775658511, "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9"] + } + }, + "vaultHub": { + "deployParameters": { + "maxRelativeShareLimitBP": 1000 + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0xb34CcfE453E4D31A1A7B5CdDd79020eA669A2a03", + "constructorArgs": [ + "0x3D7F46607b02EAd1Cd1d09A86B76885730d3De0F", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/vaults/VaultHub.sol", + "address": "0x3D7F46607b02EAd1Cd1d09A86B76885730d3De0F", + "constructorArgs": [ + "0x85cB33Fc344275709c0c194Bc7D1c5C32736C8B9", + "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "0x1912A7496314854fB890B1B88C0f1Ced653C1830", + 1000 + ] + } + }, + "vestingParams": { + "unvestedTokensAmount": "0", + "start": 0, + "cliff": 0, + "end": 0, + "revokable": false, + "holders": { + "0xe4dD9D749004872b68279Eda85306ada07CDB12a": "760000000000000000000000", + "0x51Af50A64Ec8A4F442A36Bd5dcEF1e86c127Bd51": "60000000000000000000000", + "0xaa6bfBCD634EE744CB8FE522b29ADD23124593D3": "60000000000000000000000", + "0xBA59A84C6440E8cccfdb5448877E26F1A431Fc8B": "60000000000000000000000", + "0x8943545177806ED17B9F23F0a21ee5948eCaa776": "820000000000000000000000", + "0xE25583099BA105D9ec0A67f5Ae86D90e50036425": "820000000000000000000000", + "0x105031FEEa0162de118C5EdEEd626E08BD55B256": "60000000000000000000000" + } + }, + "withdrawalQueueERC721": { + "deployParameters": { + "name": "Lido: stETH Withdrawal NFT", + "symbol": "unstETH", + "baseUri": null + }, + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x86A0679C7987B5BA9600affA994B78D0660088ff", + "constructorArgs": [ + "0xaDe68b4b6410aDB1578896dcFba75283477b6b01", + 
"0x8943545177806ED17B9F23F0a21ee5948eCaa776", + "0x" + ] + }, + "implementation": { + "contract": "contracts/0.8.9/WithdrawalQueueERC721.sol", + "address": "0xaDe68b4b6410aDB1578896dcFba75283477b6b01", + "constructorArgs": ["0xeB804d271b58d9405CD94e294504E56d55B6E35c", "Lido: stETH Withdrawal NFT", "unstETH"] + } + }, + "withdrawalVault": { + "proxy": { + "contract": "contracts/0.8.9/proxy/WithdrawalsManagerProxy.sol", + "address": "0xbFaC9e95F250952630Eef4ef62E602d0D37844fe", + "constructorArgs": ["0x8943545177806ED17B9F23F0a21ee5948eCaa776", "0x2694cfcc47aAbC5C042dea104b6ac37e9924a4B1"] + }, + "address": "0xbFaC9e95F250952630Eef4ef62E602d0D37844fe", + "implementation": { + "contract": "contracts/0.8.9/WithdrawalVault.sol", + "address": "0x325cC427128Ae28Cd547D5D112cabD65EE79719A", + "constructorArgs": [ + "0x8101cbB371bCFF480552B6B1CF847dF606c4AE46", + "0x105031FEEa0162de118C5EdEEd626E08BD55B256", + "0x2af486b3C64D73B03A01Ee8aBD5A94287a5BFD49", + "0x91abAc209511A6b73A0b4722DC9a227a54944e7C", + "0x00000961Ef480Eb55e80D19ad83579A64c007002", + "0x0000BBdDc7CE488642fb579F8B00f3a590007251" + ] + } + }, + "wstETH": { + "contract": "contracts/0.6.12/WstETH.sol", + "address": "0xeB804d271b58d9405CD94e294504E56d55B6E35c", + "constructorArgs": ["0x8101cbB371bCFF480552B6B1CF847dF606c4AE46"] + } +} diff --git a/deployed-hoodi.json b/deployed-hoodi.json index 277701a074..6ad023fe6c 100644 --- a/deployed-hoodi.json +++ b/deployed-hoodi.json @@ -11,7 +11,7 @@ }, "implementation": { "contract": "contracts/0.8.9/Accounting.sol", - "address": "0xd7eb46d18a07F78ed07201E1C7F7A4933967da6D", + "address": "0xDB47544d5813f15116bf95c1cF2ff4dEdb2226fD", "constructorArgs": ["0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", "0x3508A952176b3c15387C97BE809eaffB1982176a"] } }, @@ -30,7 +30,7 @@ }, "implementation": { "contract": "contracts/0.8.9/oracle/AccountingOracle.sol", - "address": "0x6D799F4C92e8eE9CC0E33367Dd47990ed49a21AC", + "address": "0x41bF10F28A1312f2241f86A2537A04b08e343C0a", 
"constructorArgs": ["0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", 12, 1742213400] } }, @@ -140,7 +140,7 @@ "app:lido": { "implementation": { "contract": "contracts/0.4.24/Lido.sol", - "address": "0x4f9143Dba1f1BbFa535528254592f3396E229e53", + "address": "0x6147270470A9Ee5b55c33EA71e32000E5d6D8E6B", "constructorArgs": [] }, "aragonApp": { @@ -308,6 +308,11 @@ "0x7e74a86b6e146964fb965db04dc2590516da77f720bb6759337bf5632415fd86" ] }, + "beaconChainDepositor": { + "contract": "contracts/0.8.25/lib/BeaconChainDepositor.sol", + "address": "0x29e702f163d4Ca47a0813e09BDfdAb960fb1B90b", + "constructorArgs": [] + }, "burner": { "proxy": { "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", @@ -336,6 +341,60 @@ "genesisTime": 1742213400, "depositContractAddress": "0x00000000219ab540356cBB839Cbe05303d7705Fa" }, + "circuitBreaker": { + "address": "0x44a5789dFeDa59cD176Ab5709ec2F4829dE4d555" + }, + "consolidationBus": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0xe09fBcE63826468867eE66Eda491E444829E022A", + "constructorArgs": [ + "0x27Ff16a465c1A00a727dd3Dbd479c5F3De275a1f", + "0x0534aA41907c9631fae990960bCC72d75fA7cfeD", + "0x4ec81af1000000000000000000000000c676167aaea6de6af3e1ed34c0595de449e0de7b000000000000000000000000000000000000000000000000000000000000015e000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000015180" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/consolidation/ConsolidationBus.sol", + "address": "0x27Ff16a465c1A00a727dd3Dbd479c5F3De275a1f", + "constructorArgs": ["0xC9991Bb865d025364BCbBd99f36e85889Fb68e69"] + } + }, + "consolidationGateway": { + "contract": "contracts/0.8.25/consolidation/ConsolidationGateway.sol", + "address": "0xC9991Bb865d025364BCbBd99f36e85889Fb68e69", + "constructorArgs": [ + "0xC676167aAea6de6Af3e1ED34C0595de449E0de7b", + "0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", + 2900, + 1, + 30, + 
"0x0000000000000000000000000000000000000000000000000096000000000028", + "0x0000000000000000000000000000000000000000000000000096000000000028", + 0 + ] + }, + "consolidationMigrator": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x747d357F5b6410B80Eb63406FaC5E2A91131B4f8", + "constructorArgs": [ + "0x2A8585201BFD6830944f0bf008B774e7e32b380d", + "0x0534aA41907c9631fae990960bCC72d75fA7cfeD", + "0xc4d66de8000000000000000000000000c676167aaea6de6af3e1ed34c0595de449e0de7b" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/consolidation/ConsolidationMigrator.sol", + "address": "0x2A8585201BFD6830944f0bf008B774e7e32b380d", + "constructorArgs": [ + "0xCc820558B39ee15C7C45B59390B503b83fb499A8", + "0xe09fBcE63826468867eE66Eda491E444829E022A", + 1, + 5 + ] + } + }, "createAppReposTx": "0xda42173c8a2bd75956437d03e275d32d583650cf4673693716d20c72ac94c137", "daoAragonId": "lido-dao", "daoFactory": { @@ -392,7 +451,7 @@ "usePredefinedAddressInstead": null }, "contract": "contracts/0.8.9/DepositSecurityModule.sol", - "address": "0x2F0303F20E0795E6CCd17BD5efE791A586f28E03", + "address": "0x1a629bB7C0563650e46406Eb6764A2ba207a0eFE", "constructorArgs": [ "0x3508A952176b3c15387C97BE809eaffB1982176a", "0x00000000219ab540356cBB839Cbe05303d7705Fa", @@ -416,6 +475,9 @@ "address": "0x3Ff49B57A7cc523c26567AF97F51C09f572A200A", "constructorArgs": [] }, + "easyTrack": { + "address": "0x284D91a7D47850d21A6DEaaC6E538AC7E5E6fc2a" + }, "easyTrackEVMScriptExecutor": { "address": "0x79a20FD0FA36453B2F45eAbab19bfef43575Ba9E" }, @@ -455,9 +517,6 @@ "address": "0x9b108015fe433F173696Af3Aa0CF7CDb3E104258", "constructorArgs": ["0x3508A952176b3c15387C97BE809eaffB1982176a", "0x0534aA41907c9631fae990960bCC72d75fA7cfeD"] }, - "circuitBreaker": { - "address": "0x44a5789dFeDa59cD176Ab5709ec2F4829dE4d555" - }, "gateSeal": { "address": "0x73d76Bd3D589B2b2185c402da82cdAfbc18b958D", "factoryAddress": "0xA402349F560D45310D301E92B1AA4DeCABe147B3", @@ -568,14 
+627,14 @@ }, "implementation": { "contract": "contracts/0.8.9/LidoLocator.sol", - "address": "0x751A4AA1A29Bc0C0E587aa04c3EABF0797F9B1A4", + "address": "0x2C33BE7c09bfBC8e41E7648d611d857fD4831b68", "constructorArgs": [ { "accountingOracle": "0xcb883B1bD0a41512b42D2dB267F2A2cd919FB216", - "depositSecurityModule": "0x2F0303F20E0795E6CCd17BD5efE791A586f28E03", + "depositSecurityModule": "0x1a629bB7C0563650e46406Eb6764A2ba207a0eFE", "elRewardsVault": "0x9b108015fe433F173696Af3Aa0CF7CDb3E104258", "lido": "0x3508A952176b3c15387C97BE809eaffB1982176a", - "oracleReportSanityChecker": "0x53417BA942bC86492bAF46FAbA8769f246422388", + "oracleReportSanityChecker": "0x049A972e9cBEfFFc1c2543dFD0Fa892C2E9Ed6C5", "postTokenRebaseReceiver": "0x9c53d0075eA00ad77dDAd1b71E67bb97AaBC1e3D", "burner": "0xb2c99cd38a2636a6281a849C8de938B3eF4A7C3D", "stakingRouter": "0xCc820558B39ee15C7C45B59390B503b83fb499A8", @@ -586,13 +645,15 @@ "oracleDaemonConfig": "0x2a833402e3F46fFC1ecAb3598c599147a78731a9", "validatorExitDelayVerifier": "0xa5F5A9360275390fF9728262a29384399f38d2f0", "triggerableWithdrawalsGateway": "0x6679090D92b08a2a686eF8614feECD8cDFE209db", + "consolidationGateway": "0xC9991Bb865d025364BCbBd99f36e85889Fb68e69", "accounting": "0x9b5b78D1C9A3238bF24662067e34c57c83E8c354", "predepositGuarantee": "0xa5F55f3402beA2B14AE15Dae1b6811457D43581d", "wstETH": "0x7E99eE3C66636DE415D2d7C880938F2f40f94De4", "vaultHub": "0x4C9fFC325392090F789255b9948Ab1659b797964", "vaultFactory": "0x7Ba269a03eeD86f2f54CB04CA3b4b7626636Df4E", "lazyOracle": "0xf41491C79C30e8f4862d3F4A5b790171adB8e04A", - "operatorGrid": "0x501e678182bB5dF3f733281521D3f3D1aDe69917" + "operatorGrid": "0x501e678182bB5dF3f733281521D3f3D1aDe69917", + "topUpGateway": "0x10DBEb3367876826d00D21718D1d893e0fbD2956" } ] } @@ -614,7 +675,7 @@ "lidoTemplateNewDaoTx": "0xdd5b8ac931c01359e0948b4c202cf4ebac0fe6c5d6628025d1e09a2012cf4329", "minFirstAllocationStrategy": { "contract": "contracts/common/lib/MinFirstAllocationStrategy.sol", - 
"address": "0x6d1a9bBFF97f7565e9532FEB7b499982848E5e07", + "address": "0x8E6FDB231D7CE30C2459319c0d4c4Eb4B681f9C9", "constructorArgs": [] }, "miniMeTokenFactory": { @@ -677,13 +738,29 @@ "clBalanceOraclesErrorUpperBPLimit": 50 }, "contract": "contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol", - "address": "0x53417BA942bC86492bAF46FAbA8769f246422388", + "address": "0x049A972e9cBEfFFc1c2543dFD0Fa892C2E9Ed6C5", "constructorArgs": [ "0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", - "0x6D799F4C92e8eE9CC0E33367Dd47990ed49a21AC", "0x9b5b78D1C9A3238bF24662067e34c57c83E8c354", "0x0534aA41907c9631fae990960bCC72d75fA7cfeD", - ["9000", "43200", "1000", "50", "600", "8", "24", "128", "750000", "1000", "101", "50"] + { + "exitedEthAmountPerDayLimit": 57600, + "appearedEthAmountPerDayLimit": 57600, + "annualBalanceIncreaseBPLimit": "1000", + "simulatedShareRateDeviationBPLimit": "50", + "maxBalanceExitRequestedPerReportInEth": 19200, + "maxEffectiveBalanceWeightWCType01": 32, + "maxEffectiveBalanceWeightWCType02": 2048, + "maxItemsPerExtraDataTransaction": "8", + "maxNodeOperatorsPerExtraDataItem": "24", + "requestTimestampMargin": "128", + "maxPositiveTokenRebase": "750000", + "maxCLBalanceDecreaseBP": 360, + "clBalanceOraclesErrorUpperBPLimit": "50", + "consolidationEthAmountPerDayLimit": 93375, + "exitedValidatorEthAmountLimit": 32, + "externalPendingBalanceCapEth": 300 + } ] }, "pinnedBeaconProxy": { @@ -712,13 +789,21 @@ ] } }, - "scratchDeployGasUsed": "57329468", + "resealManager": { + "address": "0x05172CbCDb7307228F781436b327679e4DAE166B" + }, + "scratchDeployGasUsed": "124435084", "simpleDvt": { "deployParameters": { "stakingModuleTypeId": "curated-onchain-v1", "stuckPenaltyDelay": 172800 } }, + "srLib": { + "contract": "contracts/0.8.25/sr/SRLib.sol", + "address": "0xA43DeC6250D4B59C345c8515569983E3e24d6990", + "constructorArgs": [] + }, "stakingRouter": { "proxy": { "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", @@ -730,9 +815,15 @@ ] }, 
"implementation": { - "contract": "contracts/0.8.9/StakingRouter.sol", - "address": "0xd5F04A81ac472B2cB32073CE9dDABa6FaF022827", - "constructorArgs": ["0x00000000219ab540356cBB839Cbe05303d7705Fa"] + "contract": "contracts/0.8.25/sr/StakingRouter.sol", + "address": "0x44d0b2B95d2C2bDF73FE4f5cD7E3A930494E5B1f", + "constructorArgs": [ + "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "0x3508A952176b3c15387C97BE809eaffB1982176a", + "0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", + "32000000000000000000", + "2048000000000000000000" + ] } }, "stakingVaultBeacon": { @@ -762,6 +853,28 @@ "constructorArgs": ["0x0534aA41907c9631fae990960bCC72d75fA7cfeD", "0x9b5b78D1C9A3238bF24662067e34c57c83E8c354"] } }, + "topUpGateway": { + "proxy": { + "contract": "contracts/0.8.9/proxy/OssifiableProxy.sol", + "address": "0x10DBEb3367876826d00D21718D1d893e0fbD2956", + "constructorArgs": [ + "0xFd1b63657dda65C4E6fDEF9d1f37064D078e9B49", + "0x0534aA41907c9631fae990960bCC72d75fA7cfeD", + "0x45ff4c800000000000000000000000009e0a534371e3ea5651a26725008cd4507b2e0804000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000258000000000000000000000000000000000000000000000000000001dc8bce83800000000000000000000000000000000000000000000000000000000077359400" + ] + }, + "implementation": { + "contract": "contracts/0.8.25/TopUpGateway.sol", + "address": "0xFd1b63657dda65C4E6fDEF9d1f37064D078e9B49", + "constructorArgs": [ + "0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", + "0x0000000000000000000000000000000000000000000000000096000000000028", + "0x0000000000000000000000000000000000000000000000000096000000000028", + 0, + 32 + ] + } + }, "triggerableWithdrawalsGateway": { "contract": "contracts/0.8.9/TriggerableWithdrawalsGateway.sol", "address": "0x6679090D92b08a2a686eF8614feECD8cDFE209db", @@ -773,6 +886,129 @@ 48 ] }, + "upgradeTemplate": { + "contract": 
"contracts/upgrade/UpgradeTemplate.sol", + "address": "0x256c4eece96b79584A705D8dbFBf29cC876b41b6", + "constructorArgs": [ + { + "locator": "0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8", + "agent": "0x0534aA41907c9631fae990960bCC72d75fA7cfeD", + "voting": "0x49B3512c44891bef83F8967d075121Bd1b07a01B", + "dualGovernance": "0x9CAaCCc62c66d817CC59c44780D1b722359795bF", + "resealManager": "0x05172CbCDb7307228F781436b327679e4DAE166B", + "easyTrack": "0x284D91a7D47850d21A6DEaaC6E538AC7E5E6fc2a", + "circuitBreaker": "0x44a5789dFeDa59cD176Ab5709ec2F4829dE4d555", + "newFactories": { + "UpdateStakingModuleShareLimits": "0xD63cf25df1bA6144db27A81A98120Dfc53dE4540", + "AllowConsolidationPair": "0x22D36e7616F541A527989C5652fDA4d527bB461C", + "SetMerkleGateTreeForCSM": "0xf71fcB20B9FB8468653Bcb24E31F39bc069D5995", + "ReportWithdrawalsForSlashedValidatorsForCSM": "0x4EaB04775837A6F0218750A10454119f349258FE", + "SettleGeneralDelayedPenaltyForCSM": "0xd0c38B2F0C1F760976dA010C1c35D828331Ff9E2", + "SetMerkleGateTreeForCM": "0x5194cC02B6F477B4a23DFA422fFC238c8B5b1736", + "ReportWithdrawalsForSlashedValidatorsForCM": "0x6E40FED7c28bAA93a798cA10f8A93965a19eC52e", + "SettleGeneralDelayedPenaltyForCM": "0x3486B872768D361309e405A046C4BF995c21CC6c", + "CreateOrUpdateOperatorGroupForCM": "0x44D9b39bBdc2182Aa1af6f16f8F55E0eA038294d" + }, + "oldFactories": { + "CSMSettleElStealingPenalty": "0x5c0af5b9f96921d3F61503e1006CF0ab9867279E", + "CSMSetVettedGateTree": "0xa890fc73e1b771Ee6073e2402E631c312FF92Cd9" + }, + "coreUpgrade": { + "oldLocatorImpl": "0x751A4AA1A29Bc0C0E587aa04c3EABF0797F9B1A4", + "oldLidoImpl": "0x4f9143Dba1f1BbFa535528254592f3396E229e53", + "oldAccountingImpl": "0xd7eb46d18a07F78ed07201E1C7F7A4933967da6D", + "oldAccountingOracleImpl": "0x6D799F4C92e8eE9CC0E33367Dd47990ed49a21AC", + "oldStakingRouterImpl": "0xd5F04A81ac472B2cB32073CE9dDABa6FaF022827", + "oldWithdrawalVaultImpl": "0xfe7A58960Af333eAdeAeC39149F9d6A71dc3E668", + "oldValidatorsExitBusOracleImpl": 
"0x7E6d9C9C44417bf2EaF69685981646e9752D623A", + "newLocatorImpl": "0x2C33BE7c09bfBC8e41E7648d611d857fD4831b68", + "newLidoImpl": "0x6147270470A9Ee5b55c33EA71e32000E5d6D8E6B", + "newAccountingImpl": "0xDB47544d5813f15116bf95c1cF2ff4dEdb2226fD", + "newAccountingOracleImpl": "0x41bF10F28A1312f2241f86A2537A04b08e343C0a", + "newStakingRouterImpl": "0x44d0b2B95d2C2bDF73FE4f5cD7E3A930494E5B1f", + "newWithdrawalVaultImpl": "0xB97e67CC20bd2970E30341c0ECc7497d8A5b7342", + "newValidatorsExitBusOracleImpl": "0x86aeA211B30174b3ee5d294ECeaDbD7f1C575eF3", + "consolidationBusImpl": "0x27Ff16a465c1A00a727dd3Dbd479c5F3De275a1f", + "consolidationMigratorImpl": "0x2A8585201BFD6830944f0bf008B774e7e32b380d", + "topUpGatewayImpl": "0xFd1b63657dda65C4E6fDEF9d1f37064D078e9B49", + "topUpGateway": "0x10DBEb3367876826d00D21718D1d893e0fbD2956", + "topUpGatewayDepositor": "0x9b186cE78Ddd6fF098b4a533Dd17a139e1FFeD76", + "twMaxExitRequestsLimit": 250, + "twExitsPerFrame": 1, + "twFrameDurationInSec": 240, + "aoConsensusVersion": 6, + "veboMaxValidatorsPerReport": 600, + "veboMaxExitBalanceEth": 358400, + "veboBalancePerFrameEth": 32, + "veboFrameDurationInSec": 48, + "veboConsensusVersion": 5, + "consolidationBus": "0xe09fBcE63826468867eE66Eda491E444829E022A", + "consolidationMigrator": "0x747d357F5b6410B80Eb63406FaC5E2A91131B4f8", + "curatedModuleCommittee": "0x84DffcfB232594975C608DE92544Ff239a24c9E9", + "consolidationGatewayPauser": "0x83BCE68B4e8b7071b2a664a26e6D3Bc17eEe3102", + "lidoDepositsReserveTarget": "1000000000000000000000" + }, + "csmUpgrade": { + "csmProxy": "0x79CEf36D84743222f37765204Bec41E92a93E59d", + "csmImpl": "0x161b1DAa658fD0D78a4603860edd8Ed06f98F4cA", + "vettedGateProxy": "0x10a254E724fe2b7f305F76f3F116a3969c53845f", + "identifiedDVTClusterGate": "0x887F8512F9998045f4b5993e6eaa6BCfE5F02A94", + "identifiedDVTClusterCurveSetup": "0x75CC99052fB05eEb4D9f80Ba94A5D077e3a721C1", + "identifiedDVTClusterBondCurveId": 4, + "parametersRegistryImpl": 
"0x58376D8B192813E85532b25685D948EB49c2A8B5", + "feeOracleImpl": "0x27d1Ff0353AF6b7480CBc902169d0F89b49334B5", + "feeOracleConsensusVersion": 4, + "vettedGateImpl": "0x3b834c6d043F4CE5C61d84723bA737D405B2e276", + "accountingImpl": "0x3a18675fFB2C37A4296dD794A7Ed94644225F881", + "feeDistributorImpl": "0x74c5be19CcD1a264899FbCf8dB1a64C1e3fb73Ac", + "exitPenaltiesImpl": "0xf38A3DA25B417D83182EEDD30d00557d78c35C96", + "strikesImpl": "0x47F96DCD5cf3e94492CD050c00C9F6e33b3ca677", + "oldPermissionlessGate": "0x5553077102322689876A6AdFd48D75014c28acfb", + "oldVerifier": "0x1773b2Ff99A030F6000554Cb8A5Ec93145650cbA", + "newVerifier": "0xC96406b0eADdAC5708aFCa04DcCA67BAdC9642Fd", + "newPermissionlessGate": "0xd7bD8D2A9888D1414c770B35ACF55890B15de26a", + "ejector": "0xCAe028378d69D54dc8bF809e6C44CF751F997b80", + "csmCommittee": "0x4AF43Ee34a6fcD1fEcA1e1F832124C763561dA53" + }, + "curatedModule": { + "module": "0x87EB69Ae51317405FD285efD2326a4a11f6173b9", + "curatedGates": [ + "0xF1862d120831eBE31f7202378Ff3Ae63A5658ae3", + "0x410A309dF81B782190188CDB3d215729cc6bC1f3", + "0xa5A604b172787e017b1b118F02fE54fC1D696519", + "0xE966874cDB6A4282ED75Cd10439e3799e5531a2D", + "0x5c063da03e3f21443716D75a2205EE16706e1153", + "0x1cD655Ac53CfE8269DE0DBfc0140B074623C4A6B", + "0x28518be9894C20135F280a9539617783b08a04c7" + ], + "verifier": "0x209190Ebc2Be80367a15d05e626784Eb94d6A880", + "circuitBreakerPauser": "0x84DffcfB232594975C608DE92544Ff239a24c9E9", + "moduleName": "curated-onchain-v2", + "stakeShareLimit": 10000, + "priorityExitShareThreshold": 10000, + "stakingModuleFee": 400, + "treasuryFee": 600, + "maxDepositsPerBlock": 150, + "minDepositBlockDistance": 25, + "feeOracleConsensusVersion": 4, + "hashConsensusInitialEpoch": 93833 + } + }, + 1780272000 + ] + }, + "upgradeTemporaryAdmin": { + "contract": "contracts/upgrade/UpgradeTemporaryAdmin.sol", + "address": "0x9E0A534371e3eA5651A26725008CD4507B2E0804", + "constructorArgs": ["0x0534aA41907c9631fae990960bCC72d75fA7cfeD"] + }, + 
"upgradeVoteScript": { + "contract": "contracts/upgrade/UpgradeVoteScript.sol", + "address": "0xaC83987948dB29c54b91B9a3Bd7a5cA99fA7F1D1", + "constructorArgs": [ + ["0x256c4eece96b79584A705D8dbFBf29cC876b41b6", "0xB26Fd3b50280AbC55c572EE73071778A51088408", 0, 86399] + ] + }, "v3Template": { "contract": "contracts/upgrade/V3Template.sol", "address": "0xd253b0ca059343e70474e685Beb2974F10CCFa67", @@ -857,7 +1093,7 @@ }, "implementation": { "contract": "contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol", - "address": "0x7E6d9C9C44417bf2EaF69685981646e9752D623A", + "address": "0x86aeA211B30174b3ee5d294ECeaDbD7f1C575eF3", "constructorArgs": [12, 1742213400, "0xe2EF9536DAAAEBFf5b1c130957AB3E80056b06D8"] } }, @@ -920,11 +1156,14 @@ "withdrawalVault": { "implementation": { "contract": "contracts/0.8.9/WithdrawalVault.sol", - "address": "0xfe7A58960Af333eAdeAeC39149F9d6A71dc3E668", + "address": "0xB97e67CC20bd2970E30341c0ECc7497d8A5b7342", "constructorArgs": [ "0x3508A952176b3c15387C97BE809eaffB1982176a", "0x0534aA41907c9631fae990960bCC72d75fA7cfeD", - "0x6679090D92b08a2a686eF8614feECD8cDFE209db" + "0x6679090D92b08a2a686eF8614feECD8cDFE209db", + "0xce93710b849e0dC202AaC513837e05bEA9D7DdFD", + "0x00000961Ef480Eb55e80D19ad83579A64c007002", + "0x0000BBdDc7CE488642fb579F8B00f3a590007251" ] }, "proxy": { diff --git a/deployed-mainnet.json b/deployed-mainnet.json index 704fd04e55..3f3c1a07dd 100644 --- a/deployed-mainnet.json +++ b/deployed-mainnet.json @@ -256,6 +256,20 @@ "secondsPerSlot": 12, "genesisTime": 1606824023 }, + "circuitBreaker": { + "address": "0x00000000000000000000000000000000000000cb", + "note": "TODO: replace with real address upon deployment", + "constructorArgs": { + "admin": "0x3e40D73EB977Dc6a537aF587D48316feE66E9C8c", + "initialHeartbeatInterval": 31536000, + "initialPauseDuration": 3600, + "maxHeartbeatInterval": 94608000, + "maxPauseDuration": 2592000, + "minHeartbeatInterval": 60, + "minPauseDuration": 60 + } + }, + "createAppReposTx": 
"0xf48cb21c6be021dd18bd8e02ce89ac7b924245b859f0a8b7c47e88a39016ed41", "daoAragonId": "lido-dao", "daoFactoryAddress": "0x7378ad1ba8f3c8e64bbb2a04473edd35846360f1", diff --git a/docs/scratch-deploy.md b/docs/scratch-deploy.md index 677edfbce5..54d824098f 100644 --- a/docs/scratch-deploy.md +++ b/docs/scratch-deploy.md @@ -132,8 +132,6 @@ To do a testnet deployment, the following parameters must be set up via env vari `0x10000910` for Hoodi. Used to properly calculate the deposit domain for the network. - `GAS_PRIORITY_FEE`. Gas priority fee. By default set to `2` - `GAS_MAX_FEE`. Gas max fee. By default set to `100` -- `GATE_SEAL_FACTORY`. Address of the [GateSeal Factory](https://github.com/lidofinance/gate-seals) contract. Must be - deployed in advance. Can be set to any `0x0000000000000000000000000000000000000000` to debug deployment - `WITHDRAWAL_QUEUE_BASE_URI`. BaseURI for WithdrawalQueueERC721. By default not set (left an empty string) - `DSM_PREDEFINED_ADDRESS`. Address to use instead of deploying `DepositSecurityModule` or `null` otherwise. If used, the deposits can be made by calling `Lido.deposit` from the address. 
diff --git a/eslint.config.mjs b/eslint.config.mjs index 1a01c59af5..c87f6f5a1c 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -17,7 +17,7 @@ const gitignorePath = path.resolve(__dirname, ".gitignore"); export default [ includeIgnoreFile(gitignorePath), { - ignores: [".solcover.js", "eslint.config.mjs"], + ignores: [".solcover.js", "eslint.config.mjs", "**/archive/**"], }, js.configs.recommended, ...ts.configs.recommended, @@ -66,4 +66,3 @@ export default [ }, ]; - diff --git a/foundry.lock b/foundry.lock new file mode 100644 index 0000000000..fa8a276a48 --- /dev/null +++ b/foundry.lock @@ -0,0 +1,5 @@ +{ + "foundry/lib/forge-std": { + "rev": "662ae0d6936654c5d1fb79fc15f521de28edb60e" + } +} \ No newline at end of file diff --git a/foundry.toml b/foundry.toml index f379f00057..bbc8491b3b 100644 --- a/foundry.toml +++ b/foundry.toml @@ -22,6 +22,8 @@ match_path = '**/test/**/*.t.sol' # Enable latest EVM features evm_version = "prague" +optimizer = true +optimizer_runs = 200 # https://book.getfoundry.sh/reference/config/testing#fuzz # fuzz = { runs = 256 } @@ -33,15 +35,25 @@ evm_version = "prague" fmt = { int_types = 'long' } -# add via_ir profile +# profiles required by compilation_restrictions additional_compiler_profiles = [ - { name = "v3", version = "0.8.25", optimizer = true, optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, - { name = "vaultHub", version = "0.8.25", optimizer = true, optimizer_runs = 100, via_ir = true, evm_version = "cancun" }, + { name = "solc0424", optimizer = true, optimizer_runs = 200, evm_version = "constantinople" }, + { name = "solc06x_089", optimizer = true, optimizer_runs = 200, evm_version = "istanbul" }, + { name = "solc0825", optimizer = true, optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + { name = "v3", optimizer = true, optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + { name = "vaultHub", optimizer = true, optimizer_runs = 100, via_ir = true, evm_version = "cancun" }, ] 
-# enforce compiling 0.8.25 contract with via_ir +# align compiler settings with hardhat.config.ts compilation_restrictions = [ - { paths = "contracts/0.8.25/**", optimizer_runs = 200, via_ir = true }, - { paths = "contracts/0.8.25/vaults/VaultHub.sol", optimizer_runs = 100, via_ir = true }, - { paths = "contracts/upgrade/**", optimizer_runs = 200, via_ir = true }, + { paths = "contracts/0.4.24/**", optimizer_runs = 200, evm_version = "constantinople" }, + { paths = "contracts/0.6.11/**", optimizer_runs = 200, evm_version = "istanbul" }, + { paths = "contracts/0.6.12/**", optimizer_runs = 200, evm_version = "istanbul" }, + { paths = "contracts/0.8.9/**", optimizer_runs = 200, evm_version = "istanbul" }, + { paths = "contracts/0.8.25/**", optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + { paths = "contracts/upgrade/**", optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + # NB: Foundry cannot safely mirror Hardhat's strict VaultHub runs=100 override + # together with a runs=200 rule on the same import graph, so we keep a merge-safe + # range here for compatibility. + { paths = "contracts/0.8.25/vaults/VaultHub.sol", min_optimizer_runs = 100, max_optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, ] diff --git a/foundry/lib/forge-std b/foundry/lib/forge-std index 8f24d6b04c..ffa2ee0d92 160000 --- a/foundry/lib/forge-std +++ b/foundry/lib/forge-std @@ -1 +1 @@ -Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa +Subproject commit ffa2ee0d921b4163b7abd0f1122df93ead205805 diff --git a/globals.d.ts b/globals.d.ts index f9c0e9de92..047d5d049d 100644 --- a/globals.d.ts +++ b/globals.d.ts @@ -10,6 +10,9 @@ declare namespace NodeJS { /* Test execution mode: 'scratch' for fresh network, 'fork' for forked network */ MODE?: "scratch" | "forking"; // default: "scratch" + /* Extends MODE=forking, if set, the tests will run in upgrade mode */ + UPGRADE?: boolean; + /* Block number to fork from. 
If not set, the fork will start from the latest block. */ FORKING_BLOCK_NUMBER?: number; // default: undefined @@ -94,8 +97,13 @@ declare namespace NodeJS { LOCAL_DEVNET_EXPLORER_API_URL?: string; LOCAL_DEVNET_EXPLORER_URL?: string; - /* scratch deploy environment variables */ + /* scratch & upgrade deploy environment variables */ NETWORK_STATE_FILE?: string; + /* migration steps file */ + STEPS_FILE?: string; + ALLOW_SKIP_STEPS?: "true" | "false" | "1" | "0"; + /* auto-confirm tx's params review prompts */ + AUTO_CONFIRM?: "true" | "false" | "1" | "0"; /* hardhat plugins options */ SKIP_CONTRACT_SIZE?: boolean; diff --git a/hardhat.config.ts b/hardhat.config.ts index 8237b57020..5b20e37b2a 100644 --- a/hardhat.config.ts +++ b/hardhat.config.ts @@ -48,6 +48,13 @@ const config: HardhatUserConfig = { }, forking: getHardhatForkingConfig(), hardfork: "prague", + chains: { + 32382: { + hardforkHistory: { + prague: 0, + }, + }, + }, mining: { mempool: { order: "fifo", @@ -61,10 +68,13 @@ const config: HardhatUserConfig = { // local nodes "local": { url: process.env.LOCAL_RPC_URL || RPC_URL, + timeout: 20 * 60 * 1000, // 20 minutes }, "local-devnet": { url: process.env.LOCAL_RPC_URL || RPC_URL, + timeout: 20 * 60 * 1000, // 20 minutes accounts: [process.env.LOCAL_DEVNET_PK || ZERO_PK], + chainId: parseInt(process.env.LOCAL_DEVNET_CHAIN_ID || "32382", 10), }, // testnets "sepolia": { @@ -147,7 +157,9 @@ const config: HardhatUserConfig = { }, }, ], - apiKey: process.env.LOCAL_DEVNET_EXPLORER_API_URL ? "local-devnet" : process.env.ETHERSCAN_API_KEY || "", + apiKey: process.env.LOCAL_DEVNET_EXPLORER_API_URL + ? 
{ "local-devnet": "local-devnet" } + : process.env.ETHERSCAN_API_KEY || "", }, solidity: { compilers: [ diff --git a/lib/account.ts b/lib/account.ts index ee953f375c..624bb2510f 100644 --- a/lib/account.ts +++ b/lib/account.ts @@ -8,6 +8,27 @@ import { randomAddress } from "./address"; import { getNetworkName } from "./network"; import { ether } from "./units"; +export async function getSignerOrImpersonate( + address: string | Addressable, + balance: bigint = ether("100"), +): Promise { + if (typeof address !== "string") { + address = await address.getAddress(); + } + + const signers = await ethers.getSigners(); + const signer = signers.find((item) => item.address.toLowerCase() === address.toLowerCase()); + if (signer) { + return signer; + } + + try { + return await impersonate(address, balance); + } catch { + throw new Error(`Can't get a signer or impersonation for ${address}.`); + } +} + export async function impersonate(address: string | Addressable, balance?: bigint): Promise { if (typeof address !== "string") { address = await address.getAddress(); diff --git a/lib/config-schemas.ts b/lib/config-schemas.ts index 3c33c968bb..30b56b7b38 100644 --- a/lib/config-schemas.ts +++ b/lib/config-schemas.ts @@ -72,6 +72,11 @@ const BurnerSchema = z.object({ totalNonCoverSharesBurnt: BigIntStringSchema.optional(), }); +const WithdrawalVaultSchema = z.object({ + withdrawalRequestContract: EthereumAddressSchema, + consolidationRequestContract: EthereumAddressSchema, +}); + // Triggerable withdrawals gateway schema (used in scratch configs) const TriggerableWithdrawalsGatewaySchema = z.object({ maxExitRequestsLimit: PositiveIntSchema, @@ -79,18 +84,74 @@ const TriggerableWithdrawalsGatewaySchema = z.object({ frameDurationInSec: PositiveIntSchema, }); +// Consolidation gateway schema +const ConsolidationGatewaySchema = z.object({ + maxConsolidationRequestsLimit: PositiveIntSchema, + consolidationsPerFrame: PositiveIntSchema, + frameDurationInSec: PositiveIntSchema, + 
gIFirstValidatorPrev: HexStringSchema, + gIFirstValidatorCurr: HexStringSchema, + pivotSlot: NonNegativeIntSchema, + pauser: EthereumAddressSchema.optional(), +}); + +const ConsolidationBusSchema = z.object({ + initialBatchSize: PositiveIntSchema, + initialMaxGroupsInBatch: PositiveIntSchema, + initialExecutionDelay: NonNegativeIntSchema, +}); + +const ConsolidationMigratorSchema = z.object({ + sourceModuleId: PositiveIntSchema, + targetModuleId: PositiveIntSchema, + committee: EthereumAddressSchema.optional(), +}); + +// Top-up gateway schema +const TopUpGatewaySchema = z.object({ + maxValidatorsPerTopUp: PositiveIntSchema, + minBlockDistance: PositiveIntSchema, + maxRootAge: PositiveIntSchema, + targetBalanceGwei: PositiveIntSchema, + minTopUpGwei: PositiveIntSchema, + gIFirstValidatorPrev: HexStringSchema, + gIFirstValidatorCurr: HexStringSchema, + pivotSlot: NonNegativeIntSchema, + depositor: EthereumAddressSchema.optional(), +}); + +const StakingRouterSchema = z.object({ + maxEBType1: BigIntStringSchema, + maxEBType2: BigIntStringSchema, +}); + // Easy track schema const EasyTrackSchema = z.object({ - VaultsAdapter: EthereumAddressSchema, + trustedCaller: EthereumAddressSchema.optional(), newFactories: z.object({ - AlterTiersInOperatorGrid: EthereumAddressSchema, - RegisterGroupsInOperatorGrid: EthereumAddressSchema, - RegisterTiersInOperatorGrid: EthereumAddressSchema, - SetJailStatusInOperatorGrid: EthereumAddressSchema, - SocializeBadDebtInVaultHub: EthereumAddressSchema, - ForceValidatorExitsInVaultHub: EthereumAddressSchema, - UpdateGroupsShareLimitInOperatorGrid: EthereumAddressSchema, - UpdateVaultsFeesInOperatorGrid: EthereumAddressSchema, + // v3 + // AlterTiersInOperatorGrid: EthereumAddressSchema, + // RegisterGroupsInOperatorGrid: EthereumAddressSchema, + // RegisterTiersInOperatorGrid: EthereumAddressSchema, + // SetJailStatusInOperatorGrid: EthereumAddressSchema, + // SocializeBadDebtInVaultHub: EthereumAddressSchema, + // 
ForceValidatorExitsInVaultHub: EthereumAddressSchema, + // UpdateGroupsShareLimitInOperatorGrid: EthereumAddressSchema, + // UpdateVaultsFeesInOperatorGrid: EthereumAddressSchema, + // v4 + UpdateStakingModuleShareLimits: EthereumAddressSchema, + AllowConsolidationPair: EthereumAddressSchema, + SetMerkleGateTreeForCSM: EthereumAddressSchema, + ReportWithdrawalsForSlashedValidatorsForCSM: EthereumAddressSchema, + SettleGeneralDelayedPenaltyForCSM: EthereumAddressSchema, + SetMerkleGateTreeForCM: EthereumAddressSchema, + ReportWithdrawalsForSlashedValidatorsForCM: EthereumAddressSchema, + SettleGeneralDelayedPenaltyForCM: EthereumAddressSchema, + CreateOrUpdateOperatorGroupForCM: EthereumAddressSchema, + }), + oldFactories: z.object({ + CSMSettleElStealingPenalty: EthereumAddressSchema, + CSMSetVettedGateTree: EthereumAddressSchema, }), }); @@ -100,13 +161,13 @@ const OracleVersionsSchema = z.object({ }); // V3 vote script params -const V3VoteScriptSchema = z.object({ - expiryTimestamp: NonNegativeIntSchema, - initialMaxExternalRatioBP: BasisPointsSchema, - timeConstraintsContract: EthereumAddressSchema, - odcSlashingReserveWeRightShiftEpochs: NonNegativeIntSchema, - odcSlashingReserveWeLeftShiftEpochs: NonNegativeIntSchema, -}); +// const V3VoteScriptSchema = z.object({ +// expiryTimestamp: NonNegativeIntSchema, +// initialMaxExternalRatioBP: BasisPointsSchema, +// timeConstraintsContract: EthereumAddressSchema, +// odcSlashingReserveWeRightShiftEpochs: NonNegativeIntSchema, +// odcSlashingReserveWeLeftShiftEpochs: NonNegativeIntSchema, +// }); // Aragon app versions schema const AragonAppVersionsSchema = z.object({ @@ -114,34 +175,60 @@ const AragonAppVersionsSchema = z.object({ sdvt_version: z.array(z.number()).length(3), }); -// Upgrade parameters schema -export const UpgradeParametersSchema = z.object({ - chainSpec: ChainSpecSchema.extend({ - genesisTime: z.number().int(), - depositContract: EthereumAddressSchema, - }), - gateSealForVaults: z.object({ - 
sealDuration: PositiveIntSchema, - sealingCommittee: EthereumAddressSchema, - }), - easyTrack: EasyTrackSchema, - vaultHub: VaultHubSchema, - lazyOracle: LazyOracleSchema, - predepositGuarantee: PredepositGuaranteeSchema.extend({ - genesisForkVersion: HexStringSchema, - }), - operatorGrid: OperatorGridSchema, - burner: BurnerSchema, - oracleVersions: OracleVersionsSchema.optional(), - aragonAppVersions: AragonAppVersionsSchema.optional(), - v3VoteScript: V3VoteScriptSchema, +const CSMUpgradeConfigSchema = z.object({ + csmProxy: EthereumAddressSchema, + csmImpl: EthereumAddressSchema, + vettedGateProxy: EthereumAddressSchema, + identifiedDVTClusterGate: EthereumAddressSchema, + identifiedDVTClusterCurveSetup: EthereumAddressSchema, + identifiedDVTClusterBondCurveId: NonNegativeIntSchema, + parametersRegistryImpl: EthereumAddressSchema, + feeOracleImpl: EthereumAddressSchema, + feeOracleConsensusVersion: NonNegativeIntSchema, + vettedGateImpl: EthereumAddressSchema, + accountingImpl: EthereumAddressSchema, + feeDistributorImpl: EthereumAddressSchema, + exitPenaltiesImpl: EthereumAddressSchema, + strikesImpl: EthereumAddressSchema, + oldPermissionlessGate: EthereumAddressSchema, + oldVerifier: EthereumAddressSchema, + newVerifier: EthereumAddressSchema, + newPermissionlessGate: EthereumAddressSchema, + ejector: EthereumAddressSchema, + csmCommittee: EthereumAddressSchema, +}); + +const CuratedModuleConfigSchema = z.object({ + module: EthereumAddressSchema, + curatedGates: z.array(EthereumAddressSchema), + verifier: EthereumAddressSchema, + circuitBreakerPauser: EthereumAddressSchema, + moduleName: z.string().min(1), + stakeShareLimit: NonNegativeIntSchema, + priorityExitShareThreshold: NonNegativeIntSchema, + stakingModuleFee: NonNegativeIntSchema, + treasuryFee: NonNegativeIntSchema, + maxDepositsPerBlock: NonNegativeIntSchema, + minDepositBlockDistance: NonNegativeIntSchema, + feeOracleConsensusVersion: NonNegativeIntSchema, + hashConsensusInitialEpoch: 
NonNegativeIntSchema, +}); + +const UpgradeVoteScriptSchema = z.object({ + expiryTimestamp: NonNegativeIntSchema, + timeConstraintsContract: EthereumAddressSchema, + enabledDaySpanStart: NonNegativeIntSchema, + enabledDaySpanEnd: NonNegativeIntSchema, }); -// Gate seal schema (for scratch deployment) -const GateSealSchema = z.object({ - sealDuration: PositiveIntSchema, - expiryTimestamp: PositiveIntSchema, - sealingCommittee: z.array(EthereumAddressSchema), +// CircuitBreaker schema (for scratch deployment) +const CircuitBreakerSchema = z.object({ + minPauseDuration: PositiveIntSchema, + maxPauseDuration: PositiveIntSchema, + minHeartbeatInterval: PositiveIntSchema, + maxHeartbeatInterval: PositiveIntSchema, + initialPauseDuration: PositiveIntSchema, + initialHeartbeatInterval: PositiveIntSchema, }); // DAO schema @@ -188,10 +275,11 @@ const OracleSchema = z.object({ }); const ValidatorsExitBusOracleSchema = OracleSchema.extend({ - maxValidatorsPerRequest: PositiveIntSchema, - maxExitRequestsLimit: PositiveIntSchema, - exitsPerFrame: PositiveIntSchema, + maxValidatorsPerReport: PositiveIntSchema, + maxExitBalanceEth: PositiveIntSchema, + balancePerFrameEth: PositiveIntSchema, frameDurationInSec: PositiveIntSchema, + consensusVersion: PositiveIntSchema, }); // Deposit security module schema @@ -203,19 +291,22 @@ const DepositSecurityModuleSchema = z.object({ // Oracle report sanity checker schema const OracleReportSanityCheckerSchema = z.object({ - exitedValidatorsPerDayLimit: PositiveIntSchema, - appearedValidatorsPerDayLimit: PositiveIntSchema, - deprecatedOneOffCLBalanceDecreaseBPLimit: BasisPointsSchema, + exitedEthAmountPerDayLimit: PositiveIntSchema, + appearedEthAmountPerDayLimit: PositiveIntSchema, annualBalanceIncreaseBPLimit: BasisPointsSchema, simulatedShareRateDeviationBPLimit: BasisPointsSchema, - maxValidatorExitRequestsPerReport: PositiveIntSchema, + maxBalanceExitRequestedPerReportInEth: PositiveIntSchema, + maxEffectiveBalanceWeightWCType01: 
PositiveIntSchema, + maxEffectiveBalanceWeightWCType02: PositiveIntSchema, maxItemsPerExtraDataTransaction: PositiveIntSchema, maxNodeOperatorsPerExtraDataItem: PositiveIntSchema, requestTimestampMargin: PositiveIntSchema, maxPositiveTokenRebase: PositiveIntSchema, - initialSlashingAmountPWei: PositiveIntSchema, - inactivityPenaltiesAmountPWei: PositiveIntSchema, + maxCLBalanceDecreaseBP: BasisPointsSchema, clBalanceOraclesErrorUpperBPLimit: BasisPointsSchema, + consolidationEthAmountPerDayLimit: NonNegativeIntSchema, + exitedValidatorEthAmountLimit: PositiveIntSchema, + externalPendingBalanceCapEth: NonNegativeIntSchema, }); // Oracle daemon config schema @@ -251,11 +342,16 @@ const LidoApmSchema = z.object({ ensRegDurationSec: PositiveIntSchema, }); +const LidoSchema = z.object({ + lidoDepositsReserveTarget: BigIntStringSchema, +}); + // Scratch parameters schema export const ScratchParametersSchema = z.object({ chainSpec: ChainSpecSchema.omit({ genesisTime: true, depositContract: true }), - gateSeal: GateSealSchema, + circuitBreaker: CircuitBreakerSchema, lidoApm: LidoApmSchema, + lido: LidoSchema.optional(), dao: DaoSchema, vesting: VestingSchema, burner: BurnerSchema.extend({ @@ -278,8 +374,47 @@ export const ScratchParametersSchema = z.object({ withdrawalQueueERC721: WithdrawalQueueERC721Schema, validatorExitDelayVerifier: ValidatorExitDelayVerifierSchema, triggerableWithdrawalsGateway: TriggerableWithdrawalsGatewaySchema, + consolidationGateway: ConsolidationGatewaySchema, + consolidationBus: ConsolidationBusSchema, + consolidationMigrator: ConsolidationMigratorSchema, predepositGuarantee: PredepositGuaranteeSchema.omit({ genesisForkVersion: true }), operatorGrid: OperatorGridSchema, + topUpGateway: TopUpGatewaySchema, + stakingRouter: StakingRouterSchema, +}); + +// Upgrade parameters schema +export const UpgradeParametersSchema = z.object({ + lido: LidoSchema, + easyTrack: EasyTrackSchema, + depositSecurityModule: DepositSecurityModuleSchema, + 
oracleReportSanityChecker: OracleReportSanityCheckerSchema, + consolidationGateway: ConsolidationGatewaySchema, + consolidationBus: ConsolidationBusSchema, + consolidationMigrator: ConsolidationMigratorSchema, + + topUpGateway: TopUpGatewaySchema, + stakingRouter: StakingRouterSchema, + withdrawalVault: WithdrawalVaultSchema, + triggerableWithdrawalsGateway: TriggerableWithdrawalsGatewaySchema, + accountingOracle: OracleSchema, + validatorsExitBusOracle: ValidatorsExitBusOracleSchema, + + // csm + csmUpgrade: CSMUpgradeConfigSchema, + curatedModule: CuratedModuleConfigSchema, + + upgradeVoteScript: UpgradeVoteScriptSchema, + + // old and optional + vaultHub: VaultHubSchema.optional(), + chainSpec: ChainSpecSchema.extend({ + genesisTime: z.number().int(), + depositContract: EthereumAddressSchema, + }).optional(), + burner: BurnerSchema.optional(), + oracleVersions: OracleVersionsSchema.optional(), + aragonAppVersions: AragonAppVersionsSchema.optional(), }); // Inferred types from zod schemas diff --git a/lib/console.ts b/lib/console.ts new file mode 100644 index 0000000000..d2e55a738e --- /dev/null +++ b/lib/console.ts @@ -0,0 +1,94 @@ +import readline from "node:readline"; + +import { artifacts, ethers, network } from "hardhat"; + +import { bl, ConvertibleToString, cy, gr, gy, log, or, rd, toBool, yg, yl } from "lib"; + +export async function confirm(question: string): Promise { + const AUTO_CONFIRM = toBool(process.env.AUTO_CONFIRM); + if (AUTO_CONFIRM) { + log.warning(" •", rd(`Auto-confirming!`)); + return; + } + + const rl = readline.createInterface({ input: process.stdin, output: process.stdout }); + return new Promise((resolve, reject) => { + rl.question(question, (answer) => { + rl.close(); + if (answer.trim().toLowerCase() === "yes") { + resolve(); + } else { + reject(new Error(`Aborted by user (got "${answer.trim()}")`)); + } + }); + }); +} + +export async function logScriptHeader(title: string, deployer?: string) { + const { chainId } = await 
ethers.provider.getNetwork(); + + log.splitter(); + log.header(title); + log.splitter(); + + log.info("Network", { + "name": yl(network.name), + "chain ID": yl(chainId.toString()), + }); + + if (deployer) { + const deployerBalance = await ethers.provider.getBalance(deployer); + log.info("Deployer", { + address: bl(deployer), + balance: `${gr(ethers.formatEther(deployerBalance))} ETH`, + }); + } +} + +export function logStartReview(msg?: string) { + log.emptyLine(); + log.splitter(); + log.warning(" •", rd(msg || `Start review here ${or("↓↓↓")}`)); + log.splitter(); + log.emptyLine(); +} + +export async function logConfirmReview(msg?: string) { + log.splitter(); + log.warning(" •", rd(msg || `Please review ${or("↑↑↑")} and confirm!`)); + log.splitter(); + await confirm(`Type ${gr("yes")} to confirm and start deployment: `); + log.splitter(); + log.emptyLine(); +} + +export async function buildArgRecords( + contract: string, + args: readonly ConvertibleToString[], + method: string = "constructor", +) { + if (args.length === 0) return { [`${method} args`]: args }; + + const constructorAbi = (await artifacts.readArtifact(contract)).abi.find( + (entry) => entry.type === method || (entry.type === "function" && entry.name === method), + ); + const argNames = + constructorAbi?.inputs?.map((input: { name?: string }, index: number) => input.name || `arg${index}`) ?? 
[]; + + return args.reduce>( + (r, a, i) => ((r[`${method} arg [${i}] ${or(argNames[i] || `arg${i}`)}`] = a), r), + {}, + ); +} + +export async function logArgs( + contract: string, + args: readonly ConvertibleToString[], + method: string = "constructor", + note: string = "new impl.", +) { + log.info(`${contract} ${note}`, { + [cy("call method")]: `${yg(contract)}::${gy(method)}`, + ...(await buildArgRecords(contract, args, method)), + }); +} diff --git a/lib/constants.ts b/lib/constants.ts index 2c5cd8723f..dbca56ac83 100644 --- a/lib/constants.ts +++ b/lib/constants.ts @@ -64,8 +64,27 @@ export const TOTAL_BASIS_POINTS = 100_00n; export const ABNORMALLY_HIGH_FEE_THRESHOLD_BP = 1_00n; export const MAX_FEE_BP = 65_535n; + export const MAX_RESERVE_RATIO_BP = 99_99n; export const LIMITER_PRECISION_BASE = 10n ** 9n; export const DISCONNECT_NOT_INITIATED = 2n ** 48n - 1n; + +export const WITHDRAWAL_CREDENTIALS_TYPE_01 = 0x01; +export const WITHDRAWAL_CREDENTIALS_TYPE_02 = 0x02; + +export const MAX_EFFECTIVE_BALANCE_WC_TYPE_01 = 32n * 10n ** 18n; // 32 ETH +export const MAX_EFFECTIVE_BALANCE_WC_TYPE_02 = 2048n * 10n ** 18n; // 2048 ETH + +export enum WithdrawalCredentialsType { + WC0x01 = WITHDRAWAL_CREDENTIALS_TYPE_01, + WC0x02 = WITHDRAWAL_CREDENTIALS_TYPE_02, +} + +export enum StakingModuleStatus { + Active = 0, + DepositsPaused = 1, + Stopped = 2, +} + export const MAX_SANE_SETTLED_GROWTH = MAX_INT104; diff --git a/lib/contract.ts b/lib/contract.ts index a52d4afc73..5907215735 100644 --- a/lib/contract.ts +++ b/lib/contract.ts @@ -3,6 +3,35 @@ import { artifacts, ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { NonPayableOverrides } from "typechain-types/common"; + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export type MethodArgs = C[M] extends (...args: any[]) => any ? 
Parameters : never; + +// constructor args +// example: const constructorArgs: ConstructorArgs; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type ContractWithConstructor = { deploy: (...args: any[]) => any }; +type DeployArgs = MethodArgs; +type RequiredDeployArgs = Required>; +export type ConstructorArgs = + RequiredDeployArgs extends [...infer Args, infer Last] + ? Last extends NonPayableOverrides & { from?: string } // check if `overrides?` are the last argument + ? Args + : DeployArgs + : DeployArgs; + +// initialize method args +// example: const initArgs: InitializeArgs = [param1, param2]; +// eslint-disable-next-line @typescript-eslint/no-explicit-any +type ContractWithInitialize = { initialize: (...args: any[]) => any }; +export type InitializeArgs = MethodArgs; + +// finalizeUpgrade_xxx method args +// example: for finalizeUpgrade_v5() - const finArgs: FinalizeUpgradeArgs; +type ContractWithFinalizeUpgrade = Extract; +export type FinalizeUpgradeArgs = MethodArgs>; + interface LoadedContractHelper { name: string; contractPath: string; @@ -49,3 +78,18 @@ export async function getContractPath(contractName: string) { const artifact = await artifacts.readArtifact(contractName); return artifact.sourceName; } + +export async function encodeFunctionCall( + contractName: string, + method: string, + args: T, +) { + const artifact = await artifacts.readArtifact(contractName); + const contractInterface = new ethers.Interface(artifact.abi); + return contractInterface.encodeFunctionData(method, args); +} + +export async function isContractDeployed(address: string): Promise { + const code = await ethers.provider.getCode(address); + return code !== "0x"; +} diff --git a/lib/deploy.ts b/lib/deploy.ts index f7d5e41ddf..4a92f3acc9 100644 --- a/lib/deploy.ts +++ b/lib/deploy.ts @@ -120,11 +120,12 @@ export async function deployWithoutProxy( constructorArgs: ConvertibleToString[] = [], addressFieldName = "address", withStateFile = true, + signerOrOptions?: 
Signer | FactoryOptions, fields: Record = {}, ): Promise { logWithConstructorArgs(`Deploying: ${yl(artifactName)} (without proxy)`, constructorArgs); - const contract = await deployContract(artifactName, constructorArgs, deployer, withStateFile); + const contract = await deployContract(artifactName, constructorArgs, deployer, withStateFile, signerOrOptions); if (withStateFile) { const contractPath = await getContractPath(artifactName); @@ -257,14 +258,15 @@ async function getLocatorConfig(locatorAddress: string) { "oracleDaemonConfig", "validatorExitDelayVerifier", "triggerableWithdrawalsGateway", + "consolidationGateway", "accounting", - "wstETH", "predepositGuarantee", + "wstETH", "vaultHub", "vaultFactory", "lazyOracle", "operatorGrid", - "vaultFactory", + "topUpGateway", ]) as (keyof LidoLocator.ConfigStruct)[]; const config = await Promise.all(locatorKeys.map((name) => locator[name]())); diff --git a/lib/explorer.ts b/lib/explorer.ts index d2df191a94..757ad06da7 100644 --- a/lib/explorer.ts +++ b/lib/explorer.ts @@ -24,6 +24,10 @@ async function getBlockExplorer(): Promise { } export async function getTxLink(txHash: string): Promise { - const explorer = await getBlockExplorer(); - return explorer ? `${explorer.baseUrl}/tx/${txHash}` : null; + let baseUrl = process.env.BLOCK_EXPLORER_BASE_URL || null; + if (!baseUrl) { + const explorer = await getBlockExplorer(); + baseUrl = explorer?.baseUrl || null; + } + return baseUrl ? 
`${baseUrl}/tx/${txHash}` : null; } diff --git a/lib/index.ts b/lib/index.ts index 65bad93d60..3a771ff477 100644 --- a/lib/index.ts +++ b/lib/index.ts @@ -4,6 +4,7 @@ export * from "./bigint-math"; export * from "./bytes"; export * from "./constants"; export * from "./contract"; +export * from "./console"; export * from "./deploy"; export * from "./deposit"; export * from "./dsm"; @@ -26,3 +27,4 @@ export * from "./string"; export * from "./storage"; export * from "./time"; export * from "./units"; +export * from "./wc"; diff --git a/lib/log.ts b/lib/log.ts index a1ec2dc135..136b021d52 100644 --- a/lib/log.ts +++ b/lib/log.ts @@ -1,4 +1,5 @@ import chalk from "chalk"; +import { isAddress } from "ethers"; import path from "path"; import { getTxLink } from "./explorer"; @@ -10,12 +11,19 @@ BigInt.prototype.toJSON = function () { export type ConvertibleToString = string | number | boolean | { toString(): string }; -export const rd = (s: ConvertibleToString) => chalk.red(s); -export const yl = (s: ConvertibleToString) => chalk.yellow(s); -export const gr = (s: ConvertibleToString) => chalk.green(s); -export const bl = (s: ConvertibleToString) => chalk.blue(s); -export const cy = (s: ConvertibleToString) => chalk.cyan(s); -export const mg = (s: ConvertibleToString) => chalk.magenta(s); +export const rd = chalk.keyword("red"); // more intense than chalk.red +export const yl = chalk.yellow; +export const gr = chalk.green; +export const bl = chalk.keyword("dodgerblue"); //chalk.blue; +export const cy = chalk.cyan; +export const mg = chalk.keyword("violet"); // not so jarring +export const or = chalk.keyword("orange"); +export const br = chalk.keyword("brown"); +export const dp = chalk.keyword("deeppink"); +export const gy = chalk.keyword("greenyellow"); +export const yg = chalk.keyword("yellowgreen"); +export const nv = chalk.keyword("navy"); +export const bk = chalk.keyword("black"); export const log = (...args: ConvertibleToString[]) => { if (!shouldLog("info")) return; 
@@ -54,7 +62,7 @@ const _splitter = (minLength = LINE_LENGTH, ...args: ConvertibleToString[]) => { if (minLength < MIN_LINE_LENGTH) minLength = MIN_LINE_LENGTH; - console.error(cy(_line(0, minLength))); + console.error(bk(_line(0, minLength))); if (args.length) { console.error(...args); @@ -73,7 +81,7 @@ const _header = (minLength = 20, ...args: ConvertibleToString[]) => { const paddedTitle = title.padStart((totalLength + title.length) / 2).padEnd(totalLength); console.error(`${cy(line)}`); - console.error(`${cy("=")} ${mg(paddedTitle)} ${cy("=")}`); + console.error(`${cy("=")} ${dp(paddedTitle)} ${cy("=")}`); console.error(`${cy(line)}`); if (args.length > 1) { @@ -85,11 +93,58 @@ const _header = (minLength = 20, ...args: ConvertibleToString[]) => { const _title = (title: string) => { if (!shouldLog("debug")) return; - log(mg(title)); + log(br(title)); +}; + +const FORMAT_INDENT = 2; + +const _indent = (depth: number) => " ".repeat(depth * FORMAT_INDENT); + +const _formatRecordValue = (value: unknown, depth = 0, seen = new WeakSet()): string => { + if (value === null) return chalk.gray("null"); + if (value === undefined) return chalk.gray("undefined"); + + if (typeof value === "string") { + return isAddress(value) ? 
bl(value) : yl(value); + } + + if (typeof value === "number" || typeof value === "bigint") { + return gr(value.toString()); + } + + if (typeof value === "boolean") { + return mg(value.toString()); + } + + if (Array.isArray(value)) { + if (seen.has(value)) return chalk.gray("[Circular]"); + if (value.length === 0) return chalk.gray("[]"); + + seen.add(value); + const lines = value.map((item) => `${_indent(depth + 1)}${_formatRecordValue(item, depth + 1, seen)}`); + seen.delete(value); + return `[\n${lines.join(",\n")}\n${_indent(depth)}]`; + } + + if (typeof value === "object") { + if (seen.has(value)) return chalk.gray("{Circular}"); + const entries = Object.entries(value); + if (entries.length === 0) return chalk.gray("{}"); + + seen.add(value); + const lines = entries.map( + ([key, nested]) => `${_indent(depth + 1)}${or(key)}: ${_formatRecordValue(nested, depth + 1, seen)}`, + ); + seen.delete(value); + return `{\n${lines.join(",\n")}\n${_indent(depth)}}`; + } + + return yl(String(value)); }; const _record = (label: string, value: ConvertibleToString) => { - log(`${chalk.grey(label)}: ${yl(value.toString())}`); + const formattedValue = _formatRecordValue(value, 2); + log(`${nv(label)}: ${formattedValue}`); }; // TODO: add logging to file @@ -141,12 +196,12 @@ log.withArguments = (firstLine: string, args: ConvertibleToString[]) => { } if (args.length === 1) { - log(`${mg(JSON.stringify(args[0]))})`); + log(`${or(JSON.stringify(args[0]))})`); return; } log.emptyLine(); - args.forEach((arg) => log(` ${mg(JSON.stringify(arg))},`)); + args.forEach((arg) => log(` ${or(JSON.stringify(arg))},`)); log(`)`); }; @@ -166,6 +221,14 @@ log.scriptFinish = (filename: string) => { log.emptyLine(); }; +log.scriptSkip = (filename: string) => { + if (!shouldLog("info")) return; + + log.splitter(); + log.warning(`Skipped script: ${bl(path.basename(filename))}`); + log.emptyLine(); +}; + log.done = (message: string) => { if (!shouldLog("info")) return; @@ -177,7 +240,7 @@ log.debug = 
(title: string, records: Record = {}) = if (!shouldLog("debug")) return; _title(title); - Object.keys(records).forEach((label) => _record(` ${label}`, records[label])); + Object.keys(records).forEach((label) => _record(`${_indent(1)}${label}`, records[label])); log.emptyLine(); }; @@ -185,7 +248,7 @@ log.info = (title: string, records: Record = {}) => if (!shouldLog("info")) return; _title(title); - Object.keys(records).forEach((label) => _record(` ${label}`, records[label])); + Object.keys(records).forEach((label) => _record(`${_indent(1)}${label}`, records[label])); log.emptyLine(); }; @@ -193,7 +256,7 @@ log.txLink = async (txHash: string) => { const link = await getTxLink(txHash); if (link) { log.info("🔗 Transaction", { - Link: link, + Link: chalk.blue.underline(link), }); } }; diff --git a/lib/oracle.ts b/lib/oracle.ts index 7677a0002b..6d55319106 100644 --- a/lib/oracle.ts +++ b/lib/oracle.ts @@ -35,10 +35,12 @@ export const EXTRA_DATA_TYPE_EXITED_VALIDATORS = 2n; export const DEFAULT_REPORT_FIELDS: OracleReport = { consensusVersion: 1n, refSlot: 0n, - numValidators: 0n, - clBalanceGwei: 0n, + clValidatorsBalanceGwei: 0n, + clPendingBalanceGwei: 0n, stakingModuleIdsWithNewlyExitedValidators: [], numExitedValidatorsByStakingModule: [], + stakingModuleIdsWithUpdatedBalance: [], + validatorBalancesGweiByStakingModule: [], withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, sharesRequestedToBurn: 0n, @@ -56,10 +58,12 @@ export function getReportDataItems(r: OracleReport) { return [ r.consensusVersion, r.refSlot, - r.numValidators, - r.clBalanceGwei, + r.clValidatorsBalanceGwei, + r.clPendingBalanceGwei, r.stakingModuleIdsWithNewlyExitedValidators, r.numExitedValidatorsByStakingModule, + r.stakingModuleIdsWithUpdatedBalance, + r.validatorBalancesGweiByStakingModule, r.withdrawalVaultBalance, r.elRewardsVaultBalance, r.sharesRequestedToBurn, @@ -77,7 +81,7 @@ export function getReportDataItems(r: OracleReport) { export function calcReportDataHash(reportItems: 
ReportAsArray) { const data = ethers.AbiCoder.defaultAbiCoder().encode( [ - "(uint256, uint256, uint256, uint256, uint256[], uint256[], uint256, uint256, uint256, uint256[], uint256, bool, bytes32, string, uint256, bytes32, uint256)", + "(uint256, uint256, uint256, uint256, uint256[], uint256[], uint256[], uint256[], uint256, uint256, uint256, uint256[], uint256, bool, bytes32, string, uint256, bytes32, uint256)", ], [reportItems], ); diff --git a/lib/protocol/context.ts b/lib/protocol/context.ts index 6308994bfa..bad6b8f3b8 100644 --- a/lib/protocol/context.ts +++ b/lib/protocol/context.ts @@ -18,6 +18,10 @@ export const withCSM = () => { return process.env.INTEGRATION_WITH_CSM !== "off"; }; +export const withCMv2 = () => { + return process.env.INTEGRATION_WITH_CMv2 !== "off"; +}; + export const ensureVaultsShareLimit = async (ctx: ProtocolContext) => { const { operatorGrid } = ctx.contracts; if (!operatorGrid) return; @@ -61,20 +65,23 @@ export const getProtocolContext = async (skipV3Contracts: boolean = false): Prom await deployUpgrade(hre.network.name, process.env.STEPS_FILE!); } - const { contracts, signers } = await discover(skipV3Contracts); + const { contracts, signers, modules } = await discover(skipV3Contracts); const interfaces = Object.values(contracts).map((contract) => contract.interface); // By default, all flags are "on" const flags = { withCSM: withCSM(), + withCMv2: withCMv2(), } as ProtocolContextFlags; log.debug("Protocol context flags", { "With CSM": flags.withCSM, + "With CMv2": flags.withCMv2, }); const context = { contracts, + modules, signers, interfaces, flags, diff --git a/lib/protocol/discover.ts b/lib/protocol/discover.ts index 3580876608..dba9d31a35 100644 --- a/lib/protocol/discover.ts +++ b/lib/protocol/discover.ts @@ -23,6 +23,7 @@ import { ProtocolContracts, ProtocolSigners, StakingModuleContracts, + StakingModules, VaultsContracts, WstETHContracts, } from "./types"; @@ -123,6 +124,12 @@ const getCoreContracts = async ( 
"TriggerableWithdrawalsGateway", config.get("triggerableWithdrawalsGateway") || (await locator.triggerableWithdrawalsGateway()), ), + consolidationGateway: loadContract( + "ConsolidationGateway", + config.get("consolidationGateway") || (await locator.consolidationGateway()), + ), + consolidationBus: loadContract("ConsolidationBus", config.get("consolidationBus")), + consolidationMigrator: loadContract("ConsolidationMigrator", config.get("consolidationMigrator")), accounting: loadContract("Accounting", config.get("accounting") || (await locator.accounting())), }), })) as CoreContracts; @@ -158,7 +165,15 @@ const getStakingModules = async (stakingRouter: LoadedContract, c promises.csm = loadContract("IStakingModule", config.get("csm") || csm.stakingModuleAddress); } - return (await batch(promises)) as StakingModuleContracts; + const cmv2 = modules.find((m) => m.name === "curated-onchain-v2"); + if (cmv2) { + promises.cmv2 = loadContract("IStakingModule", config.get("cmv2") || cmv2.stakingModuleAddress); + } + + return { + contracts: (await batch(promises)) as StakingModuleContracts, + modules: { nor, sdvt, csm, cmv2 } as StakingModules, + }; }; /** @@ -214,11 +229,15 @@ export async function discover(skipV3Contracts: boolean) { const locator = await loadContract("LidoLocator", networkConfig.get("locator")); const foundationContracts = await getCoreContracts(locator, networkConfig, skipV3Contracts); + const { contracts: modulesContracts, modules } = await getStakingModules( + foundationContracts.stakingRouter, + networkConfig, + ); const contracts = { locator, ...foundationContracts, ...(await getAragonContracts(foundationContracts.lido, networkConfig)), - ...(await getStakingModules(foundationContracts.stakingRouter, networkConfig)), + ...modulesContracts, ...(await getHashConsensusContract(foundationContracts.accountingOracle, networkConfig)), ...(await getWstEthContract(foundationContracts.withdrawalQueue, networkConfig)), ...(skipV3Contracts ? 
{} : await getVaultsContracts(networkConfig, locator)), @@ -246,6 +265,9 @@ export async function discover(skipV3Contracts: boolean) { "Burner": foundationContracts.burner.address, "wstETH": contracts.wstETH.address, "Triggered Withdrawal Gateway": contracts.triggerableWithdrawalsGateway?.address, + "Consolidation Gateway": contracts.consolidationGateway?.address, + "Consolidation Bus": contracts.consolidationBus?.address, + "Consolidation Migrator": contracts.consolidationMigrator?.address, // Vaults "Staking Vault Factory": contracts.stakingVaultFactory?.address, "Staking Vault Beacon": contracts.stakingVaultBeacon?.address, @@ -264,5 +286,5 @@ export async function discover(skipV3Contracts: boolean) { log.debug("Signers discovered", signers); - return { contracts, signers }; + return { contracts, signers, modules }; } diff --git a/lib/protocol/helpers/accounting.ts b/lib/protocol/helpers/accounting.ts index 0e34ab8309..20caaabfe7 100644 --- a/lib/protocol/helpers/accounting.ts +++ b/lib/protocol/helpers/accounting.ts @@ -5,11 +5,10 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { AccountingOracle } from "typechain-types"; -import { ReportValuesStruct } from "typechain-types/contracts/0.8.9/Accounting"; +import { ReportValuesStruct } from "typechain-types/contracts/0.8.9/Accounting.sol/Accounting"; import { advanceChainTime, - BigIntMath, certainAddress, ether, EXTRA_DATA_FORMAT_EMPTY, @@ -20,13 +19,17 @@ import { log, ONE_GWEI, prepareExtraData, + toGwei, } from "lib"; import { ProtocolContext } from "../types"; +import { buildModuleAccountingReportParams } from "./staking"; + export type OracleReportParams = { clDiff?: bigint; clAppearedValidators?: bigint; + clPendingBalanceGwei?: bigint; elRewardsVaultBalance?: bigint | null; withdrawalVaultBalance?: bigint | null; sharesRequestedToBurn?: bigint | null; @@ -43,6 +46,8 @@ export type OracleReportParams = { extraDataList?: Uint8Array; 
stakingModuleIdsWithNewlyExitedValidators?: bigint[]; numExitedValidatorsByStakingModule?: bigint[]; + stakingModuleIdsWithUpdatedBalance?: bigint[]; + validatorBalancesGweiByStakingModule?: bigint[]; reportElVault?: boolean; reportWithdrawalsVault?: boolean; reportBurner?: boolean; @@ -59,6 +64,45 @@ type OracleReportResults = { export const ZERO_HASH = new Uint8Array(32).fill(0); const ZERO_BYTES32 = "0x" + Buffer.from(ZERO_HASH).toString("hex"); const SHARE_RATE_PRECISION = 10n ** 27n; +const CL_BALANCE_DECREASE_WINDOW_RESET_SECONDS = 37n * 24n * 60n * 60n; + +export function adjustReportModuleBalances( + { + stakingModuleIdsWithUpdatedBalance = [], + validatorBalancesGweiByStakingModule = [], + }: { + stakingModuleIdsWithUpdatedBalance: bigint[]; + validatorBalancesGweiByStakingModule: bigint[]; + }, + clValidatorsBalanceGwei: bigint, +) { + const { lastIndex: lastNonZeroIndex, totalBalance: totalBalanceGwei } = validatorBalancesGweiByStakingModule.reduce<{ + lastIndex: number; + totalBalance: bigint; + }>( + ({ lastIndex, totalBalance }, balance, index) => { + return { lastIndex: balance > 0n ? index : lastIndex, totalBalance: totalBalance + balance }; + }, + { lastIndex: 0, totalBalance: 0n }, + ); + + let remainingTotalBalanceGwei = clValidatorsBalanceGwei; + for (let i = 0; i < validatorBalancesGweiByStakingModule.length; ++i) { + const balance = validatorBalancesGweiByStakingModule[i]; + if (balance === 0n) continue; + + const balanceNew = + i === lastNonZeroIndex ? remainingTotalBalanceGwei : (clValidatorsBalanceGwei * balance) / totalBalanceGwei; + + remainingTotalBalanceGwei -= balanceNew; + validatorBalancesGweiByStakingModule[i] = balanceNew; + } + + return { + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + }; +} /** * Prepare and push oracle report. 
@@ -66,8 +110,9 @@ const SHARE_RATE_PRECISION = 10n ** 27n; export const report = async ( ctx: ProtocolContext, { - clDiff = ether("0.01"), + clDiff, clAppearedValidators = 0n, + clPendingBalanceGwei = 0n, elRewardsVaultBalance = null, withdrawalVaultBalance = null, sharesRequestedToBurn = null, @@ -83,6 +128,8 @@ export const report = async ( extraDataList = new Uint8Array(), stakingModuleIdsWithNewlyExitedValidators = [], numExitedValidatorsByStakingModule = [], + stakingModuleIdsWithUpdatedBalance = [], + validatorBalancesGweiByStakingModule = [], reportElVault = true, reportWithdrawalsVault = true, reportBurner = true, @@ -90,7 +137,8 @@ export const report = async ( vaultsDataTreeCid = "", }: OracleReportParams = {}, ): Promise => { - const { hashConsensus, lido, elRewardsVault, withdrawalVault, burner, accountingOracle } = ctx.contracts; + const { hashConsensus, lido, elRewardsVault, withdrawalVault, burner, accountingOracle, oracleReportSanityChecker } = + ctx.contracts; if (waitNextReportTime) { await waitNextAvailableReportTime(ctx); @@ -98,14 +146,15 @@ export const report = async ( refSlot = refSlot ?? (await hashConsensus.getCurrentFrame()).refSlot; - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); - const postCLBalance = beaconBalance + clDiff; - const postBeaconValidators = beaconValidators + clAppearedValidators; - - log.debug("Beacon", { - "Beacon validators": postBeaconValidators, - "Beacon balance": formatEther(postCLBalance), - }); + const { + clValidatorsBalanceAtLastReport, + clPendingBalanceAtLastReport, + depositedForCurrentReport, + depositedSinceLastReport, + } = await lido.getBalanceStats(); + const deposited = waitNextReportTime ? depositedForCurrentReport : depositedSinceLastReport; + clDiff = clDiff ?? deposited; + const preCLBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; elRewardsVaultBalance = elRewardsVaultBalance ?? 
(await ethers.provider.getBalance(elRewardsVault.address)); withdrawalVaultBalance = withdrawalVaultBalance ?? (await ethers.provider.getBalance(withdrawalVault.address)); @@ -126,6 +175,30 @@ export const report = async ( withdrawalVaultBalance = reportWithdrawalsVault ? withdrawalVaultBalance : 0n; elRewardsVaultBalance = reportElVault ? elRewardsVaultBalance : 0n; + if (reportWithdrawalsVault) { + const lastVaultBalanceAfterTransfer = BigInt(await ethers.provider.getStorage(oracleReportSanityChecker, 4n)); + if (withdrawalVaultBalance < lastVaultBalanceAfterTransfer) { + throw new Error("Reported withdrawal vault balance is below last vault balance after transfer"); + } + // Sync _lastVaultBalanceAfterTransfer with the current vault balance so the pending check + // does not interpret test-funded vault balance as CL withdrawals (zero-sum rebalancing). + // The contract will update _lastVaultBalanceAfterTransfer = vaultBalance - transfer after the report. + if (withdrawalVaultBalance > lastVaultBalanceAfterTransfer) { + await ethers.provider.send("hardhat_setStorageAt", [ + await oracleReportSanityChecker.getAddress(), + ethers.toBeHex(4n, 32), + ethers.toBeHex(withdrawalVaultBalance, 32), + ]); + } + } + + const postCLBalance = preCLBalance + clDiff; + + log.debug("Beacon", { + "Beacon validators delta": clAppearedValidators, + "Beacon balance": formatEther(postCLBalance), + }); + if (sharesRequestedToBurn === null && reportBurner) { const [coverShares, nonCoverShares] = await burner.getSharesRequestedToBurn(); sharesRequestedToBurn = coverShares + nonCoverShares; @@ -141,8 +214,8 @@ export const report = async ( const simulatedReport = await simulateReport(ctx, { refSlot, - beaconValidators: postBeaconValidators, - clBalance: postCLBalance, + clValidatorsBalance: postCLBalance, + clPendingBalance: 0n, withdrawalVaultBalance, elRewardsVaultBalance, }); @@ -173,17 +246,25 @@ export const report = async ( } isBunkerMode = (await lido.getTotalPooledEther()) > 
postTotalPooledEther; - log.debug("Bunker Mode", { "Is Active": isBunkerMode }); } + if (stakingModuleIdsWithUpdatedBalance.length === 0) { + ({ stakingModuleIdsWithUpdatedBalance, validatorBalancesGweiByStakingModule } = adjustReportModuleBalances( + await buildModuleAccountingReportParams(ctx), + toGwei(postCLBalance), + )); + } + const reportData = { consensusVersion: await accountingOracle.getConsensusVersion(), refSlot, - numValidators: postBeaconValidators, - clBalanceGwei: postCLBalance / ONE_GWEI, + clValidatorsBalanceGwei: postCLBalance / ONE_GWEI - clPendingBalanceGwei, + clPendingBalanceGwei, stakingModuleIdsWithNewlyExitedValidators, numExitedValidatorsByStakingModule, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn: sharesRequestedToBurn ?? 0n, @@ -209,17 +290,59 @@ export const report = async ( }); }; +export const getDepositedSinceLastReport = async (ctx: ProtocolContext): Promise => { + const { depositedSinceLastReport } = await ctx.contracts.lido.getBalanceStats(); + return depositedSinceLastReport; +}; + +/** + * Submit report with an effective CL delta between reports. + * + * `report()` expects `clDiff` as raw `postCLBalance - preCLBalance`. + * Since `preCLBalance` is based on last report snapshot, deposits made after that + * snapshot must be added to preserve the intended effective delta. + */ +export const reportWithEffectiveClDiff = async ( + ctx: ProtocolContext, + effectiveClDiff: bigint, + params: Omit = {}, +): Promise => { + const depositedSinceLastReport = await getDepositedSinceLastReport(ctx); + return report(ctx, { ...params, clDiff: depositedSinceLastReport + effectiveClDiff }); +}; + +export const resetCLBalanceDecreaseWindow = async ( + ctx: ProtocolContext, + params: Omit = {}, +): Promise => { + // Move report timestamp beyond the 36-day window and submit an effective neutral report. 
+ await advanceChainTime(CL_BALANCE_DECREASE_WINDOW_RESET_SECONDS); + return reportWithEffectiveClDiff(ctx, 0n, { + excludeVaultsBalances: true, + skipWithdrawals: true, + ...params, + }); +}; + export async function reportWithoutExtraData( ctx: ProtocolContext, numExitedValidatorsByStakingModule: bigint[], stakingModuleIdsWithNewlyExitedValidators: bigint[], extraData: ReturnType, + { + effectiveClDiff, + }: { + effectiveClDiff?: bigint; + } = {}, ) { const { accountingOracle } = ctx.contracts; const { extraDataItemsCount, extraDataChunks, extraDataChunkHashes } = extraData; + const clDiff = effectiveClDiff === undefined ? undefined : (await getDepositedSinceLastReport(ctx)) + effectiveClDiff; + const reportData: Partial = { + ...(clDiff === undefined ? {} : { clDiff }), excludeVaultsBalances: true, extraDataFormat: EXTRA_DATA_FORMAT_LIST, extraDataHash: extraDataChunkHashes[0], @@ -292,6 +415,22 @@ export const getReportTimeElapsed = async (ctx: ProtocolContext) => { }; }; +export const getNextReportContext = async ( + ctx: ProtocolContext, +): Promise<{ nextReportRefSlot: bigint; reportTimeElapsed: bigint }> => { + const { accountingOracle, hashConsensus } = ctx.contracts; + + const lastProcessingRefSlot = await accountingOracle.getLastProcessingRefSlot(); + const currentFrame = await hashConsensus.getCurrentFrame(); + const frameConfig = await hashConsensus.getFrameConfig(); + const chainConfig = await hashConsensus.getChainConfig(); + + const nextReportRefSlot = currentFrame.refSlot + frameConfig.epochsPerFrame * chainConfig.slotsPerEpoch; + const reportTimeElapsed = (nextReportRefSlot - lastProcessingRefSlot) * chainConfig.secondsPerSlot; + + return { nextReportRefSlot, reportTimeElapsed }; +}; + /** * Wait for the next available report time. * Returns the report timestamp and the ref slot of the next frame. 
@@ -330,8 +469,8 @@ export const waitNextAvailableReportTime = async ( type SimulateReportParams = { refSlot: bigint; - beaconValidators: bigint; - clBalance: bigint; + clValidatorsBalance: bigint; + clPendingBalance: bigint; withdrawalVaultBalance: bigint; elRewardsVaultBalance: bigint; }; @@ -348,7 +487,13 @@ type SimulateReportResult = { */ export const simulateReport = async ( ctx: ProtocolContext, - { refSlot, beaconValidators, clBalance, withdrawalVaultBalance, elRewardsVaultBalance }: SimulateReportParams, + { + refSlot, + clValidatorsBalance, + clPendingBalance, + withdrawalVaultBalance, + elRewardsVaultBalance, + }: SimulateReportParams, ): Promise => { const { hashConsensus, accounting } = ctx.contracts; @@ -357,17 +502,18 @@ export const simulateReport = async ( log.debug("Simulating oracle report", { "Ref Slot": refSlot, - "Beacon Validators": beaconValidators, - "CL Balance": formatEther(clBalance), + "CL Validators Balance": formatEther(clValidatorsBalance), + "CL Pending Balance": formatEther(clPendingBalance), "Withdrawal Vault Balance": formatEther(withdrawalVaultBalance), "El Rewards Vault Balance": formatEther(elRewardsVaultBalance), }); const reportValues: ReportValuesStruct = { timestamp: reportTimestamp, + // timeElapsed: (await getReportTimeElapsed(ctx)).timeElapsed, timeElapsed: /* 1 day */ 86_400n, - clValidators: beaconValidators, - clBalance, + clValidatorsBalance, + clPendingBalance, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn: 0n, @@ -393,7 +539,6 @@ export const simulateReport = async ( }; type HandleOracleReportParams = { - beaconValidators: bigint; clBalance: bigint; sharesRequestedToBurn: bigint; withdrawalVaultBalance: bigint; @@ -405,7 +550,6 @@ type HandleOracleReportParams = { export const handleOracleReport = async ( ctx: ProtocolContext, { - beaconValidators, clBalance, sharesRequestedToBurn, withdrawalVaultBalance, @@ -425,7 +569,6 @@ export const handleOracleReport = async ( try { log.debug("Handle 
oracle report", { "Ref Slot": refSlot, - "Beacon Validators": beaconValidators, "CL Balance": formatEther(clBalance), "Withdrawal Vault Balance": formatEther(withdrawalVaultBalance), "El Rewards Vault Balance": formatEther(elRewardsVaultBalance), @@ -435,8 +578,8 @@ export const handleOracleReport = async ( await accounting.connect(accountingOracleAccount).handleOracleReport({ timestamp: reportTimestamp, timeElapsed, // 1 day - clValidators: beaconValidators, - clBalance, + clValidatorsBalance: clBalance, + clPendingBalance: 0n, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, @@ -472,8 +615,7 @@ const getFinalizationBatches = async ( const bufferedEther = await lido.getBufferedEther(); const unfinalizedSteth = await withdrawalQueue.unfinalizedStETH(); - - const reservedBuffer = BigIntMath.min(bufferedEther, unfinalizedSteth); + const reservedBuffer = await lido.getWithdrawalsReserve(); const availableEth = limitedWithdrawalVaultBalance + limitedElRewardsVaultBalance + reservedBuffer; const blockTimestamp = await getCurrentBlockTimestamp(); @@ -545,12 +687,14 @@ const getFinalizationBatches = async ( export type OracleReportSubmitParams = { refSlot: bigint; clBalance: bigint; - numValidators: bigint; + clPendingBalanceGwei?: bigint; withdrawalVaultBalance: bigint; elRewardsVaultBalance: bigint; sharesRequestedToBurn: bigint; stakingModuleIdsWithNewlyExitedValidators?: bigint[]; numExitedValidatorsByStakingModule?: bigint[]; + stakingModuleIdsWithUpdatedBalance?: bigint[]; + validatorBalancesGweiByStakingModule?: bigint[]; withdrawalFinalizationBatches?: bigint[]; simulatedShareRate?: bigint; isBunkerMode?: boolean; @@ -568,6 +712,43 @@ type OracleReportSubmitResult = { extraDataTx: ContractTransactionResponse; }; +export const submitReportDataWithConsensus = async ( + ctx: ProtocolContext, + data: AccountingOracle.ReportDataStruct, +): Promise => { + const { accountingOracle } = ctx.contracts; + + const reportHash = 
calcReportDataHash(getReportDataItems(data)); + const submitter = await reachConsensus(ctx, { + refSlot: BigInt(data.refSlot), + reportHash, + consensusVersion: BigInt(data.consensusVersion), + }); + const oracleVersion = await accountingOracle.getContractVersion(); + + return accountingOracle.connect(submitter).submitReportData(data, oracleVersion); +}; + +export const submitReportDataWithConsensusAndEmptyExtraData = async ( + ctx: ProtocolContext, + data: AccountingOracle.ReportDataStruct, +): Promise<{ reportTx: ContractTransactionResponse; extraDataTx: ContractTransactionResponse }> => { + const { accountingOracle } = ctx.contracts; + + const reportHash = calcReportDataHash(getReportDataItems(data)); + const submitter = await reachConsensus(ctx, { + refSlot: BigInt(data.refSlot), + reportHash, + consensusVersion: BigInt(data.consensusVersion), + }); + const oracleVersion = await accountingOracle.getContractVersion(); + + const reportTx = await accountingOracle.connect(submitter).submitReportData(data, oracleVersion); + const extraDataTx = await accountingOracle.connect(submitter).submitReportExtraDataEmpty(); + + return { reportTx, extraDataTx }; +}; + /** * Main function to push oracle report to the protocol. 
*/ @@ -576,12 +757,14 @@ const submitReport = async ( { refSlot, clBalance, - numValidators, + clPendingBalanceGwei = 0n, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, stakingModuleIdsWithNewlyExitedValidators = [], numExitedValidatorsByStakingModule = [], + stakingModuleIdsWithUpdatedBalance = [], + validatorBalancesGweiByStakingModule = [], withdrawalFinalizationBatches = [], simulatedShareRate = 0n, isBunkerMode = false, @@ -598,12 +781,15 @@ const submitReport = async ( log.debug("Pushing oracle report", { "Ref slot": refSlot, "CL balance": formatEther(clBalance), - "Validators": numValidators, + // TODO: Add proper validator count logging "Withdrawal vault": formatEther(withdrawalVaultBalance), "El rewards vault": formatEther(elRewardsVaultBalance), "Shares requested to burn": sharesRequestedToBurn, "Staking module ids with newly exited validators": stakingModuleIdsWithNewlyExitedValidators, "Num exited validators by staking module": numExitedValidatorsByStakingModule, + "Staking module ids with updated active balance": stakingModuleIdsWithUpdatedBalance, + "Validator balances by staking module": validatorBalancesGweiByStakingModule, + "CL pending balance (gwei)": clPendingBalanceGwei, "Withdrawal finalization batches": withdrawalFinalizationBatches, "Is bunker mode": isBunkerMode, "Vaults data tree root": vaultsDataTreeRoot, @@ -616,17 +802,23 @@ const submitReport = async ( const consensusVersion = await accountingOracle.getConsensusVersion(); const oracleVersion = await accountingOracle.getContractVersion(); + const clBalanceGwei = clBalance / ONE_GWEI; + if (clPendingBalanceGwei > clBalanceGwei) { + throw new Error("Reported pending CL balance exceeds total CL balance"); + } const data = { consensusVersion, refSlot, - clBalanceGwei: clBalance / ONE_GWEI, - numValidators, + clValidatorsBalanceGwei: clBalanceGwei - clPendingBalanceGwei, + clPendingBalanceGwei, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, 
stakingModuleIdsWithNewlyExitedValidators, numExitedValidatorsByStakingModule, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, withdrawalFinalizationBatches, simulatedShareRate, isBunkerMode, @@ -746,10 +938,12 @@ const reachConsensus = async ( export const getReportDataItems = (data: AccountingOracle.ReportDataStruct) => [ data.consensusVersion, data.refSlot, - data.numValidators, - data.clBalanceGwei, + data.clValidatorsBalanceGwei, + data.clPendingBalanceGwei, data.stakingModuleIdsWithNewlyExitedValidators, data.numExitedValidatorsByStakingModule, + data.stakingModuleIdsWithUpdatedBalance, + data.validatorBalancesGweiByStakingModule, data.withdrawalVaultBalance, data.elRewardsVaultBalance, data.sharesRequestedToBurn, @@ -770,10 +964,13 @@ export const calcReportDataHash = (items: ReturnType) const types = [ "uint256", // consensusVersion "uint256", // refSlot - "uint256", // numValidators - "uint256", // clBalanceGwei + // TODO: Update types to match new balance-based structure + "uint256", // clValidatorsBalanceGwei + "uint256", // clPendingBalanceGwei "uint256[]", // stakingModuleIdsWithNewlyExitedValidators "uint256[]", // numExitedValidatorsByStakingModule + "uint256[]", // stakingModuleIdsWithUpdatedBalance + "uint256[]", // validatorBalancesGweiByStakingModule "uint256", // withdrawalVaultBalance "uint256", // elRewardsVaultBalance "uint256", // sharesRequestedToBurn @@ -825,7 +1022,9 @@ export const ensureOracleCommitteeMembers = async (ctx: ProtocolContext, minMemb log(`Adding oracle committee member ${count}`); const address = getOracleCommitteeMemberAddress(count); - await hashConsensus.connect(agentSigner).addMember(address, quorum); + if (!(await hashConsensus.getIsMember(address))) { + await hashConsensus.connect(agentSigner).addMember(address, quorum); + } addresses.push(address); diff --git a/lib/protocol/helpers/index.ts b/lib/protocol/helpers/index.ts index ba41a60322..b3af3e5814 100644 --- 
a/lib/protocol/helpers/index.ts +++ b/lib/protocol/helpers/index.ts @@ -1,25 +1,43 @@ -export { depositAndReportValidators, ensureStakeLimit, unpauseStaking } from "./staking"; +export { + depositAndReportValidators, + depositValidatorsWithoutReport, + ensureStakeLimit, + seedProtocolPendingBaseline, + getStakingModuleBalances, + unpauseStaking, +} from "./staking"; export { finalizeWQViaElVault, finalizeWQViaSubmit, unpauseWithdrawalQueue } from "./withdrawal"; -export { setMaxPositiveTokenRebase } from "./sanity-checker"; +export { setMaxPositiveTokenRebase, updateOracleReportLimits } from "./sanity-checker"; export { calcReportDataHash, ensureHashConsensusInitialEpoch, ensureOracleCommitteeMembers, getReportDataItems, + getNextReportContext, getReportTimeElapsed, waitNextAvailableReportTime, handleOracleReport, OracleReportParams, OracleReportSubmitParams, report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, + submitReportDataWithConsensus, + submitReportDataWithConsensusAndEmptyExtraData, + getDepositedSinceLastReport, } from "./accounting"; export { ensureDsmGuardians } from "./dsm"; +export { + norSdvtEnsureOperators, + norSdvtAddNodeOperator, + norSdvtAddOperatorKeys, + norSdvtSetOperatorStakingLimit, +} from "./nor-sdvt"; export { ensurePredepositGuaranteeUnpaused } from "./pdg"; -export { norSdvtEnsureOperators } from "./nor-sdvt"; export { calcNodeOperatorRewards } from "./staking-module"; export * from "./vaults"; diff --git a/lib/protocol/helpers/sanity-checker.ts b/lib/protocol/helpers/sanity-checker.ts index 7a0a8c54eb..daa72c9f9e 100644 --- a/lib/protocol/helpers/sanity-checker.ts +++ b/lib/protocol/helpers/sanity-checker.ts @@ -12,3 +12,46 @@ export const setMaxPositiveTokenRebase = async (ctx: ProtocolContext, maxPositiv await sanityChecker.connect(agent).revokeRole(MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE, agent.address); return initialMaxPositiveTokenRebase; }; + +export const updateOracleReportLimits = async ( + ctx: 
ProtocolContext, + patch: Partial< + Awaited> + >, +) => { + const { oracleReportSanityChecker: sanityChecker } = ctx.contracts; + const agent = await ctx.getSigner("agent"); + const currentLimits = await sanityChecker.getOracleReportLimits(); + const secondOpinionOracle = await sanityChecker.secondOpinionOracle(); + const role = await sanityChecker.ALL_LIMITS_MANAGER_ROLE(); + const nextLimits = { + exitedEthAmountPerDayLimit: patch.exitedEthAmountPerDayLimit ?? currentLimits.exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit: patch.appearedEthAmountPerDayLimit ?? currentLimits.appearedEthAmountPerDayLimit, + annualBalanceIncreaseBPLimit: patch.annualBalanceIncreaseBPLimit ?? currentLimits.annualBalanceIncreaseBPLimit, + simulatedShareRateDeviationBPLimit: + patch.simulatedShareRateDeviationBPLimit ?? currentLimits.simulatedShareRateDeviationBPLimit, + maxBalanceExitRequestedPerReportInEth: + patch.maxBalanceExitRequestedPerReportInEth ?? currentLimits.maxBalanceExitRequestedPerReportInEth, + maxEffectiveBalanceWeightWCType01: + patch.maxEffectiveBalanceWeightWCType01 ?? currentLimits.maxEffectiveBalanceWeightWCType01, + maxEffectiveBalanceWeightWCType02: + patch.maxEffectiveBalanceWeightWCType02 ?? currentLimits.maxEffectiveBalanceWeightWCType02, + maxItemsPerExtraDataTransaction: + patch.maxItemsPerExtraDataTransaction ?? currentLimits.maxItemsPerExtraDataTransaction, + maxNodeOperatorsPerExtraDataItem: + patch.maxNodeOperatorsPerExtraDataItem ?? currentLimits.maxNodeOperatorsPerExtraDataItem, + requestTimestampMargin: patch.requestTimestampMargin ?? currentLimits.requestTimestampMargin, + maxPositiveTokenRebase: patch.maxPositiveTokenRebase ?? currentLimits.maxPositiveTokenRebase, + maxCLBalanceDecreaseBP: patch.maxCLBalanceDecreaseBP ?? currentLimits.maxCLBalanceDecreaseBP, + clBalanceOraclesErrorUpperBPLimit: + patch.clBalanceOraclesErrorUpperBPLimit ?? 
currentLimits.clBalanceOraclesErrorUpperBPLimit, + consolidationEthAmountPerDayLimit: + patch.consolidationEthAmountPerDayLimit ?? currentLimits.consolidationEthAmountPerDayLimit, + exitedValidatorEthAmountLimit: patch.exitedValidatorEthAmountLimit ?? currentLimits.exitedValidatorEthAmountLimit, + externalPendingBalanceCapEth: patch.externalPendingBalanceCapEth ?? currentLimits.externalPendingBalanceCapEth, + }; + + await sanityChecker.connect(agent).grantRole(role, agent.address); + await sanityChecker.connect(agent).setOracleReportLimits(nextLimits, secondOpinionOracle); + await sanityChecker.connect(agent).revokeRole(role, agent.address); +}; diff --git a/lib/protocol/helpers/share-rate.ts b/lib/protocol/helpers/share-rate.ts index 88a56495f2..b13832b187 100644 --- a/lib/protocol/helpers/share-rate.ts +++ b/lib/protocol/helpers/share-rate.ts @@ -28,16 +28,14 @@ async function changeInternalEther(ctx: ProtocolContext, internalEtherDelta: big const accountingSigner = await impersonate(accounting, ether("1")); - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); - - await lido - .connect(accountingSigner) - .processClStateUpdate( - await getCurrentBlockTimestamp(), - beaconValidators, - beaconValidators, - beaconBalance + internalEtherDelta, - ); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const beaconBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; + + await lido.connect(accountingSigner).processClStateUpdate( + await getCurrentBlockTimestamp(), + beaconBalance + internalEtherDelta, // new clValidatorsBalance + 0n, // new clPendingBalance + ); } export const ensureExactShareRate = async (ctx: ProtocolContext, targetShareRate: bigint) => { diff --git a/lib/protocol/helpers/staking.ts b/lib/protocol/helpers/staking.ts index 0354ef00ce..7e0ac015d4 100644 --- a/lib/protocol/helpers/staking.ts +++ b/lib/protocol/helpers/staking.ts @@ -1,16 +1,34 @@ import { 
ethers, ZeroAddress } from "ethers"; -import { BigIntMath, certainAddress, ether, impersonate, log } from "lib"; -import { TOTAL_BASIS_POINTS } from "lib/constants"; - -import { ZERO_HASH } from "test/deploy"; +import { + BigIntMath, + certainAddress, + ether, + impersonate, + log, + ONE_GWEI, + StakingModuleStatus, + toGwei, + TOTAL_BASIS_POINTS, +} from "lib"; + +import { ZERO_HASH } from "test/suite"; import { ProtocolContext } from "../types"; -import { report } from "./accounting"; +import { adjustReportModuleBalances, report, submitReportDataWithConsensusAndEmptyExtraData } from "./accounting"; const DEPOSIT_SIZE = ether("32"); +export type StakingModuleBalances = { + validatorsBalanceGwei: bigint; +}; + +export type ModuleAccountingReportParams = { + stakingModuleIdsWithUpdatedBalance: bigint[]; + validatorBalancesGweiByStakingModule: bigint[]; +}; + export const unpauseStaking = async (ctx: ProtocolContext) => { const { lido } = ctx.contracts; if (await lido.isStakingPaused()) { @@ -21,12 +39,6 @@ export const unpauseStaking = async (ctx: ProtocolContext) => { } }; -export enum StakingModuleStatus { - Active = 0, - DepositsPaused = 1, - Stopped = 2, -} - export const getStakingModuleStatuses = async ( ctx: ProtocolContext, ): Promise<{ [moduleId: number]: StakingModuleStatus }> => { @@ -54,6 +66,41 @@ export const getStakingModuleManagerSigner = async (ctx: ProtocolContext) => { return await impersonate(await stakingRouter.getRoleMember(role, 0n), ether("100000")); }; +export const getStakingModuleBalances = async ( + ctx: ProtocolContext, + moduleId: bigint, +): Promise => { + const [validatorsBalanceGwei] = await ctx.contracts.stakingRouter.getStakingModuleStateAccounting(moduleId); + return { validatorsBalanceGwei }; +}; + +export const buildModuleAccountingReportParams = async ( + ctx: ProtocolContext, + { + validatorsDeltaGweiByModule = new Map(), + }: { + validatorsDeltaGweiByModule?: Map; + } = {}, +): Promise => { + const { stakingRouter } = 
ctx.contracts; + + const stakingModuleIds = await stakingRouter.getStakingModuleIds(); + // Router balance reporting now requires all registered modules in router order. + const stakingModuleIdsWithUpdatedBalance = [...stakingModuleIds]; + const validatorBalancesGweiByStakingModule: bigint[] = []; + + for (const moduleId of stakingModuleIds) { + const [currentValidatorsBalanceGwei] = await stakingRouter.getStakingModuleStateAccounting(moduleId); + const validatorsBalanceGwei = currentValidatorsBalanceGwei + (validatorsDeltaGweiByModule.get(moduleId) ?? 0n); + validatorBalancesGweiByStakingModule.push(validatorsBalanceGwei); + } + + return { + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + }; +}; + export const setModuleStakeShareLimit = async (ctx: ProtocolContext, moduleId: bigint, stakeShareLimit: bigint) => { const { stakingRouter } = ctx.contracts; @@ -120,13 +167,145 @@ export const setStakingLimit = async ( await acl.connect(agentSigner).revokePermission(agentAddress, lido.address, role); }; +const depositValidatorsViaRouter = async (ctx: ProtocolContext, moduleId: bigint, depositsCount: bigint) => { + const { depositSecurityModule, stakingRouter } = ctx.contracts; + + const managerSigner = await getStakingModuleManagerSigner(ctx); + if (!managerSigner) { + throw new Error("staking module manager signer is required for deposit setup"); + } + + const moduleConfig = await stakingRouter.getStakingModule(moduleId); + const shouldRestoreMaxDepositsPerBlock = moduleConfig.maxDepositsPerBlock > depositsCount; + + if (shouldRestoreMaxDepositsPerBlock) { + await stakingRouter + .connect(managerSigner) + .updateStakingModule( + moduleId, + moduleConfig.stakeShareLimit, + moduleConfig.priorityExitShareThreshold, + moduleConfig.stakingModuleFee, + moduleConfig.treasuryFee, + depositsCount, + moduleConfig.minDepositBlockDistance, + ); + } + + try { + const dsmSigner = await impersonate(await depositSecurityModule.getAddress(), ether("1")); + 
await stakingRouter.connect(dsmSigner).deposit(moduleId, ZERO_HASH); + } finally { + if (shouldRestoreMaxDepositsPerBlock) { + await stakingRouter + .connect(managerSigner) + .updateStakingModule( + moduleId, + moduleConfig.stakeShareLimit, + moduleConfig.priorityExitShareThreshold, + moduleConfig.stakingModuleFee, + moduleConfig.treasuryFee, + moduleConfig.maxDepositsPerBlock, + moduleConfig.minDepositBlockDistance, + ); + } + } +}; + +export const depositValidatorsWithoutReport = async ( + ctx: ProtocolContext, + depositsCount: bigint, +): Promise> => { + const { lido, withdrawalQueue } = ctx.contracts; + + const ethToDeposit = depositsCount * DEPOSIT_SIZE; + let depositableEther = await lido.getDepositableEther(); + let submitValue = ethToDeposit; + + if (depositableEther < ethToDeposit) { + const bufferedEther = await lido.getBufferedEther(); + const unfinalizedStETH = await withdrawalQueue.unfinalizedStETH(); + submitValue += unfinalizedStETH - bufferedEther; + } else { + submitValue -= ether("0.001"); // ensure consume buffer + } + const ethHolder = await impersonate(certainAddress("provision:eth:whale"), submitValue + ether("1")); + await lido.connect(ethHolder).submit(ZeroAddress, { value: submitValue }); + + depositableEther = await lido.getDepositableEther(); + if (depositableEther < ethToDeposit) { + throw new Error(`Not enough depositable ether`); + } + + const depositedBefore = (await lido.getBalanceStats()).depositedSinceLastReport; + + const { totalAllocated, allocated } = await ctx.contracts.stakingRouter.getDepositAllocations(ethToDeposit, false); + + if (totalAllocated < ethToDeposit) { + throw new Error(`Not enough allocation capacity in staking modules`); + } + + const moduleIds = await ctx.contracts.stakingRouter.getStakingModuleIds(); + const validatorsDeltaGweiByModule = new Map(); + + for (let i = 0; i < moduleIds.length; i++) { + if (allocated[i] === 0n) { + continue; + } + const moduleDepositsCount = allocated[i] / DEPOSIT_SIZE; + if 
(moduleDepositsCount === 0n) { + throw new Error(`Wrong deposits allocated to Module ${moduleIds[i]}`); + } + await depositValidatorsViaRouter(ctx, moduleIds[i], moduleDepositsCount); + + validatorsDeltaGweiByModule.set(moduleIds[i], allocated[i] / ONE_GWEI); + } + + const { depositedSinceLastReport } = await lido.getBalanceStats(); + + if (depositedSinceLastReport - depositedBefore !== ethToDeposit) { + throw new Error(`Deposited ${depositedSinceLastReport - depositedBefore} wei, expected ${ethToDeposit}`); + } + + return validatorsDeltaGweiByModule; +}; + +export const seedProtocolPendingBaseline = async ( + ctx: ProtocolContext, + moduleId: bigint, + depositsCount: bigint = 1n, +) => { + await depositValidatorsWithoutReport(ctx, depositsCount); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport, depositedSinceLastReport } = + await ctx.contracts.lido.getBalanceStats(); + + const { data } = await report(ctx, { + clDiff: depositedSinceLastReport, + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + waitNextReportTime: true, + // adjust modules balances in case of unaccounted cl balance in tests + ...adjustReportModuleBalances( + await buildModuleAccountingReportParams(ctx), + toGwei(clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport), + ), + }); + + const pendingBaselineGwei = toGwei(depositedSinceLastReport); + return submitReportDataWithConsensusAndEmptyExtraData(ctx, { + ...data, + clValidatorsBalanceGwei: BigInt(data.clValidatorsBalanceGwei) - pendingBaselineGwei, + clPendingBalanceGwei: pendingBaselineGwei, + }); +}; + export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: bigint, depositsCount: bigint) => { - const { lido, depositSecurityModule, withdrawalQueue, stakingRouter } = ctx.contracts; + const { lido, withdrawalQueue, stakingRouter } = ctx.contracts; const ethToDeposit = depositsCount * DEPOSIT_SIZE; const submitValue = (await withdrawalQueue.unfinalizedStETH()) + 
ethToDeposit; const ethHolder = await impersonate(certainAddress("provision:eth:whale"), submitValue + ether("1")); - const dsmSigner = await impersonate(depositSecurityModule.address, ether("100000")); const managerSigner = await getStakingModuleManagerSigner(ctx); await lido.connect(ethHolder).submit(ZeroAddress, { value: submitValue }); @@ -137,7 +316,7 @@ export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: } const isMaxDepositsCountNotEnough = async () => { - const maxDepositsCount = await stakingRouter.getStakingModuleMaxDepositsCount(moduleId, depositableEther); + const maxDepositsCount = await stakingRouter.getStakingModuleMaxDepositsCount(moduleId, ethToDeposit); return maxDepositsCount < depositsCount; }; @@ -159,12 +338,17 @@ export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: throw new Error(`Not enough max deposits count for staking module ${moduleId}`); } - const numDepositedBefore = (await lido.getBeaconStat()).depositedValidators; + const getTotalDepositedValidators = async () => { + const moduleDigests = await stakingRouter.getAllStakingModuleDigests(); + return moduleDigests.reduce((sum, digest) => sum + digest.summary.totalDepositedValidators, 0n); + }; + + const numDepositedBefore = await getTotalDepositedValidators(); - // Deposit validators - await lido.connect(dsmSigner).deposit(depositsCount, moduleId, ZERO_HASH); + // Deposit validators via StakingRouter (DSM calls SR which pulls ETH from Lido) + await depositValidatorsViaRouter(ctx, moduleId, depositsCount); - const numDepositedAfter = (await lido.getBeaconStat()).depositedValidators; + const numDepositedAfter = await getTotalDepositedValidators(); if (numDepositedAfter !== numDepositedBefore + depositsCount) { throw new Error(`Deposited ${numDepositedAfter} validators, expected ${numDepositedBefore + depositsCount}`); @@ -177,28 +361,35 @@ export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: await 
stakingRouter.connect(managerSigner).setStakingModuleStatus(mId, originalStatus); } - const before = await lido.getBeaconStat(); + const before = await lido.getBalanceStats(); log.debug("Validators on beacon chain before provisioning", { "Module ID to deposit": moduleId, - "Deposited": before.depositedValidators, - "Total": before.beaconValidators, - "Balance": before.beaconBalance, + "Deposited": before.depositedSinceLastReport, + "Active": before.clValidatorsBalanceAtLastReport, + "Pending": before.clPendingBalanceAtLastReport, }); // Add new validators to beacon chain + const validatorsDeltaGweiByModule = new Map([[moduleId, toGwei(ethToDeposit)]]); + const postCLBalanceWei = before.clValidatorsBalanceAtLastReport + before.clPendingBalanceAtLastReport + ethToDeposit; + await report(ctx, { clDiff: ethToDeposit, clAppearedValidators: depositsCount, skipWithdrawals: true, + ...adjustReportModuleBalances( + await buildModuleAccountingReportParams(ctx, { validatorsDeltaGweiByModule }), + toGwei(postCLBalanceWei), + ), }); - const after = await lido.getBeaconStat(); + const after = await lido.getBalanceStats(); log.debug("Validators on beacon chain after depositing", { "Module ID deposited": moduleId, - "Deposited": after.depositedValidators, - "Total": after.beaconValidators, - "Balance": after.beaconBalance, + "Deposited": after.depositedSinceLastReport, + "Active": after.clValidatorsBalanceAtLastReport, + "Pending": after.clPendingBalanceAtLastReport, }); }; diff --git a/lib/protocol/networks.ts b/lib/protocol/networks.ts index f67bf96721..6606280f93 100644 --- a/lib/protocol/networks.ts +++ b/lib/protocol/networks.ts @@ -75,12 +75,16 @@ const defaultEnv = { nor: "NODE_OPERATORS_REGISTRY_ADDRESS", sdvt: "SIMPLE_DVT_REGISTRY_ADDRESS", csm: "CSM_REGISTRY_ADDRESS", + cmv2: "CURATED_MODULE_V2_ADDRESS", // hash consensus hashConsensus: "HASH_CONSENSUS_ADDRESS", // vaults stakingVaultFactory: "STAKING_VAULT_FACTORY_ADDRESS", stakingVaultBeacon: 
"STAKING_VAULT_BEACON_ADDRESS", validatorConsolidationRequests: "VALIDATOR_CONSOLIDATION_REQUESTS_ADDRESS", + // consolidation + consolidationBus: "CONSOLIDATION_BUS_ADDRESS", + consolidationMigrator: "CONSOLIDATION_MIGRATOR_ADDRESS", } as ProtocolNetworkItems; const getPrefixedEnv = (prefix: string, obj: ProtocolNetworkItems) => @@ -101,6 +105,8 @@ async function getLocalNetworkConfig(network: string, source: "fork" | "scratch" stakingVaultBeacon: config[Sk.stakingVaultBeacon].address, operatorGrid: config[Sk.operatorGrid].proxy.address, validatorConsolidationRequests: config[Sk.validatorConsolidationRequests].address, + consolidationBus: config[Sk.consolidationBus].proxy.address, + consolidationMigrator: config[Sk.consolidationMigrator].proxy.address, }; return new ProtocolNetworkConfig(getPrefixedEnv(network.toUpperCase(), defaultEnv), defaults, `${network}-${source}`); } @@ -135,6 +141,8 @@ async function getForkingNetworkConfig(): Promise { stakingVaultBeacon: state[Sk.stakingVaultBeacon]?.address, operatorGrid: state[Sk.operatorGrid]?.proxy.address, validatorConsolidationRequests: state[Sk.validatorConsolidationRequests]?.address, + consolidationBus: state[Sk.consolidationBus]?.proxy.address, + consolidationMigrator: state[Sk.consolidationMigrator]?.proxy.address, }; const chainId = state[Sk.chainId]; @@ -146,6 +154,7 @@ async function getForkingNetworkConfig(): Promise { export async function getNetworkConfig(network: string): Promise { switch (network) { case "hardhat": + case "localhost": if (getMode() === "scratch") { return getLocalNetworkConfig(network, "scratch"); } diff --git a/lib/protocol/provision.ts b/lib/protocol/provision.ts index 45f5f88527..36902cdc7f 100644 --- a/lib/protocol/provision.ts +++ b/lib/protocol/provision.ts @@ -1,3 +1,5 @@ +import { ZeroAddress } from "ethers"; + import { certainAddress, ether, impersonate, log } from "lib"; import { ensureEIP4788BeaconBlockRootContractPresent, @@ -20,6 +22,30 @@ import { ProtocolContext } from 
"./types"; let alreadyProvisioned = false; +const ensureNonZeroDepositsReserveTarget = async (ctx: ProtocolContext, target: bigint = ether("8")) => { + const { acl, lido } = ctx.contracts; + if ((await lido.getDepositsReserveTarget()) > 0n) return; + + const role = await lido.BUFFER_RESERVE_MANAGER_ROLE(); + const agent = await ctx.getSigner("agent"); + const hasRole = await acl["hasPermission(address,address,bytes32)"](agent.address, lido.address, role); + if (!hasRole) { + const permissionManager = await acl.getPermissionManager(lido.address, role); + if (permissionManager === ZeroAddress) { + const voting = await ctx.getSigner("voting"); + await acl.connect(voting).createPermission(agent.address, lido.address, role, agent.address); + } else { + if (permissionManager.toLowerCase() !== agent.address.toLowerCase()) { + throw new Error(`BUFFER_RESERVE_MANAGER_ROLE manager must be agent, got: ${permissionManager}`); + } + await acl.connect(agent).grantPermission(agent.address, lido.address, role); + } + } + + await lido.connect(agent).setDepositsReserveTarget(target); + log.debug("Set non-zero deposits reserve target", { target: target.toString() }); +}; + /** * In order to make the protocol fully operational from scratch deploy, the additional steps are required: */ @@ -56,6 +82,7 @@ export const provision = async (ctx: ProtocolContext) => { // await ethHolder.sendTransaction({ to: ctx.contracts.lido.address, value: ether("100000") }); await ensureStakeLimit(ctx); + await ensureNonZeroDepositsReserveTarget(ctx); await ensureDsmGuardians(ctx, 3n, 2n); diff --git a/lib/protocol/types.ts b/lib/protocol/types.ts index c4de561d2e..a5e5baa2a4 100644 --- a/lib/protocol/types.ts +++ b/lib/protocol/types.ts @@ -7,9 +7,11 @@ import { AccountingOracle, ACL, Burner, + ConsolidationBus, + ConsolidationGateway, + ConsolidationMigrator, DepositSecurityModule, HashConsensus, - ICSModule, IStakingModule, Kernel, LazyOracle, @@ -33,6 +35,7 @@ import { WithdrawalVault, WstETH, } from 
"typechain-types"; +import { StakingModuleStructOutput } from "typechain-types/contracts/0.8.25/sr/StakingRouter"; export type LogDescriptionExtended = LogDescription & { address?: string; @@ -56,6 +59,9 @@ export type ProtocolNetworkItems = { validatorExitDelayVerifier: string; validatorsExitBusOracle: string; triggerableWithdrawalsGateway: string; + consolidationGateway: string; + consolidationBus: string; + consolidationMigrator: string; withdrawalQueue: string; withdrawalVault: string; oracleDaemonConfig: string; @@ -67,6 +73,7 @@ export type ProtocolNetworkItems = { nor: string; sdvt: string; csm: string; + cmv2: string; // hash consensus hashConsensus: string; // vaults @@ -99,9 +106,11 @@ export interface ContractTypes { HashConsensus: HashConsensus; PredepositGuarantee: PredepositGuarantee; NodeOperatorsRegistry: NodeOperatorsRegistry; - ICSModule: ICSModule; WstETH: WstETH; TriggerableWithdrawalsGateway: TriggerableWithdrawalsGateway; + ConsolidationGateway: ConsolidationGateway; + ConsolidationBus: ConsolidationBus; + ConsolidationMigrator: ConsolidationMigrator; VaultFactory: VaultFactory; UpgradeableBeacon: UpgradeableBeacon; VaultHub: VaultHub; @@ -136,6 +145,9 @@ export type CoreContracts = { oracleDaemonConfig: LoadedContract; wstETH: LoadedContract; triggerableWithdrawalsGateway: LoadedContract; + consolidationGateway: LoadedContract; + consolidationBus: LoadedContract; + consolidationMigrator: LoadedContract; }; export type AragonContracts = { @@ -147,6 +159,14 @@ export type StakingModuleContracts = { nor: LoadedContract; sdvt: LoadedContract; csm?: LoadedContract; + cmv2?: LoadedContract; +}; + +export type StakingModules = { + nor: StakingModuleStructOutput; + sdvt: StakingModuleStructOutput; + csm?: StakingModuleStructOutput; + cmv2?: StakingModuleStructOutput; }; export type StakingModuleName = "nor" | "sdvt" | "csm"; @@ -186,10 +206,12 @@ export type Signer = keyof ProtocolSigners; export type ProtocolContextFlags = { withCSM: boolean; + 
withCMv2: boolean; }; export type ProtocolContext = { contracts: ProtocolContracts; + modules: StakingModules; signers: ProtocolSigners; interfaces: Array; flags: ProtocolContextFlags; diff --git a/lib/scratch.ts b/lib/scratch.ts index c54c095985..e778cf4a73 100644 --- a/lib/scratch.ts +++ b/lib/scratch.ts @@ -4,6 +4,7 @@ import path from "node:path"; import { ethers } from "hardhat"; import { log } from "./log"; +import { toBool } from "./string"; class StepsFileNotFoundError extends Error { constructor(filePath: string) { @@ -26,6 +27,13 @@ class MigrationMainFunctionError extends Error { } } +class MigrationSkipFunctionError extends Error { + constructor(filePath: string) { + super(`Migration file ${filePath} exports 'skip' but it is not a function!`); + this.name = "MigrationSkipFunctionError"; + } +} + const deployedSteps: string[] = []; async function applySteps(steps: string[]) { @@ -44,11 +52,6 @@ async function applySteps(steps: string[]) { } export async function deployUpgrade(networkName: string, stepsFile: string): Promise { - // Hardhat network is a fork of mainnet so we need to use the mainnet-fork steps - if (networkName === "hardhat") { - networkName = "mainnet-fork"; - } - try { const steps = loadSteps(stepsFile); await applySteps(steps); @@ -97,13 +100,23 @@ export const resolveMigrationFile = (step: string): string => { */ export async function applyMigrationScript(migrationFile: string): Promise { const fullPath = path.resolve(migrationFile); - const { main } = await import(fullPath); + const { main, skip } = await import(fullPath); + const allowSkipSteps = toBool(process.env.ALLOW_SKIP_STEPS); if (typeof main !== "function") { throw new MigrationMainFunctionError(migrationFile); } + if (skip !== undefined && typeof skip !== "function") { + throw new MigrationSkipFunctionError(migrationFile); + } + try { + if (allowSkipSteps && skip && (await skip())) { + log.scriptSkip(migrationFile); + return; + } + log.scriptStart(migrationFile); await 
main(); log.scriptFinish(migrationFile); diff --git a/lib/state-file.ts b/lib/state-file.ts index 04a887f36f..a609276a58 100644 --- a/lib/state-file.ts +++ b/lib/state-file.ts @@ -1,7 +1,7 @@ import { readFileSync, writeFileSync } from "node:fs"; import { resolve } from "node:path"; -import { network as hardhatNetwork } from "hardhat"; +import { ethers, network as hardhatNetwork } from "hardhat"; import { readScratchParameters, scratchParametersToDeploymentState } from "scripts/utils/scratch"; const NETWORK_STATE_FILE_PREFIX = "deployed-"; @@ -66,11 +66,11 @@ export enum Sk { callsScript = "callsScript", vestingParams = "vestingParams", withdrawalVault = "withdrawalVault", + circuitBreaker = "circuitBreaker", gateSeal = "gateSeal", gateSealV3 = "gateSealV3", gateSealFactory = "gateSealFactory", gateSealTW = "gateSealTW", - circuitBreaker = "circuitBreaker", resealManager = "resealManager", stakingRouter = "stakingRouter", burner = "burner", @@ -99,6 +99,9 @@ export enum Sk { // Triggerable withdrawals validatorExitDelayVerifier = "validatorExitDelayVerifier", triggerableWithdrawalsGateway = "triggerableWithdrawalsGateway", + consolidationGateway = "consolidationGateway", + consolidationBus = "consolidationBus", + consolidationMigrator = "consolidationMigrator", // Vaults predepositGuarantee = "predepositGuarantee", stakingVaultImplementation = "stakingVaultImplementation", @@ -108,19 +111,32 @@ export enum Sk { v3Template = "v3Template", v3Addresses = "v3Addresses", v3VoteScript = "v3VoteScript", + stakingRouterV3VoteScript = "stakingRouterV3VoteScript", operatorGrid = "operatorGrid", validatorConsolidationRequests = "validatorConsolidationRequests", lazyOracle = "lazyOracle", + topUpGateway = "topUpGateway", v3TemporaryAdmin = "v3TemporaryAdmin", // Dual Governance dgDualGovernance = "dg:dualGovernance", dgEmergencyProtectedTimelock = "dg:emergencyProtectedTimelock", + depositsTempStorage = "depositsTempStorage", + beaconChainDepositor = "beaconChainDepositor", + 
srLib = "srLib", // Easy Track easyTrack = "easyTrack", easyTrackEVMScriptExecutor = "easyTrackEVMScriptExecutor", vaultsAdapter = "vaultsAdapter", // Harnesses alertingHarness = "alertingHarness", + // protocol upgrade + upgradeConfig = "upgradeConfig", + upgradeTemplate = "upgradeTemplate", + upgradeVoteScript = "upgradeVoteScript", + upgradeTemporaryAdmin = "upgradeTemporaryAdmin", + // csm & cm + csm_CSM = "csm:CSM", + csm_CM = "csm:CM", } export function getAddress(contractKey: Sk, state: DeploymentState): string { @@ -153,6 +169,9 @@ export function getAddress(contractKey: Sk, state: DeploymentState): string { case Sk.vaultHub: case Sk.dgDualGovernance: case Sk.dgEmergencyProtectedTimelock: + case Sk.consolidationBus: + case Sk.consolidationMigrator: + case Sk.topUpGateway: return state[contractKey].proxy.address; case Sk.apmRegistryFactory: case Sk.callsScript: @@ -164,9 +183,10 @@ export function getAddress(contractKey: Sk, state: DeploymentState): string { case Sk.ensFactory: case Sk.evmScriptRegistryFactory: case Sk.executionLayerRewardsVault: + case Sk.circuitBreaker: case Sk.gateSeal: case Sk.gateSealV3: - case Sk.circuitBreaker: + case Sk.gateSealTW: case Sk.resealManager: case Sk.hashConsensusForAccountingOracle: case Sk.hashConsensusForValidatorsExitBusOracle: @@ -182,17 +202,36 @@ export function getAddress(contractKey: Sk, state: DeploymentState): string { case Sk.tokenRebaseNotifierV3: case Sk.validatorExitDelayVerifier: case Sk.triggerableWithdrawalsGateway: + case Sk.consolidationGateway: case Sk.stakingVaultFactory: case Sk.minFirstAllocationStrategy: case Sk.validatorConsolidationRequests: case Sk.v3VoteScript: + case Sk.stakingRouterV3VoteScript: + case Sk.depositsTempStorage: + case Sk.beaconChainDepositor: + case Sk.vaultsAdapter: case Sk.easyTrack: + case Sk.upgradeTemporaryAdmin: + case Sk.upgradeTemplate: + case Sk.upgradeVoteScript: case Sk.gateSealFactory: return state[contractKey].address; default: throw new Error(`Unsupported 
contract entry key ${contractKey}`); } } +export function getAddressValidated(contractKey: Sk, state: DeploymentState): string | null { + if (!state[contractKey]) return null; + // allow error throw on missed items + let address = getAddress(contractKey, state); + try { + address = ethers.getAddress(address); + return address !== "0x0000000000000000000000000000000000000000" ? address : null; + } catch { + return null; + } +} export function readNetworkState({ deployer, @@ -238,13 +277,13 @@ export function setValueInState(key: Sk, value: unknown): DeploymentState { return state; } -export function incrementGasUsed(increment: bigint | number, useStateFile = true) { +export function incrementGasUsed(increment: bigint | number, useStateFile = true, key: Sk = Sk.scratchDeployGasUsed) { if (!useStateFile) { return; } const state = readNetworkState(); - state[Sk.scratchDeployGasUsed] = (BigInt(state[Sk.scratchDeployGasUsed] || 0) + BigInt(increment)).toString(); + state[key] = (BigInt(state[key] || 0) + BigInt(increment)).toString(); persistNetworkState(state); } diff --git a/lib/storage.ts b/lib/storage.ts index 627559b018..398c935fcf 100644 --- a/lib/storage.ts +++ b/lib/storage.ts @@ -4,6 +4,13 @@ import { getStorageAt } from "@nomicfoundation/hardhat-network-helpers"; import { streccak } from "lib"; +const MASK_128_BITS = (1n << 128n) - 1n; + +export type Uint128Pair = { + low: bigint; + high: bigint; +}; + /** * @dev Get the storage at a given position for a given contract * @param contract - The contract to get the storage at @@ -13,3 +20,29 @@ import { streccak } from "lib"; export async function getStorageAtPosition(contract: AddressLike, positionTag: string): Promise { return getStorageAt(await resolveAddress(contract), streccak(positionTag)); } + +/** + * @dev Splits a uint256 slot value into low/high uint128 parts. 
+ * @param value - Raw value returned by getStorageAtPosition (hex string or bigint) + * @returns Parsed low and high 128-bit values + */ +export function splitStorageUint256ToUint128Pair(value: string | bigint): Uint128Pair { + const rawValue = typeof value === "bigint" ? value : BigInt(value); + return { + low: rawValue & MASK_128_BITS, + high: rawValue >> 128n, + }; +} + +/** + * @dev Reads storage at a tagged position and returns low/high uint128 parts. + * @param contract - The contract to read storage from + * @param positionTag - The tag of the position to read + * @returns Parsed low and high 128-bit values + */ +export async function getStorageAtPositionAsUint128Pair( + contract: AddressLike, + positionTag: string, +): Promise { + return splitStorageUint256ToUint128Pair(await getStorageAtPosition(contract, positionTag)); +} diff --git a/lib/string.ts b/lib/string.ts index efccb74e83..20d2e9690c 100644 --- a/lib/string.ts +++ b/lib/string.ts @@ -29,3 +29,8 @@ export function hexToBytes(hex: string): Uint8Array { const cleanHex = hex.startsWith("0x") ? 
hex.slice(2) : hex; return new Uint8Array(Buffer.from(cleanHex, "hex")); } + +export function toBool(value: string | undefined): boolean { + const trimmedValue = value?.trim().toLowerCase(); + return trimmedValue === "true" || trimmedValue === "1"; +} diff --git a/lib/top-ups.ts b/lib/top-ups.ts new file mode 100644 index 0000000000..a466bd1e23 --- /dev/null +++ b/lib/top-ups.ts @@ -0,0 +1,28 @@ +import { ethers } from "hardhat"; + +import { SSZValidatorsMerkleTree } from "typechain-types"; + +import { generateValidator } from "lib"; + +const DEFAULT_GI_VALIDATOR_0 = "0x0000000000000000000000000000000000000000000000000096000000000028"; + +export const prepareLocalMerkleTree = async (giValidator0: string = DEFAULT_GI_VALIDATOR_0) => { + const stateTree: SSZValidatorsMerkleTree = await ethers.deployContract("SSZValidatorsMerkleTree", [giValidator0], {}); + + // leafCount before adding = offset to validators field (22*2^40 for mainnet GI) + const firstValidatorLeafIndex = await stateTree.leafCount(); + + // generate first validator to initialize the tree + const firstValidator = generateValidator(); + await stateTree.addValidatorsLeaf(firstValidator.container); + + // GI of validator[0] is known from the spec + const gIFirstValidator = giValidator0; + + return { + stateTree, + gIFirstValidator, + firstValidatorLeafIndex, + firstValidator, + }; +}; diff --git a/lib/wc.ts b/lib/wc.ts new file mode 100644 index 0000000000..d392ca3207 --- /dev/null +++ b/lib/wc.ts @@ -0,0 +1,36 @@ +import { + MAX_EFFECTIVE_BALANCE_WC_TYPE_01, + MAX_EFFECTIVE_BALANCE_WC_TYPE_02, + WithdrawalCredentialsType, +} from "./constants"; +import { de0x, en0x, randomString } from "./string"; + +/** + * Returns the max effective balance for the given withdrawal credentials type + */ +export const wcTypeMaxEB = (withdrawalType: WithdrawalCredentialsType): bigint => { + switch (withdrawalType) { + case WithdrawalCredentialsType.WC0x01: + return MAX_EFFECTIVE_BALANCE_WC_TYPE_01; + case 
WithdrawalCredentialsType.WC0x02: + return MAX_EFFECTIVE_BALANCE_WC_TYPE_02; + default: { + const _exhaustive: never = withdrawalType; + return _exhaustive; + } + } +}; + +/** + * Generates random Withdrawal Credentials of type 0x01 + */ +export const randomWCType1 = () => { + return en0x(WithdrawalCredentialsType.WC0x01) + de0x(randomString(31)); +}; + +/** + * Generates random Withdrawal Credentials of type 0x02 + */ +export const randomWCType2 = () => { + return en0x(WithdrawalCredentialsType.WC0x02) + de0x(randomString(31)); +}; diff --git a/package.json b/package.json index beb384068f..5d04141a7c 100644 --- a/package.json +++ b/package.json @@ -18,38 +18,48 @@ "format": "prettier . --check", "format:fix": "prettier . --write", "check": "yarn lint && yarn format && yarn typecheck", - "test": "hardhat test test/**/*.test.ts --parallel", + "test": "NODE_OPTIONS='--max-old-space-size=10240' hardhat test test/**/*.test.ts --parallel", "test:bls:blst": "SKIP_INTERFACES_CHECK=true SKIP_LINT_SOLIDITY=true SKIP_GAS_REPORT=true hardhat test test/common/bls.blst.e2e.fuzz.test.ts", "test:forge": "forge test", - "test:coverage": "COVERAGE=unit hardhat coverage", + "test:coverage": "NODE_OPTIONS='--max-old-space-size=16384' COVERAGE=unit hardhat coverage", "test:coverage:integration": "COVERAGE=integration MODE=scratch hardhat coverage", "test:coverage:full": "COVERAGE=full MODE=scratch hardhat coverage", "test:sequential": "hardhat test test/**/*.test.ts", "test:trace": "hardhat test test/**/*.test.ts --trace --disabletracer", "test:fulltrace": "hardhat test test/**/*.test.ts --fulltrace --disabletracer", "test:watch": "SKIP_GAS_REPORT=true SKIP_CONTRACT_SIZE=true hardhat watch test", - "test:integration": "MODE=forking hardhat test test/integration/**/*.ts", - "test:integration:trace": "MODE=forking hardhat test test/integration/**/*.ts --trace --disabletracer", - "test:integration:fulltrace": "MODE=forking hardhat test test/integration/**/*.ts --fulltrace 
--disabletracer", - "test:integration:upgrade": "GAS_LIMIT=16000000 STEPS_FILE=upgrade/steps-mock-upgrade-v3-02.json yarn test:integration:upgrade:helper test/integration/**/*.ts", - "test:integration:upgrade:helper": "MODE=forking UPGRADE=true GAS_PRIORITY_FEE=1 GAS_MAX_FEE=100 DEPLOYER=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 hardhat test --trace --disabletracer", - "test:integration:upgrade-template": "cp deployed-mainnet.json deployed-mainnet-upgrade.json && NETWORK_STATE_FILE=deployed-mainnet-upgrade.json UPGRADE_PARAMETERS_FILE=scripts/upgrade/upgrade-params-mainnet.toml MODE=forking TEMPLATE_TEST=true GAS_PRIORITY_FEE=1 GAS_MAX_FEE=100 DEPLOYER=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 hardhat test test/integration/upgrade/*.ts --fulltrace --disabletracer", + "test:integration": "bash scripts/run-test-integration.sh", + "test:integration:trace": "TRACE=trace yarn test:integration", + "test:integration:fulltrace": "TRACE=fulltrace yarn test:integration", + "test:integration:upgrade": "MODE=forking UPGRADE=true bash scripts/run-test-integration-upgrade.sh", + "test:integration:upgrade-template": "TEMPLATE_TEST=true yarn test:integration:upgrade", + "test:integration:upgrade:mainnet": "NETWORK=mainnet yarn test:integration:upgrade", + "test:integration:upgrade:local": "TRACE=fulltrace MODE=forking UPGRADE=true NETWORK=local bash scripts/run-test-integration.sh", "test:integration:scratch": "DEPLOYER=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 SKIP_INTERFACES_CHECK=true SKIP_CONTRACT_SIZE=true SKIP_GAS_REPORT=true GENESIS_TIME=1639659600 GAS_PRIORITY_FEE=1 GAS_MAX_FEE=100 yarn test:integration:scratch:helper test/integration/**/*.ts", "test:integration:scratch:helper": "MODE=scratch hardhat test", "test:integration:scratch:trace": "MODE=scratch hardhat test test/integration/**/*.ts --trace --disabletracer", "test:integration:scratch:fulltrace": "MODE=scratch hardhat test test/integration/**/*.ts --fulltrace --disabletracer", "test:integration:fork:local": 
"MODE=scratch hardhat test test/integration/**/*.ts --network local", + "scratch:deploy:localhost": "SKIP_INTERFACES_CHECK=true SKIP_CONTRACT_SIZE=true SKIP_GAS_REPORT=true GENESIS_TIME=1639659600 GAS_PRIORITY_FEE=1 GAS_MAX_FEE=100 DEPLOYER=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 MODE=scratch hardhat run scripts/scratch/scratch-deploy.ts --network localhost", "test:fork:pdg-validator": "MODE=forking hardhat test test/integration/vaults/scenario/pdg-specific-validator.integration.ts", "validate:configs": "yarn hardhat validate-configs", "typecheck": "tsc --noEmit", "abis:extract": "hardhat abis:extract", "verify:deployed": "hardhat verify:deployed", + "upgrade:fork-node": "bash scripts/run-fork-node.sh", + "upgrade:helper:run": "bash scripts/dao-upgrade.sh", + "upgrade:deploy:base": "STEPS_FILE=upgrade/steps-deploy-base.json yarn upgrade:helper:run", + "upgrade:deploy:template": "STEPS_FILE=upgrade/steps-deploy-template.json yarn upgrade:helper:run", + "upgrade:deploy:base:local": "MODE=forking yarn upgrade:deploy:base", + "upgrade:deploy:template:local": "MODE=forking yarn upgrade:deploy:template", + "upgrade:mock-voting": "MODE=forking STEPS_FILE=upgrade/steps-mock-voting.json yarn upgrade:helper:run", + "upgrade:mock-upgrade": "MODE=forking STEPS_FILE=upgrade/steps-mock-upgrade.json yarn upgrade:helper:run", "deploy:v3.0.2": "STEPS_FILE=upgrade/steps-deploy-v3-02.json hardhat run scripts/utils/migrate.ts", "postinstall": "husky" }, "lint-staged": { "./**/*.ts": [ - "eslint --max-warnings=0 --fix" + "eslint --max-warnings=0 --fix --no-warn-ignored" ], "./**/*.{ts,md,json}": [ "prettier --write" diff --git a/remappings.txt b/remappings.txt new file mode 100644 index 0000000000..4b57a1c03c --- /dev/null +++ b/remappings.txt @@ -0,0 +1,9 @@ +@aragon/=node_modules/@aragon/ +@openzeppelin/=node_modules/@openzeppelin/ +ens/=node_modules/@aragon/os/contracts/lib/ens/ +forge-std/=foundry/lib/forge-std/src/ +hardhat/=node_modules/hardhat/ 
+math/=node_modules/@aragon/os/contracts/lib/math/ +misc/=node_modules/@aragon/os/contracts/lib/misc/ +openzeppelin-solidity/=node_modules/openzeppelin-solidity/ +token/=node_modules/@aragon/os/contracts/lib/token/ diff --git a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts index 3096a7067c..d0763af7a8 100644 --- a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts +++ b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts @@ -118,6 +118,7 @@ export async function main(): Promise { oracleDaemonConfig: await locator.oracleDaemonConfig(), validatorExitDelayVerifier: await locator.validatorExitDelayVerifier(), triggerableWithdrawalsGateway: await locator.triggerableWithdrawalsGateway(), + consolidationGateway: await locator.consolidationGateway(), accounting: await locator.accounting(), predepositGuarantee: await locator.predepositGuarantee(), wstETH: wstethAddress, @@ -125,6 +126,7 @@ export async function main(): Promise { vaultFactory: newVaultFactoryAddress, lazyOracle: await locator.lazyOracle(), operatorGrid: await locator.operatorGrid(), + topUpGateway: await locator.topUpGateway(), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); const newLocatorAddress = await lidoLocatorImpl.getAddress(); diff --git a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts index 04bf18628a..3df8d8aa9f 100644 --- a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts +++ b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts @@ -108,6 +108,8 @@ export async function main(): Promise { vaultFactory: newVaultFactoryAddress, lazyOracle: await locator.lazyOracle(), operatorGrid: await locator.operatorGrid(), + 
consolidationGateway: await locator.consolidationGateway(), + topUpGateway: await locator.topUpGateway(), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); const newLocatorAddress = await lidoLocatorImpl.getAddress(); diff --git a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts index 6562b48993..5e25aa7718 100644 --- a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts +++ b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts @@ -103,6 +103,8 @@ export async function main(): Promise { vaultFactory: newVaultFactoryAddress, lazyOracle: await locator.lazyOracle(), operatorGrid: await locator.operatorGrid(), + consolidationGateway: await locator.consolidationGateway(), + topUpGateway: await locator.topUpGateway(), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); const newLocatorAddress = await lidoLocatorImpl.getAddress(); diff --git a/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts b/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts index 891a3e6f54..e29ca6db60 100644 --- a/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts +++ b/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts @@ -62,6 +62,8 @@ export async function main() { const accounting = await deployBehindOssifiableProxy(Sk.accounting, "Accounting", proxyContractsOwner, deployer, [ locatorAddress, lidoAddress, + Number(chainSpec.secondsPerSlot), + Number(chainSpec.genesisTime), ]); // @@ -311,6 +313,20 @@ export async function main() { ]); console.log("VaultFactory address", await vaultFactory.getAddress()); + const consolidationGateway = await deployWithoutProxy(Sk.consolidationGateway, "ConsolidationGateway", deployer, [ + agentAddress, // TODO: check + locator.address, + // ToDo: Replace 
dummy parameters with real ones + 10, // maxConsolidationRequestsLimit, + 1, // consolidationsPerFrame, + 60, // frameDurationInSec + pdgDeployParams.gIndex, // gIFirstValidatorPrev + pdgDeployParams.gIndexAfterChange, // gIFirstValidatorCurr + pdgDeployParams.changeSlot, // pivotSlot + ]); + + console.log("ConsolidationGateway address", await consolidationGateway.getAddress()); + // // Deploy new LidoLocator implementation // @@ -330,6 +346,7 @@ export async function main() { oracleDaemonConfig: await locator.oracleDaemonConfig(), validatorExitDelayVerifier: getAddress(Sk.validatorExitDelayVerifier, state), triggerableWithdrawalsGateway: getAddress(Sk.triggerableWithdrawalsGateway, state), + consolidationGateway: consolidationGateway.address, accounting: accounting.address, predepositGuarantee: predepositGuarantee.address, wstETH: wstethAddress, @@ -337,6 +354,7 @@ export async function main() { vaultFactory: vaultFactory.address, lazyOracle: lazyOracle.address, operatorGrid: operatorGrid.address, + topUpGateway: getAddress(Sk.topUpGateway, state), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); diff --git a/scripts/dao-deploy.sh b/scripts/dao-deploy.sh index ad507c6991..fa2f3a0866 100755 --- a/scripts/dao-deploy.sh +++ b/scripts/dao-deploy.sh @@ -3,13 +3,13 @@ set -e +u set -o pipefail # Check for required environment variables -if [[ -z "${DEPLOYER}" ]]; then +if [[ -z ${DEPLOYER} ]]; then echo "Error: Environment variable DEPLOYER must be set" exit 1 fi echo "DEPLOYER is $DEPLOYER" -if [[ -z "${NETWORK}" ]]; then +if [[ -z ${NETWORK} ]]; then echo "Error: Environment variable NETWORK must be set" exit 1 fi @@ -25,5 +25,8 @@ export STEPS_FILE=scratch/steps.json yarn hardhat --network $NETWORK run --no-compile scripts/utils/migrate.ts +# Need this to get sure the last transactions are mined +yarn hardhat --network $NETWORK run --no-compile scripts/utils/mine.ts + # TODO # yarn hardhat --network 
$NETWORK run --no-compile scripts/scratch/steps/90-check-dao.ts diff --git a/scripts/dao-hoodi-v3-phase-2.sh b/scripts/dao-hoodi-v3-phase-2.sh deleted file mode 100755 index 2f40eeaf56..0000000000 --- a/scripts/dao-hoodi-v3-phase-2.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e +u -set -o pipefail - -export NETWORK=${NETWORK:="hoodi"} # if defined use the value set to default otherwise -export RPC_URL=${RPC_URL:="http://127.0.0.1:8545"} # if defined use the value set to default otherwise - -export DEPLOYER=${DEPLOYER:="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"} # first acc of default mnemonic "test test ..." -export GAS_PRIORITY_FEE=1 -export GAS_MAX_FEE=100 - -export NETWORK_STATE_FILE=${NETWORK_STATE_FILE:="deployed-hoodi.json"} -export STEPS_FILE=upgrade/steps-upgrade-hoodi-v3-phase-2.json -export UPGRADE_PARAMETERS_FILE=scripts/upgrade/upgrade-params-hoodi.toml - -yarn hardhat --network $NETWORK run --no-compile scripts/utils/migrate.ts diff --git a/scripts/dao-local-deploy.sh b/scripts/dao-local-deploy.sh index 119ff803ce..f00b4efe0b 100755 --- a/scripts/dao-local-deploy.sh +++ b/scripts/dao-local-deploy.sh @@ -18,9 +18,6 @@ export SCRATCH_DEPLOY_CONFIG="scripts/scratch/deploy-params-testnet.toml" bash scripts/dao-deploy.sh -# Need this to get sure the last transactions are mined -yarn hardhat --network $NETWORK run --no-compile scripts/utils/mine.ts - # Run acceptance tests export INTEGRATION_WITH_CSM="off" yarn test:integration:fork:local diff --git a/scripts/dao-local-upgrade.sh b/scripts/dao-local-upgrade.sh index f0e67fd90f..82629bb558 100755 --- a/scripts/dao-local-upgrade.sh +++ b/scripts/dao-local-upgrade.sh @@ -5,7 +5,6 @@ set -o pipefail export NETWORK=local export RPC_URL=${RPC_URL:="http://127.0.0.1:8555"} # if defined use the value set to default otherwise -export GENESIS_TIME=1639659600 # just some time # export WITHDRAWAL_QUEUE_BASE_URI="<< SET IF REQUIED >>" # export DSM_PREDEFINED_ADDRESS="<< SET IF REQUIED >>" @@ -13,12 +12,7 @@ 
export DEPLOYER=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 # first acc of defau export GAS_PRIORITY_FEE=1 export GAS_MAX_FEE=100 -export NETWORK_STATE_FILE="deployed-${NETWORK}.json" - bash scripts/dao-upgrade.sh -# Need this to get sure the last transactions are mined -yarn hardhat --network $NETWORK run --no-compile scripts/utils/mine.ts - # Run acceptance tests yarn test:integration:fork:local diff --git a/scripts/dao-upgrade-mock-voting.sh b/scripts/dao-upgrade-mock-voting.sh new file mode 100755 index 0000000000..d4626e8f61 --- /dev/null +++ b/scripts/dao-upgrade-mock-voting.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -e +u +set -o pipefail + +# Check for required environment variables +if [[ -z ${DEPLOYER} ]]; then + echo "Error: Environment variable DEPLOYER must be set" + exit 1 +fi +echo "DEPLOYER is $DEPLOYER" + +if [[ -z ${NETWORK} ]]; then + echo "Error: Environment variable NETWORK must be set" + exit 1 +fi +echo "NETWORK is $NETWORK" + +# Compile contracts +echo "Compiling contracts..." +yarn compile + +# Generic migration steps files +export NETWORK_STATE_FILE=${NETWORK_STATE_FILE:="deployed-${NETWORK}.json"} +export UPGRADE_PARAMETERS_FILE=${UPGRADE_PARAMETERS_FILE:="scripts/upgrade/upgrade-params-${NETWORK}.toml"} + +export STEPS_FILE=${STEPS_FILE:="upgrade/steps-mock-voting.json"} +export PROPOSAL_METADATA=${PROPOSAL_METADATA:="mock-proposal-metadata"} +export PROPOSAL_ID=${PROPOSAL_ID:=""} + +yarn hardhat --network $NETWORK run --no-compile scripts/utils/migrate.ts + +# Need this to get sure the last transactions are mined +yarn hardhat --network $NETWORK run --no-compile scripts/utils/mine.ts diff --git a/scripts/dao-upgrade.sh b/scripts/dao-upgrade.sh new file mode 100755 index 0000000000..6ae35dfc09 --- /dev/null +++ b/scripts/dao-upgrade.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +set -e +u +set -o pipefail + +if [ -f .env ]; then + . 
.env +fi + +if [[ -z ${NETWORK} ]]; then + echo "Error: Environment variable NETWORK must be set" + exit 1 +fi +echo "NETWORK: $NETWORK" + +# Derive RPC_URL from _RPC_URL if not set explicitly +if [[ -z ${RPC_URL} ]]; then + RPC_VAR="${NETWORK^^}_RPC_URL" + RPC_URL="${!RPC_VAR}" + if [[ -z ${RPC_URL} ]]; then + echo "Error: RPC_URL is not set and ${RPC_VAR} is also not set" + exit 1 + fi + echo "RPC_URL derived from \${${RPC_VAR}}" + export RPC_URL +fi +# echo "RPC_URL: $RPC_URL" + +# Generic migration steps files +export NETWORK_STATE_FILE=${NETWORK_STATE_FILE-"deployed-${NETWORK}.json"} +echo "NETWORK_STATE_FILE: $NETWORK_STATE_FILE" +# Upgrade parameters file +export UPGRADE_PARAMETERS_FILE=${UPGRADE_PARAMETERS_FILE-"scripts/upgrade/upgrade-params-${NETWORK}.toml"} +echo "UPGRADE_PARAMETERS_FILE: $UPGRADE_PARAMETERS_FILE" +export STEPS_FILE=${STEPS_FILE-"upgrade/steps-upgrade.json"} +echo "STEPS_FILE: $STEPS_FILE" + +if [[ ${MODE:-} == "forking" ]]; then + echo "MODE: forking!" + + if [[ -f $NETWORK_STATE_FILE ]]; then + TEMP_NETWORK_STATE_FILE="deployed-local.json" + if [[ ! -f $TEMP_NETWORK_STATE_FILE ]]; then + cp "$NETWORK_STATE_FILE" "$TEMP_NETWORK_STATE_FILE" + echo "Copied $NETWORK_STATE_FILE to $TEMP_NETWORK_STATE_FILE" + fi + export NETWORK_STATE_FILE="deployed-local.json" + fi + + if [[ -f $UPGRADE_PARAMETERS_FILE ]]; then + TEMP_UPGRADE_PARAMETERS_FILE="scripts/upgrade/upgrade-params-local.toml" + if [[ ! 
-f $TEMP_UPGRADE_PARAMETERS_FILE ]]; then + cp "$UPGRADE_PARAMETERS_FILE" "$TEMP_UPGRADE_PARAMETERS_FILE" + echo "Copied $UPGRADE_PARAMETERS_FILE to $TEMP_UPGRADE_PARAMETERS_FILE" + fi + export UPGRADE_PARAMETERS_FILE=$TEMP_UPGRADE_PARAMETERS_FILE + fi + + export NETWORK="local" + export LOCAL_RPC_URL="http://localhost:8545" + export HOLDER=${HOLDER-"${DEPLOYER}"} + echo "HOLDER: $HOLDER" + export DEPLOYER="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" + export ALLOW_SKIP_STEPS=${ALLOW_SKIP_STEPS-"true"} + echo "ALLOW_SKIP_STEPS: $ALLOW_SKIP_STEPS" + export AUTO_CONFIRM=${AUTO_CONFIRM-"false"} + echo "AUTO_CONFIRM: $AUTO_CONFIRM" + # export GAS_LIMIT=16000000 + export GAS_PRIORITY_FEE=1 + export GAS_MAX_FEE=100 +fi + +# Set default to local test deployer +export DEPLOYER=${DEPLOYER-"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"} +echo "DEPLOYER: $DEPLOYER" + +echo "Compiling contracts..." +yarn hardhat compile + +echo "Starting migration..." +yarn hardhat --network $NETWORK run --no-compile scripts/utils/migrate.ts + +# Need this to get sure the last transactions are mined +# yarn hardhat --network $NETWORK run --no-compile scripts/utils/mine.ts diff --git a/scripts/defaults/local-devnet-defaults.json b/scripts/defaults/local-devnet-defaults.json index 3bacca983d..6cc28a3c3e 100644 --- a/scripts/defaults/local-devnet-defaults.json +++ b/scripts/defaults/local-devnet-defaults.json @@ -88,7 +88,7 @@ }, "accountingOracle": { "deployParameters": { - "consensusVersion": 4 + "consensusVersion": 6 } }, "hashConsensusForValidatorsExitBusOracle": { @@ -99,7 +99,11 @@ }, "validatorsExitBusOracle": { "deployParameters": { - "consensusVersion": 4 + "maxValidatorsPerReport": 600, + "maxExitBalanceEth": 416000, + "balancePerFrameEth": 32, + "frameDurationInSec": 48, + "consensusVersion": 5 } }, "depositSecurityModule": { @@ -111,19 +115,20 @@ }, "oracleReportSanityChecker": { "deployParameters": { - "exitedValidatorsPerDayLimit": 1500, - "appearedValidatorsPerDayLimit": 1500, - 
"deprecatedOneOffCLBalanceDecreaseBPLimit": 500, + "exitedEthAmountPerDayLimit": 57600, + "appearedEthAmountPerDayLimit": 57600, "annualBalanceIncreaseBPLimit": 1000, "simulatedShareRateDeviationBPLimit": 250, - "maxValidatorExitRequestsPerReport": 2000, + "maxBalanceExitRequestedPerReportInEth": 19200, "maxItemsPerExtraDataTransaction": 8, "maxNodeOperatorsPerExtraDataItem": 24, "requestTimestampMargin": 128, "maxPositiveTokenRebase": 5000000, - "initialSlashingAmountPWei": 1000, - "inactivityPenaltiesAmountPWei": 101, - "clBalanceOraclesErrorUpperBPLimit": 50 + "maxCLBalanceDecreaseBP": 360, + "clBalanceOraclesErrorUpperBPLimit": 50, + "consolidationEthAmountPerDayLimit": 93375, + "exitedValidatorEthAmountLimit": 32, + "externalPendingBalanceCapEth": 500 } }, "oracleDaemonConfig": { @@ -139,7 +144,7 @@ }, "nodeOperatorsRegistry": { "deployParameters": { - "stakingModuleName": "Curated", + "stakingModuleName": "curated-onchain-v1", "stakingModuleTypeId": "curated-onchain-v1", "exitDeadlineThresholdInSeconds": 86400 } diff --git a/scripts/run-fork-node.sh b/scripts/run-fork-node.sh new file mode 100755 index 0000000000..e47b67da8c --- /dev/null +++ b/scripts/run-fork-node.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +set -e +u +set -o pipefail + +. 
scripts/utils/common-env.sh + +load_env_var NETWORK || { + echo "Error: NETWORK must be set" + exit 1 +} +echo "NETWORK: $NETWORK" + +if [[ ${NETWORK:-} == "local" ]]; then + echo "Error: Network cannot be 'local'" + exit 1 +fi + +# Derive RPC_URL from _RPC_URL if not set explicitly +load_env_var RPC_URL || { + RPC_VAR="${NETWORK^^}_RPC_URL" + + load_env_var "$RPC_VAR" || { + echo "Error: RPC_URL or ${RPC_VAR} must be set" + exit 1 + } + echo "Derive RPC_URL from ${RPC_VAR}" + export RPC_URL="${!RPC_VAR:-}" +} +# echo "RPC_URL: $RPC_URL" + +load_env_var NETWORK_STATE_FILE "deployed-${NETWORK}.json" +echo "NETWORK_STATE_FILE: $NETWORK_STATE_FILE" + +load_env_var UPGRADE_PARAMETERS_FILE "scripts/upgrade/upgrade-params-${NETWORK}.toml" +echo "UPGRADE_PARAMETERS_FILE: $UPGRADE_PARAMETERS_FILE" + +TEMP_NETWORK_STATE_FILE="deployed-local.json" +TEMP_UPGRADE_PARAMETERS_FILE="scripts/upgrade/upgrade-params-local.toml" + +if [[ -f $TEMP_NETWORK_STATE_FILE ]]; then + rm -f $TEMP_NETWORK_STATE_FILE +fi + +if [[ -f $TEMP_UPGRADE_PARAMETERS_FILE ]]; then + rm -f $TEMP_UPGRADE_PARAMETERS_FILE +fi + +if [[ -f $NETWORK_STATE_FILE ]]; then + cp "$NETWORK_STATE_FILE" "$TEMP_NETWORK_STATE_FILE" + echo "Copied $NETWORK_STATE_FILE to $TEMP_NETWORK_STATE_FILE" + export NETWORK_STATE_FILE=$TEMP_NETWORK_STATE_FILE +fi + +if [[ -f $UPGRADE_PARAMETERS_FILE ]]; then + cp "$UPGRADE_PARAMETERS_FILE" "$TEMP_UPGRADE_PARAMETERS_FILE" + echo "Copied $UPGRADE_PARAMETERS_FILE to $TEMP_UPGRADE_PARAMETERS_FILE" + export UPGRADE_PARAMETERS_FILE=$TEMP_UPGRADE_PARAMETERS_FILE +fi + +FORK_NODE=${FORK_NODE:-anvil} +echo "FORK_NODE: $FORK_NODE" + +BLOCK_ARG=() +if [[ -n ${FORKING_BLOCK_NUMBER:-} ]]; then + echo "FORKING_BLOCK_NUMBER: ${FORKING_BLOCK_NUMBER}" + BLOCK_ARG=(--fork-block-number "$FORKING_BLOCK_NUMBER") +fi + +if [[ ${FORK_NODE:-} == "anvil" ]]; then + # --config-out localhost.json + anvil -f $RPC_URL "${BLOCK_ARG[@]}" --timeout 90000 --print-traces --steps-tracing --auto-impersonate +else + 
yarn hardhat node --fork $RPC_URL "${BLOCK_ARG[@]}" --nocompile --trace --gascost --vvv +fi diff --git a/scripts/run-test-integration-upgrade.sh b/scripts/run-test-integration-upgrade.sh new file mode 100755 index 0000000000..a20ed5153d --- /dev/null +++ b/scripts/run-test-integration-upgrade.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +set -e +u +set -o pipefail + +if [ -f .env ]; then + . .env +fi + +if [[ -z ${NETWORK} ]]; then + echo "Error: Environment variable NETWORK must be set" + exit 1 +fi +echo "NETWORK: $NETWORK" + +# Derive RPC_URL from <NETWORK>_RPC_URL if not set explicitly +if [[ -z ${RPC_URL} ]]; then + RPC_VAR="${NETWORK^^}_RPC_URL" + RPC_URL="${!RPC_VAR}" + if [[ -z ${RPC_URL} ]]; then + echo "Error: RPC_URL is not set and ${RPC_VAR} is also not set" + exit 1 + fi + echo "RPC_URL derived from \${${RPC_VAR}}" + export RPC_URL +fi +# echo "RPC_URL: $RPC_URL" + +# Set default to local test deployer +export DEPLOYER="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" +echo "DEPLOYER: $DEPLOYER" + +# Generic migration steps files +export NETWORK_STATE_FILE=${NETWORK_STATE_FILE:="deployed-${NETWORK}.json"} +if [[ ! -f $NETWORK_STATE_FILE ]]; then + echo "Error: Network state file not found: $NETWORK_STATE_FILE" + exit 1 +fi +echo "NETWORK_STATE_FILE: $NETWORK_STATE_FILE" + +# Upgrade parameters file +export UPGRADE_PARAMETERS_FILE=${UPGRADE_PARAMETERS_FILE:="scripts/upgrade/upgrade-params-${NETWORK}.toml"} +if [[ ! 
-f $UPGRADE_PARAMETERS_FILE ]]; then + echo "Error: Upgrade params file not found: $UPGRADE_PARAMETERS_FILE" + exit 1 +fi +echo "UPGRADE_PARAMETERS_FILE: $UPGRADE_PARAMETERS_FILE" + +export STEPS_FILE=${STEPS_FILE:="upgrade/steps-mock-upgrade.json"} +echo "STEPS_FILE: $STEPS_FILE" + +TEMP_NETWORK_STATE_FILE="deployed-${NETWORK}-upgrade.json" +cp -f "$NETWORK_STATE_FILE" "$TEMP_NETWORK_STATE_FILE" +export NETWORK_STATE_FILE=$TEMP_NETWORK_STATE_FILE + +TEMP_UPGRADE_PARAMETERS_FILE="upgrade-params-${NETWORK}-upgrade.toml" +cp -f "$UPGRADE_PARAMETERS_FILE" "$TEMP_UPGRADE_PARAMETERS_FILE" +export UPGRADE_PARAMETERS_FILE=$TEMP_UPGRADE_PARAMETERS_FILE + +export ALLOW_SKIP_STEPS=1 +export AUTO_CONFIRM=1 +export TEMPLATE_TEST=${TEMPLATE_TEST:="false"} +export GAS_LIMIT=16000000 +export GAS_PRIORITY_FEE=1 +export GAS_MAX_FEE=100 + +yarn hardhat test test/integration/**/*.ts --trace --disabletracer diff --git a/scripts/run-test-integration.sh b/scripts/run-test-integration.sh new file mode 100755 index 0000000000..2b136c2f9b --- /dev/null +++ b/scripts/run-test-integration.sh @@ -0,0 +1,153 @@ +#!/usr/bin/env bash +set -e +u +set -o pipefail + +. 
scripts/utils/common-env.sh + +load_env_var MODE "forking" +load_env_var UPGRADE "false" +load_env_var TEMPLATE_TEST "false" + +echo "MODE: $MODE" +echo "UPGRADE: $UPGRADE" + +load_env_var NETWORK "hardhat" +if [[ ${NETWORK:-} != "hardhat" ]]; then + if [[ ${NETWORK} == "local" ]]; then + # set default local rpc url if not + load_env_var LOCAL_RPC_URL "http://localhost:8545" + fi + # Derive RPC_URL from <NETWORK>_RPC_URL if not set explicitly + load_env_var RPC_URL || { + RPC_VAR="${NETWORK^^}_RPC_URL" + + load_env_var "$RPC_VAR" || { + echo "Error: RPC_URL or ${RPC_VAR} must be set" + exit 1 + } + echo "Derive RPC_URL from ${RPC_VAR}" + export RPC_URL="${!RPC_VAR:-}" + } + # echo "RPC_URL: $RPC_URL" +fi + +case "${MODE:-}" in + scratch) + case "${NETWORK:-}" in + hardhat | local) + : + ;; + *) + export NETWORK="hardhat" + ;; + esac + + export NETWORK_STATE_FILE="deployed-hardhat.json" + if [[ -f $NETWORK_STATE_FILE ]]; then + rm -f $NETWORK_STATE_FILE + fi + + load_env_var SCRATCH_DEPLOY_CONFIG "scripts/scratch/deploy-params-testnet.toml" + echo "SCRATCH_DEPLOY_CONFIG: $SCRATCH_DEPLOY_CONFIG" + + load_env_var STEPS_FILE "scratch/steps.json" + echo "STEPS_FILE: $STEPS_FILE" + ;; + forking) + case "${NETWORK:-}" in + hardhat) + echo "Error: 'hardhat' network is not supported in forking mode" + exit 1 + ;; + local) + # override fork block number to avoid HardHat uses lastSafeBlockNumber + export FORKING_BLOCK_NUMBER="$(cast block-number --rpc-url "$LOCAL_RPC_URL")" + ;; + *) + : + ;; + esac + + load_env_var NETWORK_STATE_FILE "deployed-${NETWORK}.json" + echo "NETWORK_STATE_FILE: $NETWORK_STATE_FILE" + + if [[ ${UPGRADE:-} == "true" ]]; then + load_env_var UPGRADE_PARAMETERS_FILE "scripts/upgrade/upgrade-params-${NETWORK}.toml" + echo "UPGRADE_PARAMETERS_FILE: $UPGRADE_PARAMETERS_FILE" + + load_env_var STEPS_FILE "upgrade/steps-mock-upgrade.json" + echo "STEPS_FILE: $STEPS_FILE" + + TEMP_NETWORK_STATE_FILE="deployed-${NETWORK}-upgrade.json" + 
TEMP_UPGRADE_PARAMETERS_FILE="scripts/upgrade/upgrade-params-${NETWORK}-upgrade.toml" + + if [[ -f $TEMP_NETWORK_STATE_FILE ]]; then + rm -f $TEMP_NETWORK_STATE_FILE + fi + + if [[ -f $TEMP_UPGRADE_PARAMETERS_FILE ]]; then + rm -f $TEMP_UPGRADE_PARAMETERS_FILE + fi + + if [[ -f $NETWORK_STATE_FILE ]]; then + cp "$NETWORK_STATE_FILE" "$TEMP_NETWORK_STATE_FILE" + export NETWORK_STATE_FILE=$TEMP_NETWORK_STATE_FILE + fi + + if [[ -f $UPGRADE_PARAMETERS_FILE ]]; then + cp "$UPGRADE_PARAMETERS_FILE" "$TEMP_UPGRADE_PARAMETERS_FILE" + export UPGRADE_PARAMETERS_FILE=$TEMP_UPGRADE_PARAMETERS_FILE + fi + else + TEMP_NETWORK_STATE_FILE="deployed-hardhat.json" + if [[ -f $TEMP_NETWORK_STATE_FILE ]]; then + rm -f $TEMP_NETWORK_STATE_FILE + fi + + if [[ -f $NETWORK_STATE_FILE ]]; then + cp "$NETWORK_STATE_FILE" "$TEMP_NETWORK_STATE_FILE" + export NETWORK_STATE_FILE=$TEMP_NETWORK_STATE_FILE + fi + fi + + export NETWORK="hardhat" + ;; + *) + echo "Error: MODE must be set to 'scratch' or 'forking'" + exit 1 + ;; +esac + +export DEPLOYER="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" + +export ALLOW_SKIP_STEPS=true +export AUTO_CONFIRM=true +export GAS_LIMIT=16000000 +export GAS_PRIORITY_FEE=1 +export GAS_MAX_FEE=100 + +export SKIP_INTERFACES_CHECK=true +export SKIP_CONTRACT_SIZE=true +export SKIP_GAS_REPORT=true +export SKIP_LINT_SOLIDITY=true + +case "${TRACE:-}" in + "") + TRACE_ARGS=(--disabletracer) + ;; + trace) + TRACE_ARGS=(--trace --disabletracer) + ;; + fulltrace) + TRACE_ARGS=(--fulltrace --disabletracer) + ;; + all) + TRACE_ARGS=(--fulltrace) + ;; + *) + echo "Error: TRACE must be empty, 'trace', 'fulltrace', or 'all'" + exit 1 + ;; +esac + +yarn hardhat --network $NETWORK test test/integration/**/*.ts "${TRACE_ARGS[@]}" diff --git a/scripts/scratch/deploy-params-testnet.toml b/scripts/scratch/deploy-params-testnet.toml index 86e7c04b48..26f1c7a084 100644 --- a/scripts/scratch/deploy-params-testnet.toml +++ b/scripts/scratch/deploy-params-testnet.toml @@ -7,11 +7,14 @@ 
slotsPerEpoch = 32 # Number of slots per epoch in Ethereum co secondsPerSlot = 12 # Duration of each slot in seconds # genesisTime and depositContract are set via environment variables or deployment scripts -# Gate seal configuration for testnet deployment -[gateSeal] -sealDuration = 518400 # Gate seal duration in seconds (6 days) -expiryTimestamp = 1714521600 # Gate seal expiry timestamp -sealingCommittee = [] # Empty sealing committee for testnet +# CircuitBreaker configuration for testnet deployment +[circuitBreaker] +minPauseDuration = 300 # Minimum pause duration in seconds (5 minutes) +maxPauseDuration = 5184000 # Maximum pause duration in seconds (60 days) +minHeartbeatInterval = 300 # Minimum heartbeat interval in seconds (5 minutes) +maxHeartbeatInterval = 94608000 # Maximum heartbeat interval in seconds (3 years) +initialPauseDuration = 1814400 # Initial pause duration in seconds (21 days) +initialHeartbeatInterval = 31536000 # Initial heartbeat interval in seconds (365 days) # Lido APM ENS configuration [lidoApm] @@ -79,7 +82,7 @@ maxLidoFeeRatePerSecond = "180000000000000000" # Maximum Lido fee rate per secon # Accounting oracle configuration [accountingOracle] -consensusVersion = 5 # Consensus version +consensusVersion = 6 # Consensus version # Hash consensus for validators exit bus oracle [hashConsensusForValidatorsExitBusOracle] @@ -88,11 +91,11 @@ epochsPerFrame = 4 # Epochs per consensus frame # Validators exit bus oracle configuration [validatorsExitBusOracle] -consensusVersion = 4 # Consensus version -maxValidatorsPerRequest = 600 # Maximum validators per request -maxExitRequestsLimit = 13000 # Maximum exit requests limit -exitsPerFrame = 1 # Exits per frame +maxValidatorsPerReport = 600 # Maximum validators per request +maxExitBalanceEth = 416000 +balancePerFrameEth = 32 frameDurationInSec = 48 # Frame duration in seconds +consensusVersion = 5 # Consensus version # Deposit security module configuration [depositSecurityModule] @@ -102,19 +105,22 
@@ pauseIntentValidityPeriodBlocks = 6646 # Pause intent validity period in blocks # Oracle report sanity checker configuration [oracleReportSanityChecker] -exitedValidatorsPerDayLimit = 1500 # Exited validators per day limit -appearedValidatorsPerDayLimit = 1500 # Appeared validators per day limit -deprecatedOneOffCLBalanceDecreaseBPLimit = 500 # Deprecated one-off CL balance decrease limit (BP) +exitedEthAmountPerDayLimit = 57600 # Exited ETH amount per day limit +appearedEthAmountPerDayLimit = 57600 # Appeared ETH amount per day limit annualBalanceIncreaseBPLimit = 1000 # Annual balance increase limit (BP) simulatedShareRateDeviationBPLimit = 250 # Simulated share rate deviation limit (BP) -maxValidatorExitRequestsPerReport = 2000 # Maximum validator exit requests per report +maxBalanceExitRequestedPerReportInEth = 19200 # Maximum exit ETH per report (600*32) +maxEffectiveBalanceWeightWCType01 = 32 # maxEB equivalent weight for WC type 1 +maxEffectiveBalanceWeightWCType02 = 2048 # maxEB equivalent weight for WC type 2 maxItemsPerExtraDataTransaction = 8 # Maximum items per extra data transaction maxNodeOperatorsPerExtraDataItem = 24 # Maximum node operators per extra data item requestTimestampMargin = 128 # Request timestamp margin maxPositiveTokenRebase = 5000000 # Maximum positive token rebase -initialSlashingAmountPWei = 1000 # Initial slashing amount (pWei) -inactivityPenaltiesAmountPWei = 101 # Inactivity penalties amount (pWei) +maxCLBalanceDecreaseBP = 360 # Max CL balance decrease over sliding window (BP, 360 = 3.6%) clBalanceOraclesErrorUpperBPLimit = 50 # CL balance oracles error upper limit (BP) +consolidationEthAmountPerDayLimit = 93375 # Consolidation ETH amount per day limit +exitedValidatorEthAmountLimit = 32 # Exited validator ETH amount limit in ETH units +externalPendingBalanceCapEth = 300 # Extra external pending balance cap for bounded side deposits / top-ups # Oracle daemon configuration [oracleDaemonConfig] @@ -131,7 +137,7 @@ 
EXIT_EVENTS_LOOKBACK_WINDOW_IN_SLOTS = 7200 # Lookback window for exit # Node operators registry configuration [nodeOperatorsRegistry] -stakingModuleName = "Curated" # Staking module name +stakingModuleName = "curated-onchain-v1" # Staking module name stakingModuleTypeId = "curated-onchain-v1" # Staking module type ID stuckPenaltyDelay = 172800 # Stuck penalty delay in seconds (2 days) @@ -165,9 +171,27 @@ shardCommitteePeriodInSeconds = 98304 # Shard committee period in seconds ( # Triggerable withdrawals gateway for managing validator exit requests [triggerableWithdrawalsGateway] -maxExitRequestsLimit = 13000 # Maximum number of exit requests that can be processed +maxExitRequestsLimit = 250 # Maximum number of exit requests that can be processed exitsPerFrame = 1 # Number of exits processed per frame -frameDurationInSec = 48 # Duration of each processing frame in seconds +frameDurationInSec = 240 # Duration of each processing frame in seconds + +[consolidationGateway] +maxConsolidationRequestsLimit = 2900 # Maximum number of consolidations requests that can be processed +consolidationsPerFrame = 1 # Number of consolidations processed per frame +frameDurationInSec = 36 # Duration of each processing frame in seconds +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (before fork) +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (after fork) +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +[consolidationBus] +initialBatchSize = 350 # Max number of requests in a batch +initialMaxGroupsInBatch = 10 # Max source groups in a batch +initialExecutionDelay = 0 # Delay before pending batch execution + +[consolidationMigrator] +sourceModuleId = 1 # Source staking module ID +targetModuleId = 1 # Target staking module ID, for scratch deploy testing, we use moduleId=1 which corresponds to NOR. 
+ # Predeposit guarantee configuration for validator deposit guarantees [predepositGuarantee] @@ -185,3 +209,20 @@ forcedRebalanceThresholdBP = 4975 # Threshold for forced rebalancing in basi infraFeeBP = 100 # Infrastructure fee in basis points (1%) liquidityFeeBP = 650 # Liquidity provision fee in basis points (6.5%) reservationFeeBP = 0 # Reservation fee in basis points (0%) + +# Top-up gateway configuration for validator top-ups via Merkle proofs +[topUpGateway] +maxValidatorsPerTopUp = 100 # Maximum validators per top-up call +minBlockDistance = 1 # Minimum block distance between top-ups +maxRootAge = 300 # Maximum allowed age of beacon root relative to current block timestamp +targetBalanceGwei = 2046750000000 +minTopUpGwei = 1000000000 +# Generalized indices for validator/balance/pending state verification +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +# StakingRouter configuration +[stakingRouter] +maxEBType1 = "32000000000000000000" # Max EB value for WC type 1 +maxEBType2 = "2048000000000000000000" # Max EB value for WC type 2 diff --git a/scripts/scratch/scratch-deploy.ts b/scripts/scratch/scratch-deploy.ts new file mode 100644 index 0000000000..3bcf7762f1 --- /dev/null +++ b/scripts/scratch/scratch-deploy.ts @@ -0,0 +1,12 @@ +import { getProtocolContext } from "lib/protocol/context"; + +async function main() { + console.log("Starting scratch deploy..."); + await getProtocolContext(); + console.log("Scratch deploy complete!"); +} + +main().catch((error) => { + console.error(error); + process.exit(1); +}); diff --git a/scripts/scratch/steps.json b/scripts/scratch/steps.json index 7296dd6c87..8610ad5f9d 100644 --- a/scripts/scratch/steps.json +++ b/scripts/scratch/steps.json @@ -12,7 +12,7 @@ "scratch/steps/0083-deploy-core", 
"scratch/steps/0085-deploy-vaults", "scratch/steps/0090-upgrade-locator", - "scratch/steps/0100-gate-seal", + "scratch/steps/0100-deploy-circuit-breaker", "scratch/steps/0110-finalize-dao", "scratch/steps/0120-post-locator-initializers", "scratch/steps/0130-grant-roles", diff --git a/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts b/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts index 74e082453e..226ab400c0 100644 --- a/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts +++ b/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts @@ -15,13 +15,14 @@ function getEnvVariable(name: string, defaultValue?: string): string { export async function main() { // Retrieve environment variables const deployer = ethers.getAddress(getEnvVariable("DEPLOYER")); - const gateSealFactoryAddress = getEnvVariable("GATE_SEAL_FACTORY", ""); const genesisTime = parseInt(getEnvVariable("GENESIS_TIME")); const slotsPerEpoch = parseInt(getEnvVariable("SLOTS_PER_EPOCH", "32")); const depositContractAddress = getEnvVariable("DEPOSIT_CONTRACT", ""); const withdrawalQueueBaseUri = getEnvVariable("WITHDRAWAL_QUEUE_BASE_URI", ""); const dsmPredefinedAddress = getEnvVariable("DSM_PREDEFINED_ADDRESS", ""); const genesisForkVersion = getEnvVariable("GENESIS_FORK_VERSION", "0x00000000"); + const consolidationMigratorSourceModuleId = getEnvVariable("CONSOLIDATION_MIGRATOR_SOURCE_MODULE_ID", ""); + const consolidationMigratorTargetModuleId = getEnvVariable("CONSOLIDATION_MIGRATOR_TARGET_MODULE_ID", ""); await resetStateFileFromDeployParams(); const state = readNetworkState(); @@ -43,13 +44,6 @@ export async function main() { state.chainSpec.depositContract = ethers.getAddress(depositContractAddress); } - if (gateSealFactoryAddress) { - state.gateSeal = { - ...state.gateSeal, - factoryAddress: gateSealFactoryAddress, - }; - } - if (withdrawalQueueBaseUri) { state.withdrawalQueueERC721.deployParameters = { 
...state.withdrawalQueueERC721.deployParameters, @@ -65,6 +59,12 @@ export async function main() { }; } + state.consolidationMigrator.deployParameters = { + ...state.consolidationMigrator.deployParameters, + ...(consolidationMigratorSourceModuleId && { sourceModuleId: parseInt(consolidationMigratorSourceModuleId) }), + ...(consolidationMigratorTargetModuleId && { targetModuleId: parseInt(consolidationMigratorTargetModuleId) }), + }; + // Initialize gas usage tracking state[Sk.scratchDeployGasUsed] = 0n.toString(); diff --git a/scripts/scratch/steps/0020-deploy-aragon-env.ts b/scripts/scratch/steps/0020-deploy-aragon-env.ts index f7436fdae6..57d994c114 100644 --- a/scripts/scratch/steps/0020-deploy-aragon-env.ts +++ b/scripts/scratch/steps/0020-deploy-aragon-env.ts @@ -147,7 +147,7 @@ export async function main() { if (state[Sk.miniMeTokenFactory].address) { log(`Using pre-deployed MiniMeTokenFactory: ${cy(state[Sk.miniMeTokenFactory].address)}`); } else { - await deployWithoutProxy(Sk.miniMeTokenFactory, "MiniMeTokenFactory", deployer, [], "address", true, { + await deployWithoutProxy(Sk.miniMeTokenFactory, "MiniMeTokenFactory", deployer, [], "address", true, undefined, { contractName: "MiniMeTokenFactory", }); } diff --git a/scripts/scratch/steps/0083-deploy-core.ts b/scripts/scratch/steps/0083-deploy-core.ts index f09153734a..2468189346 100644 --- a/scripts/scratch/steps/0083-deploy-core.ts +++ b/scripts/scratch/steps/0083-deploy-core.ts @@ -1,8 +1,15 @@ import { ethers } from "hardhat"; -import { StakingRouter, TriggerableWithdrawalsGateway } from "typechain-types"; - -import { getContractPath, loadContract } from "lib/contract"; +import { + ConsolidationBus, + ConsolidationGateway, + ConsolidationMigrator, + StakingRouter, + TopUpGateway, + TriggerableWithdrawalsGateway, +} from "typechain-types"; + +import { encodeFunctionCall, getContractPath, InitializeArgs, loadContract } from "lib/contract"; import { deployBehindOssifiableProxy, deployContract, @@ -10,6 
+17,8 @@ import { deployWithoutProxy, makeTx, } from "lib/deploy"; +import { EIP7002_ADDRESS } from "lib/eips/eip7002"; +import { EIP7251_ADDRESS } from "lib/eips/eip7251"; import { log } from "lib/log"; import { readNetworkState, Sk, updateObjectInState } from "lib/state-file"; import { en0x } from "lib/string"; @@ -37,7 +46,6 @@ export async function main() { const hashConsensusForAccountingParams = state[Sk.hashConsensusForAccountingOracle].deployParameters; const hashConsensusForExitBusParams = state[Sk.hashConsensusForValidatorsExitBusOracle].deployParameters; const withdrawalQueueERC721Params = state[Sk.withdrawalQueueERC721].deployParameters; - const minFirstAllocationStrategyAddress = state[Sk.minFirstAllocationStrategy].address; const validatorExitDelayVerifierParams = state[Sk.validatorExitDelayVerifier].deployParameters; const proxyContractsOwner = deployer; @@ -108,8 +116,7 @@ export async function main() { [wstETH.address, withdrawalQueueERC721Params.name, withdrawalQueueERC721Params.symbol], ); const withdrawalQueue = await loadContract("WithdrawalQueueERC721", withdrawalQueue_.address); - const withdrawalQueueAdmin = deployer; - await makeTx(withdrawalQueue, "initialize", [withdrawalQueueAdmin], { from: deployer }); + await makeTx(withdrawalQueue, "initialize", [admin], { from: deployer }); const withdrawalQueueBaseUri = state["withdrawalQueueERC721"].deployParameters.baseUri; if (withdrawalQueueBaseUri !== null && withdrawalQueueBaseUri !== "") { @@ -149,24 +156,45 @@ export async function main() { // Deploy StakingRouter // + // deploy beacon chain depositor + const beaconChainDepositor = await deployWithoutProxy(Sk.beaconChainDepositor, "BeaconChainDepositor", deployer); + + // deploy SRLib + const minFirstAllocationStrategy = await deployWithoutProxy( + Sk.minFirstAllocationStrategy, + "MinFirstAllocationStrategy", + deployer, + ); + + const srLib = await deployWithoutProxy(Sk.srLib, "SRLib", deployer, [], "address", true, { + libraries: { + 
MinFirstAllocationStrategy: minFirstAllocationStrategy.address, + }, + }); + + const stakingRouterParams = state[Sk.stakingRouter].deployParameters; + const withdrawalCredentials = `0x010000000000000000000000${withdrawalsManagerProxy.address.slice(2)}`; + const stakingRouter_ = await deployBehindOssifiableProxy( Sk.stakingRouter, "StakingRouter", proxyContractsOwner, deployer, - [depositContract], + [depositContract, lidoAddress, locator.address, stakingRouterParams.maxEBType1, stakingRouterParams.maxEBType2], null, true, { - libraries: { MinFirstAllocationStrategy: minFirstAllocationStrategyAddress }, + libraries: { + BeaconChainDepositor: beaconChainDepositor.address, + SRLib: srLib.address, + }, }, + await encodeFunctionCall>("StakingRouter", "initialize", [ + admin, + withdrawalCredentials, + ]), ); - const withdrawalCredentials = `0x010000000000000000000000${withdrawalsManagerProxy.address.slice(2)}`; - const stakingRouterAdmin = deployer; const stakingRouter = await loadContract("StakingRouter", stakingRouter_.address); - await makeTx(stakingRouter, "initialize", [stakingRouterAdmin, lidoAddress, withdrawalCredentials], { - from: deployer, - }); // // Deploy or use predefined DepositSecurityModule @@ -178,7 +206,7 @@ export async function main() { await deployWithoutProxy(Sk.depositSecurityModule, "DepositSecurityModule", deployer, [ lidoAddress, depositContract, - stakingRouter.address, + stakingRouter_.address, depositSecurityModuleParams.pauseIntentValidityPeriodBlocks, depositSecurityModuleParams.maxOperatorsPerUnvetting, ]) @@ -189,14 +217,51 @@ export async function main() { ); } + // + // Deploy TopUpGateway behind OssifiableProxy (before StakingRouter initialization) + // + + const topUpGatewayParams = state[Sk.topUpGateway].deployParameters; + + const topUpGateway_ = await deployBehindOssifiableProxy( + Sk.topUpGateway, + "TopUpGateway", + proxyContractsOwner, + deployer, + [ + locator.address, + topUpGatewayParams.gIFirstValidatorPrev, + 
topUpGatewayParams.gIFirstValidatorCurr, + topUpGatewayParams.pivotSlot, + chainSpec.slotsPerEpoch, + ], + null, // implementation + true, // withStateFile + undefined, // factoryOptions + await encodeFunctionCall>("TopUpGateway", "initialize", [ + admin, + topUpGatewayParams.maxValidatorsPerTopUp, + topUpGatewayParams.minBlockDistance, + topUpGatewayParams.maxRootAge, + topUpGatewayParams.targetBalanceGwei, + topUpGatewayParams.minTopUpGwei, + ]), + ); + await loadContract("TopUpGateway", topUpGateway_.address); + // // Deploy Accounting // - const accounting = await deployBehindOssifiableProxy(Sk.accounting, "Accounting", proxyContractsOwner, deployer, [ - locator.address, - lidoAddress, - ]); + const accounting = await deployBehindOssifiableProxy( + Sk.accounting, + "Accounting", + proxyContractsOwner, + deployer, + [locator.address, lidoAddress], + null, + true, + ); // // Deploy AccountingOracle and its HashConsensus @@ -265,9 +330,9 @@ export async function main() { hashConsensusForVebo.address, validatorsExitBusOracleParams.consensusVersion, ZERO_LAST_PROCESSING_REF_SLOT, - validatorsExitBusOracleParams.maxValidatorsPerRequest, - validatorsExitBusOracleParams.maxExitRequestsLimit, - validatorsExitBusOracleParams.exitsPerFrame, + validatorsExitBusOracleParams.maxValidatorsPerReport, + validatorsExitBusOracleParams.maxExitBalanceEth, + validatorsExitBusOracleParams.balancePerFrameEth, validatorsExitBusOracleParams.frameDurationInSec, ], { from: deployer }, @@ -277,6 +342,7 @@ export async function main() { // Deploy Triggerable Withdrawals Gateway // + const triggerableWithdrawalsGatewayParams = state[Sk.triggerableWithdrawalsGateway].deployParameters; const triggerableWithdrawalsGateway_ = await deployWithoutProxy( Sk.triggerableWithdrawalsGateway, "TriggerableWithdrawalsGateway", @@ -284,9 +350,9 @@ export async function main() { [ admin, locator.address, - validatorsExitBusOracleParams.maxExitRequestsLimit, - validatorsExitBusOracleParams.exitsPerFrame, - 
validatorsExitBusOracleParams.frameDurationInSec, + triggerableWithdrawalsGatewayParams.maxExitRequestsLimit, + triggerableWithdrawalsGatewayParams.exitsPerFrame, + triggerableWithdrawalsGatewayParams.frameDurationInSec, ], ); await makeTx( @@ -306,6 +372,109 @@ export async function main() { { from: deployer }, ); + // + // Deploy Consolidation Gateway + // + + const consolidationGatewayParams = state[Sk.consolidationGateway].deployParameters; + const consolidationGateway_ = await deployWithoutProxy(Sk.consolidationGateway, "ConsolidationGateway", deployer, [ + admin, + locator.address, + consolidationGatewayParams.maxConsolidationRequestsLimit, + consolidationGatewayParams.consolidationsPerFrame, + consolidationGatewayParams.frameDurationInSec, + consolidationGatewayParams.gIFirstValidatorPrev, + consolidationGatewayParams.gIFirstValidatorCurr, + consolidationGatewayParams.pivotSlot, + ]); + + const consolidationGateway = await loadContract( + "ConsolidationGateway", + consolidationGateway_.address, + ); + + // + // Deploy Consolidation Bus + // + + const consolidationBusParams = state[Sk.consolidationBus].deployParameters; + const consolidationBus_ = await deployBehindOssifiableProxy( + Sk.consolidationBus, + "ConsolidationBus", + proxyContractsOwner, + deployer, + [consolidationGateway_.address], + ); + + const consolidationBus = await loadContract("ConsolidationBus", consolidationBus_.address); + + await makeTx( + consolidationBus, + "initialize", + [ + admin, + consolidationBusParams.initialBatchSize, + consolidationBusParams.initialMaxGroupsInBatch, + consolidationBusParams.initialExecutionDelay, + ], + { from: deployer }, + ); + + // Grant MANAGE_ROLE to deployer for testing + await makeTx(consolidationBus, "grantRole", [await consolidationBus.MANAGE_ROLE(), deployer], { from: deployer }); + + // Grant ADD_CONSOLIDATION_REQUEST_ROLE on Gateway to Bus + await makeTx( + consolidationGateway, + "grantRole", + [await 
consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(), consolidationBus_.address], + { from: deployer }, + ); + + // Also grant ADD_CONSOLIDATION_REQUEST_ROLE to deployer for direct testing + await makeTx( + consolidationGateway, + "grantRole", + [await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(), deployer], + { from: deployer }, + ); + + // + // Deploy Consolidation Migrator + // + const consolidationMigratorParams = state[Sk.consolidationMigrator].deployParameters; + + const consolidationMigrator_ = await deployBehindOssifiableProxy( + Sk.consolidationMigrator, + "ConsolidationMigrator", + proxyContractsOwner, + deployer, + [ + stakingRouter_.address, + consolidationBus_.address, + consolidationMigratorParams.sourceModuleId, + consolidationMigratorParams.targetModuleId, + ], + ); + + const consolidationMigrator = await loadContract( + "ConsolidationMigrator", + consolidationMigrator_.address, + ); + + await makeTx(consolidationMigrator, "initialize", [admin], { from: deployer }); + + // Grant ALLOW_PAIR_ROLE to deployer for testing + await makeTx(consolidationMigrator, "grantRole", [await consolidationMigrator.ALLOW_PAIR_ROLE(), deployer], { + from: deployer, + }); + + // Register ConsolidationMigrator as publisher on ConsolidationBus + + await makeTx(consolidationBus, "grantRole", [await consolidationBus.PUBLISH_ROLE(), consolidationMigrator_.address], { + from: deployer, + }); + // // Deploy ValidatorExitDelayVerifier // @@ -344,6 +513,9 @@ export async function main() { lidoAddress, treasuryAddress, triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, ]); await makeTx(withdrawalsManagerProxy, "proxy_upgradeTo", [withdrawalVaultImpl.address, "0x"], { from: deployer }); @@ -365,26 +537,31 @@ export async function main() { // const sanityCheckerParams = state["oracleReportSanityChecker"].deployParameters; - const oracleReportSanityCheckerArgs = [ - locator.address, - accountingOracle.address, - 
accounting.address, - admin, - [ - sanityCheckerParams.exitedValidatorsPerDayLimit, - sanityCheckerParams.appearedValidatorsPerDayLimit, - sanityCheckerParams.annualBalanceIncreaseBPLimit, - sanityCheckerParams.simulatedShareRateDeviationBPLimit, - sanityCheckerParams.maxValidatorExitRequestsPerReport, - sanityCheckerParams.maxItemsPerExtraDataTransaction, - sanityCheckerParams.maxNodeOperatorsPerExtraDataItem, - sanityCheckerParams.requestTimestampMargin, - sanityCheckerParams.maxPositiveTokenRebase, - sanityCheckerParams.initialSlashingAmountPWei, - sanityCheckerParams.inactivityPenaltiesAmountPWei, - sanityCheckerParams.clBalanceOraclesErrorUpperBPLimit, - ], - ]; + // TODO: set final NEW sanity limits in deploy params before release deployment: + // - exitedEthAmountPerDayLimit + // - appearedEthAmountPerDayLimit + // - consolidationEthAmountPerDayLimit + // - exitedValidatorEthAmountLimit + const sanityLimits = { + exitedEthAmountPerDayLimit: sanityCheckerParams.exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit: sanityCheckerParams.appearedEthAmountPerDayLimit, + annualBalanceIncreaseBPLimit: sanityCheckerParams.annualBalanceIncreaseBPLimit, + simulatedShareRateDeviationBPLimit: sanityCheckerParams.simulatedShareRateDeviationBPLimit, + maxBalanceExitRequestedPerReportInEth: sanityCheckerParams.maxBalanceExitRequestedPerReportInEth, + maxEffectiveBalanceWeightWCType01: sanityCheckerParams.maxEffectiveBalanceWeightWCType01, + maxEffectiveBalanceWeightWCType02: sanityCheckerParams.maxEffectiveBalanceWeightWCType02, + maxItemsPerExtraDataTransaction: sanityCheckerParams.maxItemsPerExtraDataTransaction, + maxNodeOperatorsPerExtraDataItem: sanityCheckerParams.maxNodeOperatorsPerExtraDataItem, + requestTimestampMargin: sanityCheckerParams.requestTimestampMargin, + maxPositiveTokenRebase: sanityCheckerParams.maxPositiveTokenRebase, + maxCLBalanceDecreaseBP: sanityCheckerParams.maxCLBalanceDecreaseBP, + clBalanceOraclesErrorUpperBPLimit: 
sanityCheckerParams.clBalanceOraclesErrorUpperBPLimit, + consolidationEthAmountPerDayLimit: sanityCheckerParams.consolidationEthAmountPerDayLimit, + exitedValidatorEthAmountLimit: sanityCheckerParams.exitedValidatorEthAmountLimit, + externalPendingBalanceCapEth: sanityCheckerParams.externalPendingBalanceCapEth, + }; + + const oracleReportSanityCheckerArgs = [locator.address, accounting.address, admin, sanityLimits]; await deployWithoutProxy( Sk.oracleReportSanityChecker, diff --git a/scripts/scratch/steps/0090-upgrade-locator.ts b/scripts/scratch/steps/0090-upgrade-locator.ts index 62c2881e7c..3b3f60ed79 100644 --- a/scripts/scratch/steps/0090-upgrade-locator.ts +++ b/scripts/scratch/steps/0090-upgrade-locator.ts @@ -29,6 +29,7 @@ export async function main() { withdrawalVault: getAddress(Sk.withdrawalVault, state), validatorExitDelayVerifier: getAddress(Sk.validatorExitDelayVerifier, state), triggerableWithdrawalsGateway: getAddress(Sk.triggerableWithdrawalsGateway, state), + consolidationGateway: getAddress(Sk.consolidationGateway, state), oracleDaemonConfig: getAddress(Sk.oracleDaemonConfig, state), accounting: getAddress(Sk.accounting, state), predepositGuarantee: getAddress(Sk.predepositGuarantee, state), @@ -37,7 +38,7 @@ export async function main() { vaultFactory: getAddress(Sk.stakingVaultFactory, state), lazyOracle: getAddress(Sk.lazyOracle, state), operatorGrid: getAddress(Sk.operatorGrid, state), + topUpGateway: getAddress(Sk.topUpGateway, state), }; - await updateProxyImplementation(Sk.lidoLocator, "LidoLocator", locatorAddress, proxyContractsOwner, [locatorConfig]); } diff --git a/scripts/scratch/steps/0100-deploy-circuit-breaker.ts b/scripts/scratch/steps/0100-deploy-circuit-breaker.ts new file mode 100644 index 0000000000..79c6ab44b9 --- /dev/null +++ b/scripts/scratch/steps/0100-deploy-circuit-breaker.ts @@ -0,0 +1,118 @@ +import { execSync } from "child_process"; +import { HDNodeWallet } from "ethers"; +import fs from "fs"; +import { ethers, 
network as hardhatNetwork } from "hardhat"; +import os from "os"; +import path from "path"; + +import { cy, deployWithoutProxy, log } from "lib"; +import { readNetworkState, Sk, updateObjectInState } from "lib/state-file"; + +const CIRCUIT_BREAKER_REPO = "https://github.com/lidofinance/circuit-breaker.git"; +const CIRCUIT_BREAKER_BRANCH = "deploy-script"; + +export async function main() { + const deployer = (await ethers.provider.getSigner()).address; + const state = readNetworkState({ deployer }); + + // Check if CircuitBreaker address is already specified + if (state[Sk.circuitBreaker].address) { + log(`Using the specified CircuitBreaker address: ${cy(state[Sk.circuitBreaker].address)}`); + log.emptyLine(); + return; + } + + // deploy mock in case the network="hardhat" (there is no rpcUrl for in-memory node instance) + if (hardhatNetwork.name == "hardhat") { + log("In-memory 'hardhat' network detected, deploy CircuitBreakerMock contract..."); + const cb = await deployWithoutProxy(Sk.circuitBreaker, "CircuitBreakerMock", deployer, [60]); + log(`CircuitBreakerMock deployed at: ${cy(cb.address)}`); + log.emptyLine(); + return; + } + + const agentAddress = state[Sk.appAgent].proxy.address; + if (!agentAddress) { + throw new Error("AragonAgent proxy address is not set in the state — CircuitBreaker requires it as admin"); + } + + const params = state[Sk.circuitBreaker].deployParameters; + + // Clone the CircuitBreaker repo into a temp directory + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "circuit-breaker-")); + log(`Cloning CircuitBreaker repo to ${tmpDir}...`); + + try { + const cloneCmd = `git clone --depth 1 --branch ${CIRCUIT_BREAKER_BRANCH} ${CIRCUIT_BREAKER_REPO} ${tmpDir}`; + execSync(cloneCmd, { stdio: "inherit" }); + + // Install foundry dependencies + execSync("forge install", { cwd: tmpDir, stdio: "inherit" }); + + // Extract RPC URL and private key from Hardhat's network config + const networkConfig = hardhatNetwork.config; + const rpcUrl = "url" 
in networkConfig ? networkConfig.url : process.env.RPC_URL; + if (!rpcUrl) throw new Error("RPC URL is not available"); + + const accounts = networkConfig.accounts; + let privateKey: string; + if (Array.isArray(accounts) && accounts.length > 0) { + privateKey = accounts[0] as string; + } else if (typeof accounts === "object" && "mnemonic" in accounts) { + const wallet = HDNodeWallet.fromMnemonic(ethers.Mnemonic.fromPhrase(accounts.mnemonic), `m/44'/60'/0'/0/0`); + privateKey = wallet.privateKey; + } else { + // Fallback: derive from the default Hardhat mnemonic (used by "local" network with `npx hardhat node`) + const wallet = HDNodeWallet.fromMnemonic( + ethers.Mnemonic.fromPhrase("test test test test test test test test test test test junk"), + `m/44'/60'/0'/0/0`, + ); + privateKey = wallet.privateKey; + } + + const forgeArgs = [ + "forge script script/Deploy.s.sol:Deploy", + `--sig "run(address,uint256,uint256,uint256,uint256,uint256,uint256)"`, + agentAddress, + params.minPauseDuration.toString(), + params.maxPauseDuration.toString(), + params.minHeartbeatInterval.toString(), + params.maxHeartbeatInterval.toString(), + params.initialPauseDuration.toString(), + params.initialHeartbeatInterval.toString(), + `--rpc-url ${rpcUrl}`, + `--private-key ${privateKey}`, + "--broadcast", + ]; + + if (process.env.ETHERSCAN_API_KEY) { + forgeArgs.push("--verify", `--etherscan-api-key ${process.env.ETHERSCAN_API_KEY}`); + } + + log("Running CircuitBreaker deploy script..."); + execSync(forgeArgs.join(" "), { cwd: tmpDir, stdio: "inherit" }); + + // Read the deployment artifact + const network = await ethers.provider.getNetwork(); + const chainId = network.chainId.toString(); + const artifactName = process.env.DEPLOY_NAME || chainId; + const artifactPath = path.join(tmpDir, `${artifactName}.json`); + + if (!fs.existsSync(artifactPath)) { + throw new Error(`CircuitBreaker deploy artifact not found at ${artifactPath}`); + } + + const artifact = 
JSON.parse(fs.readFileSync(artifactPath, "utf8")); + const circuitBreakerAddress = artifact.circuitBreaker; + + log(`CircuitBreaker deployed at: ${cy(circuitBreakerAddress)}`); + log.emptyLine(); + + updateObjectInState(Sk.circuitBreaker, { + address: circuitBreakerAddress, + }); + } finally { + // Clean up the temp directory + fs.rmSync(tmpDir, { recursive: true, force: true }); + } +} diff --git a/scripts/scratch/steps/0100-gate-seal.ts b/scripts/scratch/steps/0100-gate-seal.ts deleted file mode 100644 index c19051a657..0000000000 --- a/scripts/scratch/steps/0100-gate-seal.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { ethers } from "hardhat"; - -import { loadContract } from "lib"; -import { makeTx } from "lib/deploy"; -import { findEvents } from "lib/event"; -import { cy, log } from "lib/log"; -import { readNetworkState, Sk, updateObjectInState } from "lib/state-file"; - -export async function main() { - const deployer = (await ethers.provider.getSigner()).address; - const state = readNetworkState({ deployer }); - - // Check if GateSeal address is already specified - if (state[Sk.gateSeal].address) { - log(`Using the specified GateSeal address: ${cy(state[Sk.gateSeal].address)}`); - log.emptyLine(); - return; - } - - // Check if GateSealFactory address is specified - if (!state[Sk.gateSeal].factoryAddress) { - log(`GateSealFactory not specified: skipping creating GateSeal instance`); - log.emptyLine(); - return; - } - - // Create new GateSeal instance - const sealableContracts = [state.withdrawalQueueERC721.proxy.address, state.validatorsExitBusOracle.proxy.address]; - const gateSealFactory = await loadContract("IGateSealFactory", state[Sk.gateSeal].factoryAddress); - - const receipt = await makeTx( - gateSealFactory, - "create_gate_seal", - [ - state[Sk.gateSeal].sealingCommittee, - state[Sk.gateSeal].sealDuration, - sealableContracts, - state[Sk.gateSeal].expiryTimestamp, - ], - { from: deployer }, - ); - - // Extract and log the new GateSeal address - const 
gateSealAddress = await findEvents(receipt, "GateSealCreated")[0].args.gate_seal; - log(`GateSeal created: ${cy(gateSealAddress)}`); - log.emptyLine(); - - // Update the state with the new GateSeal address - updateObjectInState(Sk.gateSeal, { - address: gateSealAddress, - }); -} diff --git a/scripts/scratch/steps/0130-grant-roles.ts b/scripts/scratch/steps/0130-grant-roles.ts index ca7f6fd2ee..639d483d3e 100644 --- a/scripts/scratch/steps/0130-grant-roles.ts +++ b/scripts/scratch/steps/0130-grant-roles.ts @@ -21,7 +21,7 @@ export async function main() { const agentAddress = state[Sk.appAgent].proxy.address; const nodeOperatorsRegistryAddress = state[Sk.appNodeOperatorsRegistry].proxy.address; const simpleDvtApp = state[Sk.appSimpleDvt].proxy.address; - const gateSealAddress = state.gateSeal.address; + const circuitBreakerAddress = state[Sk.circuitBreaker].address; const burnerAddress = state[Sk.burner].proxy.address; const stakingRouterAddress = state[Sk.stakingRouter].proxy.address; const withdrawalQueueAddress = state[Sk.withdrawalQueueERC721].proxy.address; @@ -46,9 +46,6 @@ export async function main() { [await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE(), accountingOracleAddress], { from: deployer }, ); - await makeTx(stakingRouter, "grantRole", [await stakingRouter.REPORT_REWARDS_MINTED_ROLE(), lidoAddress], { - from: deployer, - }); await makeTx(stakingRouter, "grantRole", [await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), agentAddress], { from: deployer, }); @@ -70,16 +67,21 @@ export async function main() { ); // ValidatorsExitBusOracle - if (gateSealAddress) { + if (circuitBreakerAddress) { const validatorsExitBusOracle = await loadContract( "ValidatorsExitBusOracle", validatorsExitBusOracleAddress, ); - await makeTx(validatorsExitBusOracle, "grantRole", [await validatorsExitBusOracle.PAUSE_ROLE(), gateSealAddress], { - from: deployer, - }); + await makeTx( + validatorsExitBusOracle, + "grantRole", + [await validatorsExitBusOracle.PAUSE_ROLE(), 
circuitBreakerAddress], + { + from: deployer, + }, + ); } else { - log(`GateSeal is not specified or deployed: skipping assigning PAUSE_ROLE of validatorsExitBusOracle`); + log(`CircuitBreaker is not specified or deployed: skipping assigning PAUSE_ROLE of validatorsExitBusOracle`); log.emptyLine(); } @@ -97,12 +99,12 @@ export async function main() { // WithdrawalQueue const withdrawalQueue = await loadContract("WithdrawalQueueERC721", withdrawalQueueAddress); - if (gateSealAddress) { - await makeTx(withdrawalQueue, "grantRole", [await withdrawalQueue.PAUSE_ROLE(), gateSealAddress], { + if (circuitBreakerAddress) { + await makeTx(withdrawalQueue, "grantRole", [await withdrawalQueue.PAUSE_ROLE(), circuitBreakerAddress], { from: deployer, }); } else { - log(`GateSeal is not specified or deployed: skipping assigning PAUSE_ROLE of withdrawalQueue`); + log(`CircuitBreaker is not specified or deployed: skipping assigning PAUSE_ROLE of withdrawalQueue`); log.emptyLine(); } diff --git a/scripts/scratch/steps/0140-plug-staking-modules.ts b/scripts/scratch/steps/0140-plug-staking-modules.ts index 3b15da7ad6..aff50146ae 100644 --- a/scripts/scratch/steps/0140-plug-staking-modules.ts +++ b/scripts/scratch/steps/0140-plug-staking-modules.ts @@ -1,5 +1,8 @@ import { ethers } from "hardhat"; +import { StakingRouter } from "typechain-types"; + +import { WithdrawalCredentialsType } from "lib"; import { loadContract } from "lib/contract"; import { makeTx } from "lib/deploy"; import { streccak } from "lib/keccak"; @@ -13,6 +16,7 @@ const NOR_STAKING_MODULE_MODULE_FEE_BP = 500; // 5% const NOR_STAKING_MODULE_TREASURY_FEE_BP = 500; // 5% const NOR_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK = 150; const NOR_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE = 25; +const NOR_WITHDRAWAL_TYPE = WithdrawalCredentialsType.WC0x01; const SDVT_STAKING_MODULE_TARGET_SHARE_BP = 400; // 4% const SDVT_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP = 10000; // 100% @@ -20,13 +24,14 @@ const 
SDVT_STAKING_MODULE_MODULE_FEE_BP = 800; // 8% const SDVT_STAKING_MODULE_TREASURY_FEE_BP = 200; // 2% const SDVT_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK = 150; const SDVT_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE = 25; +const SDVT_WITHDRAWAL_TYPE = WithdrawalCredentialsType.WC0x01; export async function main() { const deployer = (await ethers.provider.getSigner()).address; const state = readNetworkState({ deployer }); // Get contract instances - const stakingRouter = await loadContract("StakingRouter", state.stakingRouter.proxy.address); + const stakingRouter = await loadContract("StakingRouter", state.stakingRouter.proxy.address); // Grant STAKING_MODULE_MANAGE_ROLE to deployer await makeTx(stakingRouter, "grantRole", [STAKING_MODULE_MANAGE_ROLE, deployer], { from: deployer }); @@ -38,12 +43,15 @@ export async function main() { [ state.nodeOperatorsRegistry.deployParameters.stakingModuleName, state[Sk.appNodeOperatorsRegistry].proxy.address, - NOR_STAKING_MODULE_STAKE_SHARE_LIMIT_BP, - NOR_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, - NOR_STAKING_MODULE_MODULE_FEE_BP, - NOR_STAKING_MODULE_TREASURY_FEE_BP, - NOR_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, - NOR_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + { + stakeShareLimit: NOR_STAKING_MODULE_STAKE_SHARE_LIMIT_BP, + priorityExitShareThreshold: NOR_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, + stakingModuleFee: NOR_STAKING_MODULE_MODULE_FEE_BP, + treasuryFee: NOR_STAKING_MODULE_TREASURY_FEE_BP, + maxDepositsPerBlock: NOR_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: NOR_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: NOR_WITHDRAWAL_TYPE, + }, ], { from: deployer }, ); @@ -55,16 +63,24 @@ export async function main() { [ state.simpleDvt.deployParameters.stakingModuleName, state[Sk.appSimpleDvt].proxy.address, - SDVT_STAKING_MODULE_TARGET_SHARE_BP, - SDVT_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, - SDVT_STAKING_MODULE_MODULE_FEE_BP, - 
SDVT_STAKING_MODULE_TREASURY_FEE_BP, - SDVT_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, - SDVT_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + { + stakeShareLimit: SDVT_STAKING_MODULE_TARGET_SHARE_BP, + priorityExitShareThreshold: SDVT_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, + stakingModuleFee: SDVT_STAKING_MODULE_MODULE_FEE_BP, + treasuryFee: SDVT_STAKING_MODULE_TREASURY_FEE_BP, + maxDepositsPerBlock: SDVT_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: SDVT_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: SDVT_WITHDRAWAL_TYPE, + }, ], { from: deployer }, ); // Renounce STAKING_MODULE_MANAGE_ROLE from deployer await makeTx(stakingRouter, "renounceRole", [STAKING_MODULE_MANAGE_ROLE, deployer], { from: deployer }); + + // assert + if (await stakingRouter.hasRole(STAKING_MODULE_MANAGE_ROLE, deployer)) { + throw new Error("Failed to renounce STAKING_MODULE_MANAGE_ROLE"); + } } diff --git a/scripts/scratch/steps/0150-transfer-roles.ts b/scripts/scratch/steps/0150-transfer-roles.ts index 067e85a065..f95cca6f11 100644 --- a/scripts/scratch/steps/0150-transfer-roles.ts +++ b/scripts/scratch/steps/0150-transfer-roles.ts @@ -25,6 +25,10 @@ export async function main() { { name: "OracleDaemonConfig", address: state[Sk.oracleDaemonConfig].address }, { name: "OracleReportSanityChecker", address: state[Sk.oracleReportSanityChecker].address }, { name: "TriggerableWithdrawalsGateway", address: state[Sk.triggerableWithdrawalsGateway].address }, + { name: "ConsolidationGateway", address: state[Sk.consolidationGateway].address }, + { name: "ConsolidationBus", address: state[Sk.consolidationBus].proxy.address }, + { name: "ConsolidationMigrator", address: state[Sk.consolidationMigrator].proxy.address }, + { name: "TopUpGateway", address: state[Sk.topUpGateway].proxy.address }, { name: "VaultHub", address: state[Sk.vaultHub].proxy.address }, { name: "PredepositGuarantee", address: state[Sk.predepositGuarantee].proxy.address }, { name: 
"OperatorGrid", address: state[Sk.operatorGrid].proxy.address }, diff --git a/scripts/upgrade/steps-deploy-base.json b/scripts/upgrade/steps-deploy-base.json new file mode 100644 index 0000000000..0516fa0aa1 --- /dev/null +++ b/scripts/upgrade/steps-deploy-base.json @@ -0,0 +1,3 @@ +{ + "steps": ["upgrade/steps/0000-check-env", "upgrade/steps/0100-deploy-base-contracts"] +} diff --git a/scripts/upgrade/steps-deploy-template.json b/scripts/upgrade/steps-deploy-template.json new file mode 100644 index 0000000000..9103c63f72 --- /dev/null +++ b/scripts/upgrade/steps-deploy-template.json @@ -0,0 +1,7 @@ +{ + "steps": [ + "upgrade/steps/0000-check-env", + "upgrade/steps/0200-deploy-upgrade-template", + "upgrade/steps/0300-deploy-upgrade-vote-script" + ] +} diff --git a/scripts/upgrade/steps-mock-upgrade.json b/scripts/upgrade/steps-mock-upgrade.json new file mode 100644 index 0000000000..abc06af1f5 --- /dev/null +++ b/scripts/upgrade/steps-mock-upgrade.json @@ -0,0 +1,11 @@ +{ + "steps": [ + "upgrade/steps/0000-check-env", + "upgrade/steps/0050-deploy-mock-circuit-breaker", + "upgrade/steps/0100-deploy-base-contracts", + "upgrade/steps/0150-deploy-mock-et-factories", + "upgrade/steps/0200-deploy-upgrade-template", + "upgrade/steps/0300-deploy-upgrade-vote-script", + "upgrade/steps/0500-mock-upgrade" + ] +} diff --git a/scripts/upgrade/steps-mock-voting.json b/scripts/upgrade/steps-mock-voting.json new file mode 100644 index 0000000000..9a9580b600 --- /dev/null +++ b/scripts/upgrade/steps-mock-voting.json @@ -0,0 +1,11 @@ +{ + "steps": [ + "upgrade/steps/0000-check-env", + "upgrade/steps/0050-deploy-mock-circuit-breaker", + "upgrade/steps/0100-deploy-base-contracts", + "upgrade/steps/0150-deploy-mock-et-factories", + "upgrade/steps/0200-deploy-upgrade-template", + "upgrade/steps/0300-deploy-upgrade-vote-script", + "upgrade/steps/1000-mock-voting" + ] +} diff --git a/scripts/upgrade/steps-upgrade.json b/scripts/upgrade/steps-upgrade.json new file mode 100644 index 
0000000000..7fbccbec24 --- /dev/null +++ b/scripts/upgrade/steps-upgrade.json @@ -0,0 +1,8 @@ +{ + "steps": [ + "upgrade/steps/0000-check-env", + "upgrade/steps/0100-deploy-base-contracts", + "upgrade/steps/0200-deploy-upgrade-template", + "upgrade/steps/0300-deploy-upgrade-vote-script" + ] +} diff --git a/scripts/upgrade/steps/0050-deploy-mock-circuit-breaker.ts b/scripts/upgrade/steps/0050-deploy-mock-circuit-breaker.ts new file mode 100644 index 0000000000..762ffc7d20 --- /dev/null +++ b/scripts/upgrade/steps/0050-deploy-mock-circuit-breaker.ts @@ -0,0 +1,18 @@ +import { ethers } from "hardhat"; + +import { deployWithoutProxy, getAddressValidated, isContractDeployed, log, readNetworkState, Sk } from "lib"; + +export async function skip(): Promise { + const state = readNetworkState(); + const address = getAddressValidated(Sk.circuitBreaker, state); + return !!(address && (await isContractDeployed(address))); +} + +export async function main() { + const deployer = (await ethers.provider.getSigner()).address; + + log.splitter(); + log.header("[Mocks] Deploy CircuitBreakerMock contract"); + + await deployWithoutProxy(Sk.circuitBreaker, "CircuitBreakerMock", deployer, [60]); +} diff --git a/scripts/upgrade/steps/0100-deploy-base-contracts.ts b/scripts/upgrade/steps/0100-deploy-base-contracts.ts new file mode 100644 index 0000000000..e294ccddc3 --- /dev/null +++ b/scripts/upgrade/steps/0100-deploy-base-contracts.ts @@ -0,0 +1,451 @@ +import { ethers } from "hardhat"; +import { checkArtifactDeployedAndLog, readUpgradeParameters } from "scripts/utils/upgrade"; + +import { + Accounting__factory, + AccountingOracle__factory, + ConsolidationBus, + ConsolidationBus__factory, + ConsolidationGateway__factory, + ConsolidationMigrator, + ConsolidationMigrator__factory, + DepositSecurityModule, + DepositSecurityModule__factory, + IOracleReportSanityChecker_preV4, + Lido__factory, + LidoLocator, + LidoLocator__factory, + OracleReportSanityChecker__factory, + 
StakingRouter__factory, + TopUpGateway, + TopUpGateway__factory, + UpgradeTemporaryAdmin, + UpgradeTemporaryAdmin__factory, + ValidatorsExitBusOracle__factory, + WithdrawalVault__factory, +} from "typechain-types"; + +import { + ConstructorArgs, + deployBehindOssifiableProxy, + deployImplementation, + deployWithoutProxy, + encodeFunctionCall, + getAddress, + InitializeArgs, + loadContract, + logArgs, + logConfirmReview as logConfirmReview, + logScriptHeader, + logStartReview as logStartReview, + makeTx, + MethodArgs, + readNetworkState, + Sk, +} from "lib"; + +export async function skip(): Promise { + return await checkArtifactDeployedAndLog(Sk.upgradeTemporaryAdmin); +} + +export async function main() { + const state = readNetworkState(); + const parameters = readUpgradeParameters(); + const deployer = (await ethers.provider.getSigner()).address; + + await logScriptHeader("SRv3/CMv2 — Deploy & setup Base Contracts", deployer); + + // + // Collect all param values + // + const chainSpec = state[Sk.chainSpec]; + const depositContractAddress = chainSpec.depositContract ?? 
chainSpec.depositContractAddress; + if (!depositContractAddress) { + throw new Error("Deposit contract address is missing in the state file"); + } + + const agentAddress = getAddress(Sk.appAgent, state); + const easyTrackAddress = getAddress(Sk.easyTrack, state); + const resealManagerAddress = getAddress(Sk.resealManager, state); + const circuitBreakerAddress = getAddress(Sk.circuitBreaker, state); + const locatorAddress = getAddress(Sk.lidoLocator, state); + const locator = await loadContract("LidoLocator", locatorAddress); + + const lidoAddress = await locator.lido(); + const stakingRouterAddress = await locator.stakingRouter(); + const accountingAddress = await locator.accounting(); + const triggerableWithdrawalsGatewayAddress = await locator.triggerableWithdrawalsGateway(); + + const treasuryAddress = agentAddress; + const proxyContractsOwner = agentAddress; + + // old sanity checker + const oldSanityChecker = await loadContract( + "IOracleReportSanityChecker_preV4", + await locator.oracleReportSanityChecker(), + ); + const oldCheckerLimits = await oldSanityChecker.getOracleReportLimits(); + const newCheckerLimits = { + exitedEthAmountPerDayLimit: parameters.oracleReportSanityChecker.exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit: parameters.oracleReportSanityChecker.appearedEthAmountPerDayLimit, + annualBalanceIncreaseBPLimit: oldCheckerLimits.annualBalanceIncreaseBPLimit, + simulatedShareRateDeviationBPLimit: oldCheckerLimits.simulatedShareRateDeviationBPLimit, + maxBalanceExitRequestedPerReportInEth: parameters.oracleReportSanityChecker.maxBalanceExitRequestedPerReportInEth, + maxEffectiveBalanceWeightWCType01: parameters.oracleReportSanityChecker.maxEffectiveBalanceWeightWCType01, + maxEffectiveBalanceWeightWCType02: parameters.oracleReportSanityChecker.maxEffectiveBalanceWeightWCType02, + maxItemsPerExtraDataTransaction: oldCheckerLimits.maxItemsPerExtraDataTransaction, + maxNodeOperatorsPerExtraDataItem: 
oldCheckerLimits.maxNodeOperatorsPerExtraDataItem, + requestTimestampMargin: oldCheckerLimits.requestTimestampMargin, + maxPositiveTokenRebase: oldCheckerLimits.maxPositiveTokenRebase, + maxCLBalanceDecreaseBP: parameters.oracleReportSanityChecker.maxCLBalanceDecreaseBP, + clBalanceOraclesErrorUpperBPLimit: oldCheckerLimits.clBalanceOraclesErrorUpperBPLimit, + consolidationEthAmountPerDayLimit: parameters.oracleReportSanityChecker.consolidationEthAmountPerDayLimit, + exitedValidatorEthAmountLimit: parameters.oracleReportSanityChecker.exitedValidatorEthAmountLimit, + externalPendingBalanceCapEth: parameters.oracleReportSanityChecker.externalPendingBalanceCapEth, + }; + + // + // Deploy TemporaryAdmin + // + const tempAdminConstructorArgs: ConstructorArgs = [agentAddress]; + logStartReview(); + await logArgs("UpgradeTemporaryAdmin", tempAdminConstructorArgs); + await logConfirmReview(); + + const tempAdmin = await deployWithoutProxy( + Sk.upgradeTemporaryAdmin, + "UpgradeTemporaryAdmin", + deployer, + tempAdminConstructorArgs, + ); + + const constructorArgs: { + Lido: ConstructorArgs; + Accounting: ConstructorArgs; + AccountingOracle: ConstructorArgs; + ValidatorsExitBusOracle: ConstructorArgs; + StakingRouter: ConstructorArgs; + TopUpGateway: ConstructorArgs; + DepositSecurityModule: ConstructorArgs; + OracleReportSanityChecker: ConstructorArgs; + ConsolidationGateway: ConstructorArgs; + } = { + Lido: [], + Accounting: [locatorAddress, lidoAddress], + AccountingOracle: [locatorAddress, Number(chainSpec.secondsPerSlot), Number(chainSpec.genesisTime)], + ValidatorsExitBusOracle: [Number(chainSpec.secondsPerSlot), Number(chainSpec.genesisTime), locatorAddress], + StakingRouter: [ + depositContractAddress, + lidoAddress, + locatorAddress, + parameters.stakingRouter.maxEBType1, + parameters.stakingRouter.maxEBType2, + ], + TopUpGateway: [ + locatorAddress, + parameters.topUpGateway.gIFirstValidatorPrev, + parameters.topUpGateway.gIFirstValidatorCurr, + 
parameters.topUpGateway.pivotSlot, + chainSpec.slotsPerEpoch, + ], + DepositSecurityModule: [ + lidoAddress, + depositContractAddress, + stakingRouterAddress, + parameters.depositSecurityModule.pauseIntentValidityPeriodBlocks, + parameters.depositSecurityModule.maxOperatorsPerUnvetting, + ], + OracleReportSanityChecker: [locatorAddress, accountingAddress, agentAddress, newCheckerLimits], + ConsolidationGateway: [ + tempAdmin.address, // grant DEFAULT_ADMIN role to TemporaryAdmin, + locatorAddress, + parameters.consolidationGateway.maxConsolidationRequestsLimit, + parameters.consolidationGateway.consolidationsPerFrame, + parameters.consolidationGateway.frameDurationInSec, + parameters.consolidationGateway.gIFirstValidatorPrev, + parameters.consolidationGateway.gIFirstValidatorCurr, + parameters.consolidationGateway.pivotSlot, + ], + }; + + const topUpGatewayInitArgs: InitializeArgs = [ + tempAdmin.address, // grant DEFAULT_ADMIN role to TemporaryAdmin + parameters.topUpGateway.maxValidatorsPerTopUp, + parameters.topUpGateway.minBlockDistance, + parameters.topUpGateway.maxRootAge, + parameters.topUpGateway.targetBalanceGwei, + parameters.topUpGateway.minTopUpGwei, + ]; + + logStartReview(); + await logArgs("Lido", constructorArgs.Lido); + await logArgs("Accounting", constructorArgs.Accounting); + await logArgs("AccountingOracle", constructorArgs.AccountingOracle); + await logArgs("ValidatorsExitBusOracle", constructorArgs.ValidatorsExitBusOracle); + await logArgs("StakingRouter", constructorArgs.StakingRouter); + await logArgs("TopUpGateway", constructorArgs.TopUpGateway); + await logArgs("TopUpGateway", topUpGatewayInitArgs, "initialize", "proxy init."); + await logArgs("DepositSecurityModule", constructorArgs.DepositSecurityModule); + await logArgs("OracleReportSanityChecker", constructorArgs.OracleReportSanityChecker); + await logArgs("ConsolidationGateway", constructorArgs.ConsolidationGateway); + await logConfirmReview(); + + // + // Deploy Lido new
implementation + // + await deployImplementation(Sk.appLido, "Lido", deployer, constructorArgs.Lido); + + // + // Deploy Accounting & AccountingOracle + // + await deployImplementation(Sk.accounting, "Accounting", deployer, constructorArgs.Accounting); + await deployImplementation(Sk.accountingOracle, "AccountingOracle", deployer, constructorArgs.AccountingOracle); + + // + // Deploy ValidatorsExitBusOracle + // + await deployImplementation( + Sk.validatorsExitBusOracle, + "ValidatorsExitBusOracle", + deployer, + constructorArgs.ValidatorsExitBusOracle, + ); + + // + // Deploy libraries & StakingRouter + // + const beaconChainDepositor = await deployWithoutProxy(Sk.beaconChainDepositor, "BeaconChainDepositor", deployer); + const minFirstAllocationStrategy = await deployWithoutProxy( + Sk.minFirstAllocationStrategy, + "MinFirstAllocationStrategy", + deployer, + ); + const srLib = await deployWithoutProxy(Sk.srLib, "SRLib", deployer, [], "address", true, { + libraries: { + MinFirstAllocationStrategy: minFirstAllocationStrategy.address, + }, + }); + + await deployImplementation(Sk.stakingRouter, "StakingRouter", deployer, constructorArgs.StakingRouter, { + libraries: { + BeaconChainDepositor: beaconChainDepositor.address, + SRLib: srLib.address, + }, + }); + + // + // Deploy TopUpGateway + // + const topUpGateway = await deployBehindOssifiableProxy( + Sk.topUpGateway, + "TopUpGateway", + proxyContractsOwner, + deployer, + constructorArgs.TopUpGateway, + null, // implementation + true, // withStateFile + undefined, // factoryOptions + await encodeFunctionCall>("TopUpGateway", "initialize", topUpGatewayInitArgs), + ); + + // + // Deploy DepositSecurityModule + // + const depositSecurityModule_ = await deployWithoutProxy( + Sk.depositSecurityModule, + "DepositSecurityModule", + deployer, + constructorArgs.DepositSecurityModule, + ); + const depositSecurityModule = await loadContract( + "DepositSecurityModule", + depositSecurityModule_.address, + ); + await 
depositSecurityModule.setOwner(tempAdmin.address); + + // + // Deploy OracleReportSanityChecker + // + const oracleReportSanityChecker = await deployWithoutProxy( + Sk.oracleReportSanityChecker, + "OracleReportSanityChecker", + deployer, + constructorArgs.OracleReportSanityChecker, + ); + + // + // Deploy Consolidation Gateway + // + const consolidationGateway = await deployWithoutProxy( + Sk.consolidationGateway, + "ConsolidationGateway", + deployer, + constructorArgs.ConsolidationGateway, + ); + + // + // Deploy Consolidation Bus + // + const consolidationBusConstructorArgs: ConstructorArgs = [consolidationGateway.address]; + const consolidationBusInitArgs: InitializeArgs = [ + tempAdmin.address, // grant DEFAULT_ADMIN role to TemporaryAdmin + parameters.consolidationBus.initialBatchSize, + parameters.consolidationBus.initialMaxGroupsInBatch, + parameters.consolidationBus.initialExecutionDelay, + ]; + + logStartReview(); + await logArgs("ConsolidationBus", consolidationBusConstructorArgs); + await logArgs("ConsolidationBus", consolidationBusInitArgs, "initialize", "proxy init."); + await logConfirmReview(); + + const consolidationBus = await deployBehindOssifiableProxy( + Sk.consolidationBus, + "ConsolidationBus", + proxyContractsOwner, + deployer, + consolidationBusConstructorArgs, + null, // implementation + true, // withStateFile + undefined, // factoryOptions + await encodeFunctionCall>( + "ConsolidationBus", + "initialize", + consolidationBusInitArgs, + ), + ); + + // + // Deploy Consolidation Migrator + // + const consolidationMigratorConstructorArgs: ConstructorArgs = [ + stakingRouterAddress, + consolidationBus.address, + parameters.consolidationMigrator.sourceModuleId, + parameters.consolidationMigrator.targetModuleId, + ]; + const consolidationMigratorInitArgs: InitializeArgs = [ + tempAdmin.address, // grant DEFAULT_ADMIN role to TemporaryAdmin + ]; + + logStartReview(); + await logArgs("ConsolidationMigrator", consolidationMigratorConstructorArgs); +
await logArgs("ConsolidationMigrator", consolidationMigratorInitArgs, "initialize", "proxy init."); + await logConfirmReview(); + + const consolidationMigrator = await deployBehindOssifiableProxy( + Sk.consolidationMigrator, + "ConsolidationMigrator", + proxyContractsOwner, + deployer, + consolidationMigratorConstructorArgs, + null, // implementation + true, // withStateFile + undefined, // factoryOptions + await encodeFunctionCall>( + "ConsolidationMigrator", + "initialize", + consolidationMigratorInitArgs, + ), + ); + + // + // Deploy Withdrawal Vault implementation + // + const withdrawalVaultConstructorArgs: ConstructorArgs = [ + lidoAddress, + treasuryAddress, + triggerableWithdrawalsGatewayAddress, + consolidationGateway.address, + parameters.withdrawalVault.withdrawalRequestContract, + parameters.withdrawalVault.consolidationRequestContract, + ]; + + logStartReview(); + await logArgs("WithdrawalVault", withdrawalVaultConstructorArgs); + await logConfirmReview(); + + await deployImplementation(Sk.withdrawalVault, "WithdrawalVault", deployer, withdrawalVaultConstructorArgs); + + // todo match locator vs state + // + // Deploy Lido Locator new implementation + // + const locatorConfig: LidoLocator.ConfigStruct = { + accountingOracle: await locator.accountingOracle(), + depositSecurityModule: depositSecurityModule.address, + elRewardsVault: await locator.elRewardsVault(), + lido: lidoAddress, + oracleReportSanityChecker: oracleReportSanityChecker.address, + postTokenRebaseReceiver: await locator.postTokenRebaseReceiver(), + burner: await locator.burner(), + stakingRouter: stakingRouterAddress, + treasury: await locator.treasury(), + validatorsExitBusOracle: await locator.validatorsExitBusOracle(), + withdrawalQueue: await locator.withdrawalQueue(), + withdrawalVault: await locator.withdrawalVault(), + oracleDaemonConfig: await locator.oracleDaemonConfig(), + validatorExitDelayVerifier: await locator.validatorExitDelayVerifier(), + triggerableWithdrawalsGateway: 
triggerableWithdrawalsGatewayAddress, + consolidationGateway: consolidationGateway.address, + accounting: accountingAddress, + predepositGuarantee: await locator.predepositGuarantee(), + wstETH: await locator.wstETH(), + vaultHub: await locator.vaultHub(), + vaultFactory: await locator.vaultFactory(), + lazyOracle: await locator.lazyOracle(), + operatorGrid: await locator.operatorGrid(), + topUpGateway: topUpGateway.address, + }; + + const lidoLocatorConstructorArgs: ConstructorArgs = [locatorConfig]; + + logStartReview(); + await logArgs("LidoLocator", lidoLocatorConstructorArgs); + await logConfirmReview(); + + const lidoLocatorImpl = await deployImplementation( + Sk.lidoLocator, + "LidoLocator", + deployer, + lidoLocatorConstructorArgs, + ); + + // + // Complete setup: grant all roles to agent, transfer admin + // + const tempAdminCompleteSetupArgs: MethodArgs = [ + lidoLocatorImpl.address, + easyTrackAddress, + resealManagerAddress, + circuitBreakerAddress, + consolidationMigrator.address, + parameters.consolidationMigrator.committee!, + consolidationBus.address, + parameters.topUpGateway.depositor!, + await locator.depositSecurityModule(), + ]; + + logStartReview(); + await logArgs("UpgradeTemporaryAdmin", tempAdminCompleteSetupArgs, "completeSetup", "complete initial setup"); + await logConfirmReview(); + + await makeTx( + tempAdmin, + "completeSetup", + [ + lidoLocatorImpl.address, + easyTrackAddress, + resealManagerAddress, + circuitBreakerAddress, + consolidationMigrator.address, + parameters.consolidationMigrator.committee!, + consolidationBus.address, + parameters.topUpGateway.depositor!, + await locator.depositSecurityModule(), + ], + { + from: deployer, + }, + ); +} diff --git a/scripts/upgrade/steps/0100-deploy-v3-02-implementations.ts b/scripts/upgrade/steps/0100-deploy-v3-02-implementations.ts index bbae416a90..57049cecd1 100644 --- a/scripts/upgrade/steps/0100-deploy-v3-02-implementations.ts +++ 
b/scripts/upgrade/steps/0100-deploy-v3-02-implementations.ts @@ -26,7 +26,7 @@ export async function main(): Promise { const locatorAddress = state[Sk.lidoLocator].proxy.address; const lidoAddress = state[Sk.appLido].proxy.address; const hashConsensusAddress = state[Sk.hashConsensusForAccountingOracle].address; - const { maxRelativeShareLimitBP } = readUpgradeParameters(true).vaultHub; + const { maxRelativeShareLimitBP } = readUpgradeParameters(true).vaultHub!; if (maxRelativeShareLimitBP === undefined) { throw new Error("vaultHub.maxRelativeShareLimitBP is not set in upgrade parameters"); } diff --git a/scripts/upgrade/steps/0150-deploy-mock-et-factories.ts b/scripts/upgrade/steps/0150-deploy-mock-et-factories.ts new file mode 100644 index 0000000000..967629ff70 --- /dev/null +++ b/scripts/upgrade/steps/0150-deploy-mock-et-factories.ts @@ -0,0 +1,92 @@ +import { artifacts, ethers } from "hardhat"; +import { readUpgradeParameters, writeUpgradeEasyTrackFactoryAddress } from "scripts/utils/upgrade"; + +import { + bl, + deployWithoutProxy, + isContractDeployed, + log, + or, + readNetworkState, + Sk, + updateObjectInState, + yl, +} from "lib"; + +type EasyTrackFactoriesStateMap = Partial>; +const EASY_TRACK_NEW_FACTORIES_SECTION = "easyTrack.newFactories"; + +export const easyTrackFactoriesStateMap = { + [Sk.stakingRouter]: ["UpdateStakingModuleShareLimits"], + [Sk.consolidationMigrator]: ["AllowConsolidationPair"], + [Sk.csm_CSM]: ["SetMerkleGateTree", "ReportWithdrawalsForSlashedValidators", "SettleGeneralDelayedPenalty"], + [Sk.csm_CM]: [ + "SetMerkleGateTree", + "ReportWithdrawalsForSlashedValidators", + "SettleGeneralDelayedPenalty", + "CreateOrUpdateOperatorGroup", + ], +} satisfies EasyTrackFactoriesStateMap; + +function getFactoryParamName(contractKey: Sk, etName: string): string { + if (contractKey === Sk.csm_CSM) { + return `${etName}ForCSM`; + } + + if (contractKey === Sk.csm_CM) { + return `${etName}ForCM`; + } + + return etName; +} + +export async function 
main() { + log.splitter(); + log.header("[Mocks] Deploy EasyTrack factories"); + + const deployer = (await ethers.provider.getSigner()).address; + let state = readNetworkState(); + const parameters = readUpgradeParameters(); + + // deploy ET + + for (const [contractKey, etNames] of Object.entries(easyTrackFactoriesStateMap) as [Sk, string[]][]) { + const deployedFactories: Record = { ...(state[contractKey]?.easyTrackFactories ?? {}) }; + + for (const etName of etNames) { + const paramName = getFactoryParamName(contractKey, etName) as keyof typeof parameters.easyTrack.newFactories; + const paramAddress = parameters.easyTrack.newFactories[paramName]; + const isParamDeployed = paramAddress ? await isContractDeployed(paramAddress) : false; + + if (isParamDeployed) { + log.success(`Skip ${yl(paramName)}[${bl(paramAddress)}] - found in parameters`); + deployedFactories[etName] = paramAddress; + state = updateObjectInState(contractKey, { easyTrackFactories: deployedFactories }); + continue; + } + + const stateAddress = deployedFactories[etName]; + const isStateDeployed = stateAddress ? 
await isContractDeployed(stateAddress) : false; + + if (isStateDeployed) { + log.success(`Skip ${yl(paramName)}[${bl(stateAddress)}] - found in state`); + writeUpgradeEasyTrackFactoryAddress(EASY_TRACK_NEW_FACTORIES_SECTION, paramName, stateAddress); + continue; + } + + const preferredArtifactName = `${etName}Mock`; + let artifactName = preferredArtifactName; + try { + await artifacts.readArtifact(preferredArtifactName); + } catch { + artifactName = "EasyTrackFactoryMock"; + } + + const deployedContract = await deployWithoutProxy(contractKey, artifactName, deployer, [], "address", false); + log.success(`Deployed ${or(artifactName)} aa ${yl(paramName)}[${bl(deployedContract.address)}]`); + deployedFactories[etName] = deployedContract.address; + state = updateObjectInState(contractKey, { easyTrackFactories: deployedFactories }); + writeUpgradeEasyTrackFactoryAddress(EASY_TRACK_NEW_FACTORIES_SECTION, paramName, deployedContract.address); + } + } +} diff --git a/scripts/upgrade/steps/0200-deploy-upgrade-template.ts b/scripts/upgrade/steps/0200-deploy-upgrade-template.ts new file mode 100644 index 0000000000..a045490aa8 --- /dev/null +++ b/scripts/upgrade/steps/0200-deploy-upgrade-template.ts @@ -0,0 +1,127 @@ +import { ethers } from "hardhat"; +import { checkArtifactDeployedAndLog, readUpgradeParameters } from "scripts/utils/upgrade"; + +import { + IAragonKernel, + IWithdrawalsManagerProxy__factory, + OssifiableProxy__factory, + UpgradeTemplate__factory, +} from "typechain-types"; +import { UpgradeParametersStruct } from "typechain-types/contracts/upgrade/UpgradeConfig"; + +import { + ConstructorArgs, + deployWithoutProxy, + getAddress, + loadContract, + logArgs, + logConfirmReview, + logScriptHeader, + logStartReview, + readNetworkState, + Sk, +} from "lib"; + +export async function skip(): Promise { + return await checkArtifactDeployedAndLog(Sk.upgradeTemplate); +} + +export async function main() { + const state = readNetworkState(); + const parameters = 
readUpgradeParameters(); + const deployer = await ethers.provider.getSigner(); + + await logScriptHeader("SRv3/CMv2 — Deploy UpgradeTemplate contract", deployer.address); + + const locatorAddress = getAddress(Sk.lidoLocator, state); + const locatorProxy = OssifiableProxy__factory.connect(locatorAddress, deployer); + const oldLocatorImpl = await locatorProxy.proxy__getImplementation(); + + const kernel = await loadContract("IAragonKernel", getAddress(Sk.aragonKernel, state)); + const oldLidoImpl = await kernel.getApp(await kernel.APP_BASES_NAMESPACE(), state[Sk.appLido].aragonApp.id); + + const accountingProxy = OssifiableProxy__factory.connect(getAddress(Sk.accounting, state), deployer); + const accountingOracleProxy = OssifiableProxy__factory.connect(getAddress(Sk.accountingOracle, state), deployer); + const stakingRouterProxy = OssifiableProxy__factory.connect(getAddress(Sk.stakingRouter, state), deployer); + const withdrawalVaultProxy = IWithdrawalsManagerProxy__factory.connect( + getAddress(Sk.withdrawalVault, state), + deployer, + ); + const validatorsExitBusOracleProxy = OssifiableProxy__factory.connect( + getAddress(Sk.validatorsExitBusOracle, state), + deployer, + ); + + const upgradeParams: UpgradeParametersStruct = { + locator: locatorAddress, + agent: getAddress(Sk.appAgent, state), + voting: getAddress(Sk.appVoting, state), + dualGovernance: getAddress(Sk.dgDualGovernance, state), + resealManager: getAddress(Sk.resealManager, state), + easyTrack: getAddress(Sk.easyTrack, state), + circuitBreaker: getAddress(Sk.circuitBreaker, state), + + newFactories: parameters.easyTrack.newFactories, + oldFactories: parameters.easyTrack.oldFactories, + + coreUpgrade: { + oldLocatorImpl, + oldLidoImpl, + oldAccountingImpl: await accountingProxy.proxy__getImplementation(), + oldAccountingOracleImpl: await accountingOracleProxy.proxy__getImplementation(), + oldStakingRouterImpl: await stakingRouterProxy.proxy__getImplementation(), + oldWithdrawalVaultImpl: await 
withdrawalVaultProxy.implementation(), + oldValidatorsExitBusOracleImpl: await validatorsExitBusOracleProxy.proxy__getImplementation(), + + newLocatorImpl: state[Sk.lidoLocator].implementation.address, + newLidoImpl: state[Sk.appLido].implementation.address, + newAccountingImpl: state[Sk.accounting].implementation.address, + newAccountingOracleImpl: state[Sk.accountingOracle].implementation.address, + newStakingRouterImpl: state[Sk.stakingRouter].implementation.address, + newWithdrawalVaultImpl: state[Sk.withdrawalVault].implementation.address, + newValidatorsExitBusOracleImpl: state[Sk.validatorsExitBusOracle].implementation.address, + consolidationBusImpl: state[Sk.consolidationBus].implementation.address, + consolidationMigratorImpl: state[Sk.consolidationMigrator].implementation.address, + + // TopUp GW + topUpGatewayImpl: state[Sk.topUpGateway].implementation.address, + topUpGateway: getAddress(Sk.topUpGateway, state), + topUpGatewayDepositor: parameters.topUpGateway.depositor!, + + // TW GW + twMaxExitRequestsLimit: parameters.triggerableWithdrawalsGateway.maxExitRequestsLimit, + twExitsPerFrame: parameters.triggerableWithdrawalsGateway.exitsPerFrame, + twFrameDurationInSec: parameters.triggerableWithdrawalsGateway.frameDurationInSec, + + // Oracle configs + aoConsensusVersion: parameters.accountingOracle.consensusVersion, + veboMaxValidatorsPerReport: parameters.validatorsExitBusOracle.maxValidatorsPerReport, + veboMaxExitBalanceEth: parameters.validatorsExitBusOracle.maxExitBalanceEth, + veboBalancePerFrameEth: parameters.validatorsExitBusOracle.balancePerFrameEth, + veboFrameDurationInSec: parameters.validatorsExitBusOracle.frameDurationInSec, + veboConsensusVersion: parameters.validatorsExitBusOracle.consensusVersion, + + // Consolidation + consolidationBus: getAddress(Sk.consolidationBus, state), + + consolidationMigrator: getAddress(Sk.consolidationMigrator, state), + curatedModuleCommittee: parameters.consolidationMigrator.committee!, + 
consolidationGatewayPauser: parameters.consolidationGateway.pauser!, + + lidoDepositsReserveTarget: parameters.lido.lidoDepositsReserveTarget, + }, + csmUpgrade: parameters.csmUpgrade, + curatedModule: parameters.curatedModule, + }; + + const upgradeTemplateConstructorArgs: ConstructorArgs = [ + upgradeParams, + parameters.upgradeVoteScript.expiryTimestamp, + ]; + + logStartReview(); + await logArgs("UpgradeTemplate", upgradeTemplateConstructorArgs); + await logConfirmReview(); + + await deployWithoutProxy(Sk.upgradeTemplate, "UpgradeTemplate", deployer.address, upgradeTemplateConstructorArgs); +} diff --git a/scripts/upgrade/steps/0300-deploy-upgrade-vote-script.ts b/scripts/upgrade/steps/0300-deploy-upgrade-vote-script.ts new file mode 100644 index 0000000000..02e88fb36c --- /dev/null +++ b/scripts/upgrade/steps/0300-deploy-upgrade-vote-script.ts @@ -0,0 +1,51 @@ +import { ethers } from "hardhat"; +import { checkArtifactDeployedAndLog, readUpgradeParameters } from "scripts/utils/upgrade"; + +import { UpgradeVoteScript__factory } from "typechain-types"; +import { UpgradeVoteScript } from "typechain-types/contracts/upgrade/UpgradeVoteScript"; + +import { + ConstructorArgs, + deployWithoutProxy, + logArgs, + logConfirmReview, + logScriptHeader, + logStartReview, + readNetworkState, + Sk, +} from "lib"; + +export async function skip(): Promise { + return await checkArtifactDeployedAndLog(Sk.upgradeVoteScript); +} + +export async function main() { + const state = readNetworkState(); + const parameters = readUpgradeParameters(); + const deployer = (await ethers.provider.getSigner()).address; + + await logScriptHeader("SRv3/CMv2 — Deploy UpgradeVotingScript contract", deployer); + + const template = state[Sk.upgradeTemplate]; + + const votingScriptParams: UpgradeVoteScript.ScriptParamsStruct = { + upgradeTemplate: template.address, + timeConstraints: parameters.upgradeVoteScript.timeConstraintsContract, + enabledDaySpanStart: 
parameters.upgradeVoteScript.enabledDaySpanStart, + enabledDaySpanEnd: parameters.upgradeVoteScript.enabledDaySpanEnd, + }; + const upgradeVoteScriptConstructorArgs: ConstructorArgs = [votingScriptParams]; + + logStartReview(); + await logArgs("UpgradeVoteScript", upgradeVoteScriptConstructorArgs); + await logConfirmReview(); + + await deployWithoutProxy(Sk.upgradeVoteScript, "UpgradeVoteScript", deployer, [ + [ + template.address, + parameters.upgradeVoteScript.timeConstraintsContract, + parameters.upgradeVoteScript.enabledDaySpanStart, + parameters.upgradeVoteScript.enabledDaySpanEnd, + ], + ]); +} diff --git a/scripts/upgrade/steps/0500-mock-dg-voting.ts b/scripts/upgrade/steps/0500-mock-dg-voting.ts new file mode 100644 index 0000000000..d4b6fcb320 --- /dev/null +++ b/scripts/upgrade/steps/0500-mock-dg-voting.ts @@ -0,0 +1,8 @@ +import { mockDGAragonVoting } from "scripts/utils/upgrade"; + +import { readNetworkState } from "lib/state-file"; + +export async function main(): Promise> { + const state = readNetworkState(); + return mockDGAragonVoting(state); +} diff --git a/scripts/upgrade/steps/0500-mock-upgrade.ts b/scripts/upgrade/steps/0500-mock-upgrade.ts new file mode 100644 index 0000000000..53f60fdf5d --- /dev/null +++ b/scripts/upgrade/steps/0500-mock-upgrade.ts @@ -0,0 +1,81 @@ +import { ethers } from "hardhat"; +import { VoteItem } from "scripts/utils/omnibus"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { IDualGovernance, UpgradeTemplate, UpgradeVoteScript } from "typechain-types"; + +import { + ether, + getAddress, + getAddressValidated, + impersonate, + isContractDeployed, + loadContract, + log, + or, + readNetworkState, + Sk, +} from "lib"; + +export async function skip(): Promise { + const state = readNetworkState(); + // NOT skip if contract object exists in deployed state but address set as empty string or zero address + const address = getAddressValidated(Sk.upgradeTemplate, state); + // NOT skip if 
contract not deployed yet + const isDeployed = !!(address && (await isContractDeployed(address))); + + if (isDeployed) { + log(`UpgradeTemplate already deployed at ${address}`); + const template = await loadContract("UpgradeTemplate", address); + + const isFinished = await template.isUpgradeFinished(); + log(`isUpgradeFinished is ${isFinished}`); + return isFinished; + } + + return false; +} + +export async function main() { + const deployer = await ethers.provider.getSigner(); + const state = readNetworkState(); + + const voteScript = await loadContract( + "UpgradeVoteScript", + getAddress(Sk.upgradeVoteScript, state), + deployer, + ); + + // non-DG items + const voteItems = (await voteScript.getVotingVoteItems()) as VoteItem[]; + const voting = await impersonate(getAddress(Sk.appVoting, state), ether("100")); + await execVoteItems(voteItems, voting); + + // DG items + // const voteItemsDG = (await voteScript.getVoteItemsRaw()) as VoteItem[]; + // const agent = await impersonate(getAddress(Sk.appAgent, state), ether("100")); + // await execVoteItems(voteItemsDG, agent); + + const dg = await loadContract("IDualGovernance", getAddress(Sk.dgDualGovernance, state)); + const proposers = await dg.getProposers(); + if (!proposers.length) { + throw new Error("No proposer found in DualGovernance."); + } + + const voteItemsDG = (await voteScript.getVoteItems()) as VoteItem[]; + const executor = await impersonate(proposers[0].executor, ether("100")); + await execVoteItems(voteItemsDG, executor); +} + +async function execVoteItems(voteItems: VoteItem[], executor: HardhatEthersSigner) { + for (const item of voteItems) { + log(`Execute vote item: ${or(item.description)}`); + const tx = await executor.sendTransaction({ + to: item.call.to, + data: ethers.hexlify(item.call.data), + value: 0n, + }); + await tx.wait(); + } +} diff --git a/scripts/upgrade/steps/1000-mock-voting.ts b/scripts/upgrade/steps/1000-mock-voting.ts new file mode 100644 index 0000000000..ee1f1c27d9 --- 
/dev/null +++ b/scripts/upgrade/steps/1000-mock-voting.ts @@ -0,0 +1,8 @@ +import { mockAragonVoting } from "scripts/utils/upgrade"; + +import { readNetworkState } from "lib"; + +export async function main() { + const state = readNetworkState(); + await mockAragonVoting(state); +} diff --git a/scripts/upgrade/upgrade-params-devnet1.toml b/scripts/upgrade/upgrade-params-devnet1.toml new file mode 100644 index 0000000000..af13076f7e --- /dev/null +++ b/scripts/upgrade/upgrade-params-devnet1.toml @@ -0,0 +1,173 @@ +# Lido Protocol Upgrade Parameters - Devnet Configuration + +[easyTrack.newFactories] +UpdateStakingModuleShareLimits = "0x7ed96eEfB57E1e4C6d64D19818F3A78664318829" +AllowConsolidationPair = "0xD6aC302915790074c7291c3bD64ddD5aeC5A7708" + +SetMerkleGateTreeForCSM = "0xF417BBcc9682B6d5B8312e8F8588Cd6489C5B85F" +ReportWithdrawalsForSlashedValidatorsForCSM = "0x8418692bf1390F1a9f2E3996475612514F746a77" +SettleGeneralDelayedPenaltyForCSM = "0xF3865826309f55cD3E104a22Ab9d04724d2D9020" + +SetMerkleGateTreeForCM = "0x9BC48d538C9d8dd7a559f4460CCb2327fF39C1f4" +ReportWithdrawalsForSlashedValidatorsForCM = "0x612d23ba265ad1fDc62B2e3A08722668E1D0ECa3" +SettleGeneralDelayedPenaltyForCM = "0xd51c0d3C9BF00FF5690f901BBB8E85e976374BAE" +CreateOrUpdateOperatorGroupForCM = "0x29D272Db50946716466cf00bC7d4bc435d8Ea44E" +[easyTrack.oldFactories] +# TODO: fill with corresponding devnet Easy Track factory address before execution +CSMSettleElStealingPenalty = "0x00000000000000000000000000000000000eee07" +# TODO: fill with corresponding devnet Easy Track factory address before execution +CSMSetVettedGateTree = "0x00000000000000000000000000000000000eee16" + +# Oracle report sanity checker configuration +[oracleReportSanityChecker] +exitedEthAmountPerDayLimit = 57600 # Exited ETH amount per day limit +appearedEthAmountPerDayLimit = 57600 # Appeared ETH amount per day limit +annualBalanceIncreaseBPLimit = 1000 # Annual balance increase limit (BP) +simulatedShareRateDeviationBPLimit = 
250 # Simulated share rate deviation limit (BP) +maxBalanceExitRequestedPerReportInEth = 19200 # Maximum exit ETH per report (600*32) +maxEffectiveBalanceWeightWCType01 = 32 # maxEB equivalent weight for WC type 1 +maxEffectiveBalanceWeightWCType02 = 2048 # maxEB equivalent weight for WC type 2 +maxItemsPerExtraDataTransaction = 8 # Maximum items per extra data transaction +maxNodeOperatorsPerExtraDataItem = 24 # Maximum node operators per extra data item +requestTimestampMargin = 128 # Request timestamp margin +maxPositiveTokenRebase = 5000000 # Maximum positive token rebase +maxCLBalanceDecreaseBP = 360 # Max CL balance decrease over sliding window (BP, 360 = 3.6%) +clBalanceOraclesErrorUpperBPLimit = 50 # CL balance oracles error upper limit (BP) +consolidationEthAmountPerDayLimit = 93375 # Consolidation ETH amount per day limit +exitedValidatorEthAmountLimit = 32 # Exited validator ETH amount limit in ETH units +externalPendingBalanceCapEth = 300 # Extra external pending balance cap for bounded side deposits / top-ups + +[lido] +lidoDepositsReserveTarget = "1000000000000000000000" # 1000 eth + +[withdrawalVault] +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7002.md#specification +withdrawalRequestContract = "0x00000961Ef480Eb55e80D19ad83579A64c007002" +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7251.md#specification +consolidationRequestContract = "0x0000BBdDc7CE488642fb579F8B00f3a590007251" + +[triggerableWithdrawalsGateway] +maxExitRequestsLimit = 250 +exitsPerFrame = 1 +frameDurationInSec = 240 + +# Accounting oracle configuration +[accountingOracle] +consensusVersion = 6 + +# Validators exit bus oracle configuration +[validatorsExitBusOracle] +maxValidatorsPerReport = 600 +maxExitBalanceEth = 416000 +balancePerFrameEth = 32 +frameDurationInSec = 48 +consensusVersion = 5 + +# Deposit security module configuration +[depositSecurityModule] +maxOperatorsPerUnvetting = 200 # Maximum operators per unvetting +pauseIntentValidityPeriodBlocks
= 6646 # Pause intent validity period in blocks + +[consolidationGateway] +maxConsolidationRequestsLimit = 2900 # Maximum number of consolidations requests that can be processed +consolidationsPerFrame = 1 # Number of consolidations processed per frame +frameDurationInSec = 36 # Duration of each processing frame in seconds +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (before fork) +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (after fork) +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +pauser = "0x8943545177806ED17B9F23F0a21ee5948eCaa776" # devnet deployer + +[consolidationBus] +initialBatchSize = 350 # Max number of requests in a batch +initialMaxGroupsInBatch = 10 # Max source groups in a batch +initialExecutionDelay = 0 # Delay before pending batch execution + +[consolidationMigrator] +sourceModuleId = 1 # Source staking module ID +targetModuleId = 4 # Target staking module ID, for scratch deploy testing, we use moduleId=1 which corresponds to NOR. 
+committee = "0x8943545177806ED17B9F23F0a21ee5948eCaa776" # devnet deployer + +# Top-up gateway configuration for validator top-ups via Merkle proofs +[topUpGateway] +maxValidatorsPerTopUp = 100 # Maximum validators per top-up call +minBlockDistance = 1 # Minimum block distance between top-ups +maxRootAge = 300 # Maximum allowed age of beacon root relative to current block timestamp +targetBalanceGwei = 2046750000000 +minTopUpGwei = 1000000000 +# Generalized indices for validator/balance/pending state verification +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" +# Pivot slot for fork-aware gIndex selection +pivotSlot = 0 +depositor = "0x8943545177806ED17B9F23F0a21ee5948eCaa776" # devnet deployer + +# StakingRouter configuration +[stakingRouter] +maxEBType1 = "32000000000000000000" # Max EB value for WC type 1 +maxEBType2 = "2048000000000000000000" # Max EB value for WC type 2 + +[csmUpgrade] +csmProxy = "0x531C17C4E2Eef4cE6aA72CCA0aacf41FD06bcA84" +csmImpl = "0xc8249cbe40477bAFfB7c3944A54e8Bcc04442E61" +parametersRegistryProxy = "0xb9ecA5123336eA9ea9F95B6E6F04dDedD148c6CE" +parametersRegistryImpl = "0x21a846DB6B1fde38874Da0175195ed257bBee656" +feeOracleProxy = "0x23f7a4cE6FF9d64042D2e1eE4d8Ab20c3dBF24AF" +feeOracleImpl = "0x5Ab1E1A3F85EA5b350ff524bC6a2E74d212E09D8" +feeOracleConsensusVersion = 4 +vettedGateProxy = "0xd04E46D444cD12eD1136Db229d9eB7357b0D4685" +# TODO: replace with deployed IDVTC gate before execution +identifiedDVTClusterGate = "0x0000000000000000000000000000000000000000" +# TODO: replace with deployed IDVTC curve setup before execution +identifiedDVTClusterCurveSetup = "0x0000000000000000000000000000000000000000" +# TODO: replace with expected IDVTC bond curve id before execution +identifiedDVTClusterBondCurveId = 0 +vettedGateImpl = "0x2FbCe6cF3DA01Fc80A67491574e04396379d8C21" +accountingProxy = 
"0x38339391CFBa34802b224408dd78de0B91bF0C66" +accountingImpl = "0xE50288Ddfe4bCDfC9c47CAE59C55A525921Cf6f8" +feeDistributorProxy = "0x5D80cA6C7c3FB6b999286a9c2325D6b07dd25296" +feeDistributorImpl = "0xCf7f0285Df627aFDcFAA3f8FcAa9EC4fDC892815" +exitPenaltiesProxy = "0xFBEd979da64dEd080ba2ef2bD297B000A4e8391b" +exitPenaltiesImpl = "0x386C57D258383052D2ee23F01081ecd3b0bD8cd7" +strikesProxy = "0x19628bBE586737722a14eb898A5a787EDE955662" +strikesImpl = "0x948c0D6d635010314e010DcEB86C72Bfc10b69bB" +oldPermissionlessGate = "0x5676715F8537DCbC5a13003Bf1bb7EF6a3196dB9" +newPermissionlessGate = "0xD29438F352dF79e948B9F79f273e3Fda528C5C06" +oldVerifier = "0x9De74784543D1428Fa73AAFF05B3E334769c7C50" +newVerifier = "0x50d6EDfE6669e91dcab40503E5Aa1f49A6933F19" +ejector = "0xE9D7a318F531AB07abb7E5560aa55E23365a9515" +csmCommittee = "0xE25583099BA105D9ec0A67f5Ae86D90e50036425" + +[curatedModule] +module = "0x1c5A849640229610800c9E27F84039fCb6912f23" +# TODO: replace with deployed curated gates before execution +curatedGates = [ + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", +] +# TODO: replace with deployed curated verifier before execution +verifier = "0x0000000000000000000000000000000000000000" +circuitBreakerPauser = "0x8943545177806ED17B9F23F0a21ee5948eCaa776" +moduleName = "curated-onchain-v2" +stakeShareLimit = 2000 +priorityExitShareThreshold = 2500 +stakingModuleFee = 800 +treasuryFee = 200 +maxDepositsPerBlock = 30 +minDepositBlockDistance = 25 +feeOracleConsensusVersion = 4 +hashConsensusInitialEpoch = 2694 # Mon Apr 20 15:50:07 CEST 2026 + +[upgradeVoteScript] +# Expiry timestamp after which the upgrade transaction will revert +# Format: Unix timestamp (seconds since epoch) +# The 
upgrade transaction must be executed before this deadline +expiryTimestamp = 1780272000 # June 1, 2026 at 00:00:00 UTC +timeConstraintsContract = "0x2a30F5aC03187674553024296bed35Aa49749DDa" +enabledDaySpanStart = 0 # 00:00 UTC +enabledDaySpanEnd = 86400 # 24:00 UTC diff --git a/scripts/upgrade/upgrade-params-hoodi.toml b/scripts/upgrade/upgrade-params-hoodi.toml index 78c690664e..7f83355b2a 100644 --- a/scripts/upgrade/upgrade-params-hoodi.toml +++ b/scripts/upgrade/upgrade-params-hoodi.toml @@ -1,6 +1,172 @@ # Lido Protocol Upgrade Parameters - Hoodi Configuration -# This file contains deployment parameters for upgrading Lido protocol contracts on Ethereum Hoodi testnet +[easyTrack.newFactories] +UpdateStakingModuleShareLimits = "0xD63cf25df1bA6144db27A81A98120Dfc53dE4540" +AllowConsolidationPair = "0x22D36e7616F541A527989C5652fDA4d527bB461C" + +SetMerkleGateTreeForCSM = "0xf71fcB20B9FB8468653Bcb24E31F39bc069D5995" +ReportWithdrawalsForSlashedValidatorsForCSM = "0x4EaB04775837A6F0218750A10454119f349258FE" +SettleGeneralDelayedPenaltyForCSM = "0xd0c38B2F0C1F760976dA010C1c35D828331Ff9E2" + +SetMerkleGateTreeForCM = "0x5194cC02B6F477B4a23DFA422fFC238c8B5b1736" +ReportWithdrawalsForSlashedValidatorsForCM = "0x6E40FED7c28bAA93a798cA10f8A93965a19eC52e" +SettleGeneralDelayedPenaltyForCM = "0x3486B872768D361309e405A046C4BF995c21CC6c" +CreateOrUpdateOperatorGroupForCM = "0x44D9b39bBdc2182Aa1af6f16f8F55E0eA038294d" + +[easyTrack.oldFactories] +CSMSettleElStealingPenalty = "0x5c0af5b9f96921d3F61503e1006CF0ab9867279E" +CSMSetVettedGateTree = "0xa890fc73e1b771Ee6073e2402E631c312FF92Cd9" + +# Oracle report sanity checker configuration +[oracleReportSanityChecker] +exitedEthAmountPerDayLimit = 57600 # Exited ETH amount per day limit +appearedEthAmountPerDayLimit = 57600 # Appeared ETH amount per day limit +annualBalanceIncreaseBPLimit = 1000 # Annual balance increase limit (BP) +simulatedShareRateDeviationBPLimit = 250 # Simulated share rate deviation limit (BP) 
+maxBalanceExitRequestedPerReportInEth = 19200 # Maximum exit ETH per report (600*32) +maxEffectiveBalanceWeightWCType01 = 32 # maxEB equivalent weight for WC type 1 +maxEffectiveBalanceWeightWCType02 = 2048 # maxEB equivalent weight for WC type 2 +maxItemsPerExtraDataTransaction = 8 # Maximum items per extra data transaction +maxNodeOperatorsPerExtraDataItem = 24 # Maximum node operators per extra data item +requestTimestampMargin = 128 # Request timestamp margin +maxPositiveTokenRebase = 5000000 # Maximum positive token rebase +maxCLBalanceDecreaseBP = 360 # Max CL balance decrease over sliding window (BP, 360 = 3.6%) +clBalanceOraclesErrorUpperBPLimit = 50 # CL balance oracles error upper limit (BP) +consolidationEthAmountPerDayLimit = 93375 # Consolidation ETH amount per day limit +exitedValidatorEthAmountLimit = 32 # Exited validator ETH amount limit in ETH units +externalPendingBalanceCapEth = 300 # Extra external pending balance cap for bounded side deposits / top-ups + +[lido] +lidoDepositsReserveTarget = "1000000000000000000000" # 1000 eth + +[withdrawalVault] +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7002.md#specification +withdrawalRequestContract = "0x00000961Ef480Eb55e80D19ad83579A64c007002" +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7251.md#specification +consolidationRequestContract = "0x0000BBdDc7CE488642fb579F8B00f3a590007251" + +[triggerableWithdrawalsGateway] +maxExitRequestsLimit = 250 +exitsPerFrame = 1 +frameDurationInSec = 240 + +# Accounting oracle configuration +[accountingOracle] +consensusVersion = 6 + +# Validators exit bus oracle configuration +[validatorsExitBusOracle] +maxValidatorsPerReport = 600 +maxExitBalanceEth = 358400 +balancePerFrameEth = 32 +frameDurationInSec = 48 +consensusVersion = 5 + +# Deposit security module configuration +[depositSecurityModule] +maxOperatorsPerUnvetting = 200 # Maximum operators per unvetting +pauseIntentValidityPeriodBlocks = 6646 # Pause intent validity period in blocks
+ +[consolidationGateway] +maxConsolidationRequestsLimit = 2900 # Maximum number of consolidations requests that can be processed +consolidationsPerFrame = 1 # Number of consolidations processed per frame +frameDurationInSec = 30 # Duration of each processing frame in seconds +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (before fork) +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (after fork) +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +pauser = "0x83BCE68B4e8b7071b2a664a26e6D3Bc17eEe3102" # DG reseal committee + +[consolidationBus] +initialBatchSize = 350 # Max number of requests in a batch +initialMaxGroupsInBatch = 10 # Max source groups in a batch +initialExecutionDelay = 86400 # Delay before pending batch execution + +[consolidationMigrator] +sourceModuleId = 1 # Source staking module ID +targetModuleId = 5 # Target staking module ID, for scratch deploy testing, we use moduleId=1 which corresponds to NOR. 
+ +committee = "0x84DffcfB232594975C608DE92544Ff239a24c9E9" #m-sig + +# Top-up gateway configuration for validator top-ups via Merkle proofs +[topUpGateway] +maxValidatorsPerTopUp = 32 # Maximum number of validators a single topUp can process _minBlockDistance 1 Minimum block distance between topUp calls +minBlockDistance = 1 +maxRootAge = 600 # Maximum age (seconds) of the beacon root used to prove validator state +targetBalanceGwei = 2046750000000 # (2046.75 ETH) Validator target balance ceiling after top-up (leaves 1.25 ETH safety margin below MaxEB) +minTopUpGwei = 2000000000 # (2 ETH) Minimum top-up amount; smaller calculated top-ups are skipped +# Generalized indices for validator/balance/pending state verification +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" +# Pivot slot for fork-aware gIndex selection +pivotSlot = 0 +depositor = "0x9b186cE78Ddd6fF098b4a533Dd17a139e1FFeD76" # depositor bot + +# StakingRouter configuration +[stakingRouter] +maxEBType1 = "32000000000000000000" # Max EB value for WC type 1 +maxEBType2 = "2048000000000000000000" # Max EB value for WC type 2 + +[csmUpgrade] +csmProxy = "0x79CEf36D84743222f37765204Bec41E92a93E59d" +csmImpl = "0x161b1DAa658fD0D78a4603860edd8Ed06f98F4cA" +parametersRegistryProxy = "0xA4aD5236963f9Fe4229864712269D8d79B65C5Ad" +parametersRegistryImpl = "0x58376D8B192813E85532b25685D948EB49c2A8B5" +feeOracleProxy = "0xe7314f561B2e72f9543F1004e741bab6Fc51028B" +feeOracleImpl = "0x27d1Ff0353AF6b7480CBc902169d0F89b49334B5" +feeOracleConsensusVersion = 4 +vettedGateProxy = "0x10a254E724fe2b7f305F76f3F116a3969c53845f" +identifiedDVTClusterGate = "0x887F8512F9998045f4b5993e6eaa6BCfE5F02A94" +identifiedDVTClusterCurveSetup = "0x75CC99052fB05eEb4D9f80Ba94A5D077e3a721C1" +identifiedDVTClusterBondCurveId = 4 +vettedGateImpl = "0x3b834c6d043F4CE5C61d84723bA737D405B2e276" +accountingProxy = 
"0xA54b90BA34C5f326BC1485054080994e38FB4C60" +accountingImpl = "0x3a18675fFB2C37A4296dD794A7Ed94644225F881" +feeDistributorProxy = "0xaCd9820b0A2229a82dc1A0770307ce5522FF3582" +feeDistributorImpl = "0x74c5be19CcD1a264899FbCf8dB1a64C1e3fb73Ac" +exitPenaltiesProxy = "0xD259b31083Be841E5C85b2D481Cfc17C14276800" +exitPenaltiesImpl = "0xf38A3DA25B417D83182EEDD30d00557d78c35C96" +strikesProxy = "0x8fBA385C3c334D251eE413e79d4D3890db98693c" +strikesImpl = "0x47F96DCD5cf3e94492CD050c00C9F6e33b3ca677" +oldPermissionlessGate = "0x5553077102322689876A6AdFd48D75014c28acfb" +oldVerifier = "0x1773b2Ff99A030F6000554Cb8A5Ec93145650cbA" +newVerifier = "0xC96406b0eADdAC5708aFCa04DcCA67BAdC9642Fd" +newPermissionlessGate = "0xd7bD8D2A9888D1414c770B35ACF55890B15de26a" +ejector = "0xCAe028378d69D54dc8bF809e6C44CF751F997b80" +csmCommittee = "0x4AF43Ee34a6fcD1fEcA1e1F832124C763561dA53" + +[curatedModule] +module = "0x87EB69Ae51317405FD285efD2326a4a11f6173b9" +curatedGates = [ + "0xF1862d120831eBE31f7202378Ff3Ae63A5658ae3", + "0x410A309dF81B782190188CDB3d215729cc6bC1f3", + "0xa5A604b172787e017b1b118F02fE54fC1D696519", + "0xE966874cDB6A4282ED75Cd10439e3799e5531a2D", + "0x5c063da03e3f21443716D75a2205EE16706e1153", + "0x1cD655Ac53CfE8269DE0DBfc0140B074623C4A6B", + "0x28518be9894C20135F280a9539617783b08a04c7", +] +verifier = "0x209190Ebc2Be80367a15d05e626784Eb94d6A880" +circuitBreakerPauser = "0x84DffcfB232594975C608DE92544Ff239a24c9E9" +moduleName = "curated-onchain-v2" +stakeShareLimit = 10000 +priorityExitShareThreshold = 10000 +stakingModuleFee = 400 +treasuryFee = 600 +maxDepositsPerBlock = 150 +minDepositBlockDistance = 25 +feeOracleConsensusVersion = 4 +hashConsensusInitialEpoch = 93833 + +[upgradeVoteScript] +# Expiry timestamp after which the upgrade transaction will revert +# Format: Unix timestamp (seconds since epoch) +# The upgrade transaction must be executed before this deadline +# TODO set actual +expiryTimestamp = 1780272000 # June 1, 2026 at 00:00:00 UTC 
+timeConstraintsContract = "0xB26Fd3b50280AbC55c572EE73071778A51088408" +enabledDaySpanStart = 0 # 00:00:000 UTC +enabledDaySpanEnd = 86399 # 23:59:59 UTC # Vault hub configuration [vaultHub] maxRelativeShareLimitBP = 1000 # 10%, absolute max shareLimit of a vault relative to Lido TVL (in basis points) diff --git a/scripts/upgrade/upgrade-params-mainnet.toml b/scripts/upgrade/upgrade-params-mainnet.toml index 56885ad317..267d2934ab 100644 --- a/scripts/upgrade/upgrade-params-mainnet.toml +++ b/scripts/upgrade/upgrade-params-mainnet.toml @@ -1,6 +1,175 @@ # Lido Protocol Upgrade Parameters - Mainnet Configuration -# This file contains deployment parameters for upgrading Lido protocol contracts on Ethereum mainnet +[easyTrack.newFactories] +# TODO: fill with deployed Easy Track factory address before execution +UpdateStakingModuleShareLimits = "0x0000000000000000000000000000000000000000" +AllowConsolidationPair = "0x0000000000000000000000000000000000000000" + +SetMerkleGateTreeForCSM = "0x0000000000000000000000000000000000000000" +ReportWithdrawalsForSlashedValidatorsForCSM = "0x0000000000000000000000000000000000000000" +SettleGeneralDelayedPenaltyForCSM = "0x0000000000000000000000000000000000000000" + +SetMerkleGateTreeForCM = "0x0000000000000000000000000000000000000000" +ReportWithdrawalsForSlashedValidatorsForCM = "0x0000000000000000000000000000000000000000" +SettleGeneralDelayedPenaltyForCM = "0x0000000000000000000000000000000000000000" +CreateOrUpdateOperatorGroupForCM = "0x0000000000000000000000000000000000000000" + +[easyTrack.oldFactories] +CSMSettleElStealingPenalty = "0xF6B6E7997338C48Ea3a8BCfa4BB64a315fDa76f4" +CSMSetVettedGateTree = "0xBc5642bDD6F2a54b01A75605aAe9143525D97308" + +# Oracle report sanity checker configuration +[oracleReportSanityChecker] +exitedEthAmountPerDayLimit = 57600 # Exited ETH amount per day limit +appearedEthAmountPerDayLimit = 57600 # Appeared ETH amount per day limit +annualBalanceIncreaseBPLimit = 1000 # Annual balance increase 
limit (BP) +simulatedShareRateDeviationBPLimit = 250 # Simulated share rate deviation limit (BP) +maxBalanceExitRequestedPerReportInEth = 19200 # Maximum exit ETH per report (600*32) +maxEffectiveBalanceWeightWCType01 = 32 # maxEB equivalent weight for WC type 1 +maxEffectiveBalanceWeightWCType02 = 2048 # maxEB equivalent weight for WC type 2 +maxItemsPerExtraDataTransaction = 8 # Maximum items per extra data transaction +maxNodeOperatorsPerExtraDataItem = 24 # Maximum node operators per extra data item +requestTimestampMargin = 128 # Request timestamp margin +maxPositiveTokenRebase = 5000000 # Maximum positive token rebase +maxCLBalanceDecreaseBP = 360 # Max CL balance decrease over sliding window (BP, 360 = 3.6%) +clBalanceOraclesErrorUpperBPLimit = 50 # CL balance oracles error upper limit (BP) +consolidationEthAmountPerDayLimit = 93375 # Consolidation ETH amount per day limit +exitedValidatorEthAmountLimit = 32 # Exited validator ETH amount limit in ETH units +externalPendingBalanceCapEth = 300 # Extra external pending balance cap for bounded side deposits / top-ups + +[lido] +lidoDepositsReserveTarget = "1000000000000000000000" # 1000 eth + +[withdrawalVault] +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7002.md#specification +withdrawalRequestContract = "0x00000961Ef480Eb55e80D19ad83579A64c007002" +# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7251.md#specification +consolidationRequestContract = "0x0000BBdDc7CE488642fb579F8B00f3a590007251" + +[triggerableWithdrawalsGateway] +maxExitRequestsLimit = 250 +exitsPerFrame = 1 +frameDurationInSec = 240 + +# Accounting oracle configuration +[accountingOracle] +consensusVersion = 6 + +# Validators exit bus oracle configuration +[validatorsExitBusOracle] +maxValidatorsPerReport = 600 +maxExitBalanceEth = 416000 +balancePerFrameEth = 32 +frameDurationInSec = 48 +consensusVersion = 5 + +[depositSecurityModule] +maxOperatorsPerUnvetting = 200 # Maximum operators per unveiling 
+pauseIntentValidityPeriodBlocks = 6646 # Pause intent validity period in blocks + +[consolidationGateway] +maxConsolidationRequestsLimit = 2900 # Maximum number of consolidation requests that can be processed +consolidationsPerFrame = 1 # Number of consolidations processed per frame +frameDurationInSec = 30 # Duration of each processing frame in seconds +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (before fork) +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (after fork) +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +pauser = "0xFFe21561251c49AdccFad065C94Fb4931dF49081" # DG reseal committee + +[consolidationBus] +initialBatchSize = 350 # Max number of requests in a batch +initialMaxGroupsInBatch = 10 # Max source groups in a batch +initialExecutionDelay = 86400 # Delay before pending batch execution + +[consolidationMigrator] +sourceModuleId = 1 # Source staking module ID +targetModuleId = 4 # Target staking module ID; for scratch deploy testing, moduleId=1 is used, which corresponds to NOR. 
+#TODO update upon deployment +committee = "0x0000000000000000000000000000000000000000" + +# Top-up gateway configuration for validator top-ups via Merkle proofs +[topUpGateway] +maxValidatorsPerTopUp = 32 # Maximum number of validators a single topUp can process +minBlockDistance = 1 # Minimum block distance between topUp calls +maxRootAge = 600 # Maximum age (seconds) of the beacon root used to prove validator state +targetBalanceGwei = 2046750000000 # (2046.75 ETH) Validator target balance ceiling after top-up (leaves 1.25 ETH safety margin below MaxEB) +minTopUpGwei = 2000000000 # (2 ETH) Minimum top-up amount; smaller calculated top-ups are skipped +# Generalized indices for validator/balance/pending state verification +# TODO: replace with mainnet values +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" +# Pivot slot for fork-aware gIndex selection +pivotSlot = 0 +#TODO update upon deployment +depositor = "0x0000000000000000000000000000000000000000" + +[stakingRouter] +maxEBType1 = "32000000000000000000" +maxEBType2 = "2048000000000000000000" + +[csmUpgrade] +csmProxy = "0xdA7dE2ECdDfccC6c3AF10108Db212ACBBf9EA83F" +csmImpl = "0x1eB6d4da13ca9566c17F526aE0715325d7a07665" +vettedGateProxy = "0xB314D4A76C457c93150d308787939063F4Cc67E0" +# TODO: replace with deployed IDVTC gate before execution +identifiedDVTClusterGate = "0x0000000000000000000000000000000000000000" +# TODO: replace with deployed IDVTC curve setup before execution +identifiedDVTClusterCurveSetup = "0x0000000000000000000000000000000000000000" +# TODO: replace with expected IDVTC bond curve id before execution +identifiedDVTClusterBondCurveId = 0 +parametersRegistryImpl = "0x25fdC3BE9977CD4da679dF72A64C8B6Bd5216A78" +feeOracleImpl = "0xe0B234f99E413E27D9Bc31aBba9A49A3e570Da97" +feeOracleConsensusVersion = 4 +vettedGateImpl = "0x65D4D92Cd0EabAa05cD5A46269C24b71C21cfdc4" +accountingImpl = 
"0x6f09d2426c7405C5546413e6059F884D2D03f449" +feeDistributorImpl = "0x5DCF7cF7c6645E9E822a379dF046a8b0390251A1" +exitPenaltiesImpl = "0xDa22fA1CEa40d05Fe4CD536967afdD839586D546" +strikesImpl = "0x3E5021424c9e13FC853e523Cd68ebBec848956a0" +oldPermissionlessGate = "0xcF33a38111d0B1246A3F38a838fb41D626B454f0" +oldVerifier = "0xdC5FE1782B6943f318E05230d688713a560063DC" +# TODO: replace with deployed new verifier +newVerifier = "0x0000000000000000000000000000000000000000" +newPermissionlessGate = "0xcF33a38111d0B1246A3F38a838fb41D626B454f0" +# TODO: replace with new deployed ejector +ejector = "0x0000000000000000000000000000000000000000" +csmCommittee = "0xC52fC3081123073078698F1EAc2f1Dc7Bd71880f" + +[curatedModule] +# TODO: replace with deployed curated module addresses +module = "0x0000000000000000000000000000000000000000" +# TODO: replace with deployed curated gates before execution +curatedGates = [ + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000", +] +# TODO: replace with deployed curated verifier before execution +verifier = "0x0000000000000000000000000000000000000000" +circuitBreakerPauser = "0x0000000000000000000000000000000000000000" +moduleName = "curated-onchain-v2" +stakeShareLimit = 2000 +priorityExitShareThreshold = 2500 +stakingModuleFee = 800 +treasuryFee = 200 +maxDepositsPerBlock = 30 +minDepositBlockDistance = 25 +feeOracleConsensusVersion = 4 +hashConsensusInitialEpoch = 47480 + +[upgradeVoteScript] +# Expiry timestamp after which the upgrade transaction will revert +# Format: Unix timestamp (seconds since epoch) +# The upgrade transaction must be executed before this deadline +# TODO set actual +expiryTimestamp = 1780272000 # June 1, 2026 at 00:00:00 UTC 
+timeConstraintsContract = "0x2a30F5aC03187674553024296bed35Aa49749DDa" +enabledDaySpanStart = 50400 # 14:00 UTC +enabledDaySpanEnd = 82800 # 23:00 UTC # Vault hub configuration [vaultHub] maxRelativeShareLimitBP = 1000 # 10%, absolute max shareLimit of a vault relative to Lido TVL (in basis points) diff --git a/scripts/utils/common-env.sh b/scripts/utils/common-env.sh new file mode 100644 index 0000000000..f8b7051c0c --- /dev/null +++ b/scripts/utils/common-env.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# +# helpers +# + +load_env_var() { + local name="$1" + local default="${2-}" + + # try load from env + if [[ -n "${!name:-}" ]]; then + export "$name" + return 0 + fi + + # try load from .env + if [[ -f .env ]]; then + local value + value="$( + set -a + . ./.env + printf '%s' "${!name:-}" + )" + + if [[ -n "$value" ]]; then + export "$name=$value" + return 0 + fi + fi + + # use default value if provided + if [[ $# -ge 2 ]]; then + export "$name=$default" + return 0 + fi + + return 1 +} diff --git a/scripts/utils/mine.ts b/scripts/utils/mine.ts index 65807512bc..561b88feca 100644 --- a/scripts/utils/mine.ts +++ b/scripts/utils/mine.ts @@ -1,13 +1,12 @@ -import { ethers } from "hardhat"; - -import { log } from "lib/log"; +import { advanceChainTime, log } from "lib"; async function main() { log.scriptStart(__filename); - // 0x01 is too little, 0x80 works, although less might be enough - await ethers.provider.send("hardhat_mine", ["0x80"]); - log.success(`Sent "hardhat_mine"`); + // // 0x01 is too little, 0x80 works, although less might be enough + // await ethers.provider.send("hardhat_mine", ["0x80"]); + await advanceChainTime(10n); + log.success(`Sent "advanceChainTime +10s"`); log.scriptFinish(__filename); } diff --git a/scripts/utils/omnibus.ts b/scripts/utils/omnibus.ts new file mode 100644 index 0000000000..4e82d28cff --- /dev/null +++ b/scripts/utils/omnibus.ts @@ -0,0 +1,39 @@ +import { BigNumberish, BytesLike, dataLength, getAddress, solidityPacked, toBeHex 
} from "ethers"; + +export type EvmScriptHex = `0x${string}`; + +export interface ScriptCall { + to: string; + data: BytesLike; +} + +export interface ProposalCall { + target: string; + value: bigint; + payload: BytesLike; +} + +export interface VoteItem { + description: string; + call: ScriptCall; +} + +export const CALLS_SCRIPT_SPEC_ID = 1; +export const EMPTY_CALLS_SCRIPT = createExecutorId(CALLS_SCRIPT_SPEC_ID); + +export function createExecutorId(id: BigNumberish): EvmScriptHex { + return toBeHex(id, 4) as EvmScriptHex; +} + +// Encodes an array of actions ({ to: address, calldata: bytes }) into the EVM call script format: +// [ 4 bytes (spec id) ] + N * ([ 20 bytes (address) ] + [ 4 bytes (uint32: calldata length) ] + [ calldata ]) +export function encodeCallScript( + calls: readonly ScriptCall[], + specId: BigNumberish = CALLS_SCRIPT_SPEC_ID, +): EvmScriptHex { + return calls.reduce((script, { to, data }) => { + const encodedAction = solidityPacked(["address", "uint32", "bytes"], [getAddress(to), dataLength(data), data]); + + return `${script}${encodedAction.slice(2)}` as EvmScriptHex; + }, createExecutorId(specId)); +} diff --git a/scripts/utils/scratch.ts b/scripts/utils/scratch.ts index 5b34cb048b..bc78473f4d 100644 --- a/scripts/utils/scratch.ts +++ b/scripts/utils/scratch.ts @@ -27,12 +27,9 @@ export function readScratchParameters(): ScratchParameters { export function scratchParametersToDeploymentState(params: ScratchParameters): Record { return { deployer: null, // Set by deployment scripts - gateSeal: { + circuitBreaker: { address: null, // Set by deployment scripts - factoryAddress: null, // Set by deployment scripts - sealDuration: params.gateSeal.sealDuration, - expiryTimestamp: params.gateSeal.expiryTimestamp, - sealingCommittee: params.gateSeal.sealingCommittee, + deployParameters: params.circuitBreaker, }, lidoApmEnsName: params.lidoApm.ensName, lidoApmEnsRegDurationSec: params.lidoApm.ensRegDurationSec, @@ -115,11 +112,26 @@ export function 
scratchParametersToDeploymentState(params: ScratchParameters): R triggerableWithdrawalsGateway: { deployParameters: params.triggerableWithdrawalsGateway, }, + consolidationGateway: { + deployParameters: params.consolidationGateway, + }, + consolidationBus: { + deployParameters: params.consolidationBus, + }, + consolidationMigrator: { + deployParameters: params.consolidationMigrator, + }, predepositGuarantee: { deployParameters: params.predepositGuarantee, }, operatorGrid: { deployParameters: params.operatorGrid, }, + topUpGateway: { + deployParameters: params.topUpGateway, + }, + stakingRouter: { + deployParameters: params.stakingRouter, + }, }; } diff --git a/scripts/utils/upgrade.ts b/scripts/utils/upgrade.ts index 861db09a51..3811faeee5 100644 --- a/scripts/utils/upgrade.ts +++ b/scripts/utils/upgrade.ts @@ -1,25 +1,72 @@ -import { TransactionReceipt, TransactionResponse } from "ethers"; +import { ContractTransactionReceipt, ContractTransactionResponse } from "ethers"; import fs from "fs"; +import { getMode } from "hardhat.helpers"; import * as toml from "@iarna/toml"; +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { IDualGovernance, IEmergencyProtectedTimelock } from "typechain-types"; +import { IDualGovernance, ITimelock, TokenManager, UpgradeTemplate, UpgradeVoteScript, Voting } from "typechain-types"; -import { advanceChainTime, ether, log } from "lib"; -import { impersonate } from "lib/account"; +import { + advanceChainTime, + bl, + ConvertibleToString, + ether, + findEventsWithInterfaces, + getCurrentBlockTimestamp, + getSignerOrImpersonate, + impersonate, + isContractDeployed, + loadContract, + LoadedContract, + log, + or, + yl, +} from "lib"; import { UpgradeParameters, validateUpgradeParameters } from "lib/config-schemas"; -import { loadContract } from "lib/contract"; -import { DeploymentState, getAddress, Sk } from "lib/state-file"; +import { getTxLink } from "lib/explorer"; +import { + DeploymentState, + 
getAddress, + getAddressValidated, + readNetworkState, + Sk, + updateObjectInState, +} from "lib/state-file"; -import { ONE_HOUR } from "test/suite"; +import { FUSAKA_TX_GAS_LIMIT, ONE_HOUR } from "test/suite"; -const FUSAKA_TX_LIMIT = 2n ** 24n; // 16M = 16_777_216 +import { encodeCallScript, VoteItem } from "./omnibus"; const UPGRADE_PARAMETERS_FILE = process.env.UPGRADE_PARAMETERS_FILE; +const PROPOSAL_ID = BigInt(process.env.PROPOSAL_ID || "0"); +const PROPOSAL_METADATA = process.env.PROPOSAL_METADATA || "proposal-metadata"; +const VOTE_ID = BigInt(process.env.VOTE_ID || "0"); +const VOTE_DESCRIPTION = process.env.VOTE_DESCRIPTION || "vote-description"; +const VOTE_MODE = process.env.VOTE_MODE || "dg"; // DG mode by default export { UpgradeParameters }; +/// +/// ---- Upgrade helpers ---- +/// export function readUpgradeParameters(skipValidation: boolean = false): UpgradeParameters { + const filePath = getUpgradeParametersFilePath(); + const rawData = fs.readFileSync(filePath, "utf8"); + const parsedData = toml.parse(rawData); + + if (skipValidation) { + return parsedData as UpgradeParameters; + } + + try { + return validateUpgradeParameters(parsedData); + } catch (error) { + throw new Error(`Invalid upgrade parameters (${UPGRADE_PARAMETERS_FILE}): ${error}`); + } +} + +function getUpgradeParametersFilePath(): string { if (!UPGRADE_PARAMETERS_FILE) { throw new Error("UPGRADE_PARAMETERS_FILE is not set"); } @@ -28,71 +75,394 @@ export function readUpgradeParameters(skipValidation: boolean = false): UpgradeP throw new Error(`Upgrade parameters file not found: ${UPGRADE_PARAMETERS_FILE}`); } - const rawData = fs.readFileSync(UPGRADE_PARAMETERS_FILE, "utf8"); - const parsedData = toml.parse(rawData); + return UPGRADE_PARAMETERS_FILE; +} - if (skipValidation) { - return parsedData as UpgradeParameters; +function escapeRegExp(value: string): string { + return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); +} + +function getLineEnding(content: string): string { + 
return content.includes("\r\n") ? "\r\n" : "\n"; +} + +/** + * Updates a single key in TOML section while preserving the rest of the file as-is. + * If the key doesn't exist in the section, appends it at the end of the section. + */ +export function writeUpgradeEasyTrackFactoryAddress(sectionName: string, paramKey: string, address: string): void { + const filePath = getUpgradeParametersFilePath(); + const content = fs.readFileSync(filePath, "utf8"); + + const sectionHeaderRegex = new RegExp(`^\\s*\\[${escapeRegExp(sectionName)}\\]\\s*$`, "m"); + const sectionHeaderMatch = sectionHeaderRegex.exec(content); + if (!sectionHeaderMatch) { + throw new Error(`Section [${sectionName}] not found in ${filePath}`); } - try { - return validateUpgradeParameters(parsedData); - } catch (error) { - throw new Error(`Invalid upgrade parameters (${UPGRADE_PARAMETERS_FILE}): ${error}`); + const sectionStart = sectionHeaderMatch.index + sectionHeaderMatch[0].length; + const contentAfterSectionHeader = content.slice(sectionStart); + const nextSectionMatch = /^\s*\[[^\]]+\]\s*$/m.exec(contentAfterSectionHeader); + const sectionEnd = nextSectionMatch ? sectionStart + nextSectionMatch.index : content.length; + + const beforeSection = content.slice(0, sectionStart); + const sectionContent = content.slice(sectionStart, sectionEnd); + const afterSection = content.slice(sectionEnd); + + const keyLineRegex = new RegExp(`^(\\s*${escapeRegExp(paramKey)}\\s*=\\s*")([^"]*)(".*)$`, "m"); + let updatedSectionContent: string; + + if (keyLineRegex.test(sectionContent)) { + updatedSectionContent = sectionContent.replace(keyLineRegex, `$1${address}$3`); + } else { + const lineEnding = getLineEnding(content); + const separator = sectionContent.endsWith("\n") || sectionContent.endsWith("\r\n") ? 
"" : lineEnding; + updatedSectionContent = `${sectionContent}${separator}${paramKey} = "${address}"${lineEnding}`; + } + + const updatedContent = `${beforeSection}${updatedSectionContent}${afterSection}`; + if (updatedContent !== content) { + fs.writeFileSync(filePath, updatedContent, "utf8"); } } -export async function mockDGAragonVoting(state: DeploymentState): Promise<{ - proposalId: bigint; - scheduleReceipt: TransactionReceipt; - proposalExecutedReceipt: TransactionReceipt; -}> { +export const mockAragonVoting = async (state: DeploymentState) => { + const holderAddress = process.env.HOLDER || process.env.DEPLOYER || ""; + const holder = await getSignerOrImpersonate(holderAddress, ether("100")); log("Starting mock Aragon voting..."); - const agentAddress = getAddress(Sk.appAgent, state); - const deployer = await impersonate(agentAddress, ether("100")); - const timelock = await loadContract( - "IEmergencyProtectedTimelock", - state[Sk.dgEmergencyProtectedTimelock].proxy.address, - ); + let voteId = VOTE_ID; + + if (!voteId) { + // try to get voteId from state + voteId = state[Sk.upgradeVoteScript].voteState?.voteId; + } else { + log.warning("Using provided voteId:", voteId); + } + if (!voteId) { + // create new vote + const voteDescription = VOTE_DESCRIPTION; + voteId = await newAragonVoting(state, holder, voteDescription); + + // save voteId in deployed state + updateObjectInState(Sk.upgradeVoteScript, { + voteState: { + voteId, + voteDescription, + }, + }); + } else { + log.warning("Using saved in state voteId:", voteId); + } + + let receipt = await mockEnactAragonVoting(state, voteId, holder); + + if (VOTE_MODE === "dg") { + const { dg } = await upgCtx(state); + const proposalId = findEventsWithInterfaces(receipt, "ProposalSubmitted", [dg.interface])[0].args.proposalId; + log.success("submitted proposalId:", proposalId); + const agent = await impersonate(getAddress(Sk.appAgent, state), ether("100")); + receipt = await mockEnactDGProposal(state, proposalId, 
agent); + } + const { template } = await upgCtx(state); + const event = findEventsWithInterfaces(receipt, "UpgradeFinished", [template.interface])[0]; + if (!event) { + throw new Error("UpgradeFinished event not found"); + } +}; + +async function newAragonVoting( + state: DeploymentState, + holder: HardhatEthersSigner, + voteDescription: string, +): Promise { + const { tm, voting, voteScript } = await upgCtx(state); + let voteItems: VoteItem[] = []; + let evmScriptNewVote; + if (VOTE_MODE === "dg") { + evmScriptNewVote = await voteScript.getNewVoteCallBytecode(VOTE_DESCRIPTION, PROPOSAL_METADATA); + } else { + log("Creating new vote (no DG):", voteDescription); + if (VOTE_MODE !== "skipVotingItems") { + const items = (await voteScript.getVotingVoteItems()) as VoteItem[]; + voteItems = voteItems.concat(items); + } + + if (VOTE_MODE !== "skipDGItems") { + const items = (await voteScript.getVoteItems()) as VoteItem[]; + voteItems = voteItems.concat(items); + } + log("items:"); + log(voteItems.map(({ description }) => description)); + const evmScript = encodeCallScript(voteItems.map(({ call }) => ({ to: call.to, data: call.data }))); + evmScriptNewVote = encodeCallScript([ + { + to: voting.address, + data: voting.interface.encodeFunctionData("newVote(bytes,string,bool,bool)", [ + evmScript, + voteDescription, + false, + false, + ]), + }, + ]); + } + + log("Forwarding evmScript via TokenManager to create a new vote..."); + const tx = await tm.connect(holder).forward(evmScriptNewVote); + const receipt = await txWaitAndLog(tx); + const voteId = findEventsWithInterfaces(receipt, "StartVote", [voting.interface])[0].args.voteId; + log.success("New vote created. 
voteId:", voteId); + return voteId; +} + +async function mockEnactAragonVoting(state: DeploymentState, voteId: bigint, holder: HardhatEthersSigner) { + const { voting } = await upgCtx(state); + + const vote = await voting.getVote(voteId); + + if (!vote.startDate || vote.executed) { + throw new Error(`VoteId ${voteId} does not exist or already executed`); + } + + if ((await voting.canVote(voteId, holder)) && (await voting.getVoterState(voteId, holder)) !== 1n) { + log("Try to cast..."); + const voteTx = await voting.connect(holder).vote(voteId, true, true); + await txWaitAndLog(voteTx); + log.success("Cast “Yes” on voteId:", voteId); + } else { + log.warning("Can't cast voteId:", voteId); + } + + if (getMode() === "forking") { + const voteTime = await voting.voteTime(); + const endTime = vote.startDate + voteTime; + const currentTime = await getCurrentBlockTimestamp(); + if (currentTime < endTime) { + const timeToAdvance = endTime - currentTime + 60n; + log.warning(`Advancing chain time by ${timeToAdvance} seconds to reach vote start time...`); + await advanceChainTime(timeToAdvance); + } + } + + if (await voting.canExecute(voteId)) { + log("Try to execute..."); + const execTx = await voting.connect(holder).executeVote(voteId); + const receipt = await txWaitAndLog(execTx); + log.success("executed voteId:", voteId); + + if (receipt.gasUsed > FUSAKA_TX_GAS_LIMIT) { + throw new Error("Gas used exceeds FUSAKA_TX_GAS_LIMIT"); + } + + return receipt; + } else { + throw new Error(`VoteId ${voteId} is not ready for execution`); + } +} + +async function mockEnactDGProposal(state: DeploymentState, proposalId: bigint, executor: HardhatEthersSigner) { + const { dg, timelock } = await upgCtx(state); + const afterSubmitDelay = await timelock.getAfterSubmitDelay(); const afterScheduleDelay = await timelock.getAfterScheduleDelay(); - const dualGovernance = await loadContract( - "IDualGovernance", - state[Sk.dgDualGovernance].proxy.address, - ); - - const proposalId = 6n; // 
https://dg.lido.fi/proposals/6 - log.success("Proposal submitted: proposalId", proposalId); - - await advanceChainTime(afterSubmitDelay); - const scheduleTx = await dualGovernance.connect(deployer).scheduleProposal(proposalId); - const scheduleReceipt = (await scheduleTx.wait())!; - log.success("Proposal scheduled: gas used", scheduleReceipt.gasUsed); - - await advanceChainTime(afterScheduleDelay); - let proposalExecutedTx: TransactionResponse; - let revertedDueToTimeConstraints: boolean = true; - let attempts: number = 0; - - while (revertedDueToTimeConstraints && attempts < 24) { - try { - proposalExecutedTx = await timelock.connect(deployer).execute(proposalId); - revertedDueToTimeConstraints = false; - } catch { - await advanceChainTime(ONE_HOUR); - attempts++; + let { status } = await timelock.getProposalDetails(proposalId); + + if (status < 1n || status > 2n) { + throw new Error("Proposal not submitted or already executed"); + } + + if (status == 1n) { + log("Proposal submitted, try for schedule..."); + let canSchedule = await timelock.canSchedule(proposalId); + if (!canSchedule) { + await advanceChainTime(afterSubmitDelay); + canSchedule = await timelock.canSchedule(proposalId); + if (!canSchedule) { + throw new Error("Proposal can't be scheduled"); + } } + + const scheduleTx = await dg.connect(executor).scheduleProposal(proposalId); + const scheduleReceipt = (await scheduleTx.wait())!; + log.success("Proposal scheduled: gas used", scheduleReceipt.gasUsed); + ({ status } = await timelock.getProposalDetails(proposalId)); } - const proposalExecutedReceipt = (await proposalExecutedTx!.wait())!; - log.success("Proposal executed: gas used", proposalExecutedReceipt.gasUsed); + if (status == 2n) { + log("Proposal scheduled, try for execute..."); + let canExecute = await timelock.canExecute(proposalId); + if (!canExecute) { + await advanceChainTime(afterScheduleDelay); + canExecute = await timelock.canExecute(proposalId); + if (!canExecute) { + throw new 
Error("Proposal can't be executed"); + } + } - if (proposalExecutedReceipt.gasUsed > FUSAKA_TX_LIMIT) { - log.error("Proposal executed: gas used exceeds FUSAKA_TX_LIMIT"); - process.exit(1); + let execTx: ContractTransactionResponse; + let revertedDueToTimeConstraints: boolean = true; + let attempts: number = 0; + let lastError: unknown; + + while (revertedDueToTimeConstraints && attempts < 24) { + try { + execTx = await timelock.connect(executor).execute(proposalId); + revertedDueToTimeConstraints = false; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } catch (e: any) { + // const data = e?.data ?? e?.error?.data ?? e?.revert?.data; + // if (data) { + // try { + // const { name, args } = template.interface.parseError(data)!; + // log.error("Error name:", name); + // log.error("Error args:", args); + // } catch { + // log.error("Can't parse error:", data); + // } + // } + + await advanceChainTime(ONE_HOUR); + attempts++; + lastError = e; + } + } + if (revertedDueToTimeConstraints) { + log.error("Failed to execute proposal", proposalId); + throw lastError; + } + // const execTx = await timelock.connect(executor).execute(proposalId); + const receipt = await txWaitAndLog(execTx!); + log.success("executed proposalId:", proposalId); + + if (receipt.gasUsed > FUSAKA_TX_GAS_LIMIT) { + throw new Error("Gas used exceeds FUSAKA_TX_GAS_LIMIT"); + } + return receipt; } - return { proposalId, scheduleReceipt, proposalExecutedReceipt }; + throw new Error("Proposal not scheduled"); +} + +export async function mockDGAragonVoting(state: DeploymentState) { + log("Starting mock DG Aragon voting..."); + + let proposalId = PROPOSAL_ID; + + const agent = await impersonate(getAddress(Sk.appAgent, state), ether("100")); + + const { dg, voteScript } = await upgCtx(state); + + const proposers = await dg.getProposers(); + if (!proposers.length) { + throw new Error("No proposer found in DualGovernance."); + } + const proposer = await impersonate(proposers[0].account, 
ether("100")); + + if (proposalId) { + log.warning("Using provided proposal ID:", proposalId); + } else { + // const evmScript = await script.getEVMScript(proposalMetadata); + // console.log(evmScript); + const dgItems = await voteScript.getVoteItems(); + const proposalMetadata = PROPOSAL_METADATA; + const proposalCalls = dgItems.map(({ call: { to, data } }) => ({ target: to, value: 0n, payload: data })); + log.info("Collect DG proposal", { + callsCount: proposalCalls.length, + metadata: proposalMetadata, + }); + + proposalId = (await dg + .connect(proposer) + .getFunction("submitProposal") + .staticCall(proposalCalls, proposalMetadata)) as bigint; + + const submitTx = await dg.connect(proposer).submitProposal(proposalCalls, proposalMetadata); + await log.txLink(submitTx.hash); + const submitReceipt = (await submitTx.wait())!; + log.success("Proposal submitted ID:", proposalId); + log.success("Proposal submit gas used", submitReceipt.gasUsed); + } + + const receipt = await mockEnactDGProposal(state, proposalId, agent); + const { template } = await upgCtx(state); + const event = findEventsWithInterfaces(receipt, "UpgradeFinished", [template.interface])[0]; + if (!event) { + throw new Error("UpgradeFinished event not found"); + } +} + +/// ---- helpers ---- + +type Ctx = { + tm: LoadedContract; + dg: LoadedContract; + voting: LoadedContract; + template: LoadedContract; + voteScript: LoadedContract; + timelock: LoadedContract; +}; + +let ctxPromise: Promise | undefined; + +export const upgCtx = (state: DeploymentState): Promise => { + if (!ctxPromise) { + ctxPromise = (async () => { + try { + const [tm, dg, voting, template, voteScript, timelock] = await Promise.all([ + loadContract("TokenManager", getAddress(Sk.appTokenManager, state)), + loadContract("IDualGovernance", getAddress(Sk.dgDualGovernance, state)), + loadContract("Voting", getAddress(Sk.appVoting, state)), + loadContract("UpgradeTemplate", getAddress(Sk.upgradeTemplate, state)), + 
loadContract("UpgradeVoteScript", getAddress(Sk.upgradeVoteScript, state)), + loadContract("ITimelock", getAddress(Sk.dgEmergencyProtectedTimelock, state)), + ]); + + return { + tm, + dg, + voting, + template, + voteScript, + timelock, + }; + } catch (error) { + ctxPromise = undefined; + throw error; + } + })(); + } + + return ctxPromise; +}; + +export async function txWaitAndLog(tx: ContractTransactionResponse): Promise { + const receipt = await tx.wait(); + if (!receipt) { + throw new Error(`Transaction ${tx.hash} did not return a receipt`); + } + + const logData = Object.fromEntries( + Object.entries({ + GasUsed: receipt.gasUsed, + Link: await getTxLink(tx.hash), + }).filter(([, v]) => v !== null), + ) as Record; + + log.info("Transaction", logData); + return receipt; +} + +export async function checkArtifactDeployedAndLog(artifactName: Sk): Promise { + const state = readNetworkState(); + // check if contract object exists in deployed state but address set as empty string or zero address + const address = getAddressValidated(artifactName, state); + // check if contract not deployed yet + const isDeployed = !!(address && (await isContractDeployed(address))); + if (isDeployed) { + log.splitter(); + log(yl(`Artifact <${or(Sk.upgradeTemplate)}> exists and deployed at [${bl(address)}], skipping step...`)); + } + return isDeployed; } diff --git a/tasks/check-interfaces.ts b/tasks/check-interfaces.ts index 726de62595..29950863f1 100644 --- a/tasks/check-interfaces.ts +++ b/tasks/check-interfaces.ts @@ -53,6 +53,21 @@ const PAIRS_TO_SKIP: { "function transferFrom(address sender, address recipient, uint256 amount) returns (bool)", ], }, + { + interfaceFqn: "contracts/0.4.24/Lido.sol:IAccountingOracle", + contractFqn: "contracts/0.8.9/oracle/AccountingOracle.sol:AccountingOracle", + reason: "Fixing requires Lido redeploy", + }, + { + interfaceFqn: "contracts/0.4.24/Lido.sol:IStakingRouter", + contractFqn: "contracts/0.8.25/sr/StakingRouter.sol:StakingRouter", + reason: 
"only var names/state modifiers are diff., can be safely ignored", + }, + { + interfaceFqn: "contracts/0.8.25/sr/SRTypes.sol:IAccountingOracle", + contractFqn: "contracts/0.8.9/oracle/AccountingOracle.sol:AccountingOracle", + reason: "Optimization to avoid memory struct allocation on each deposit.", + }, ]; task("check-interfaces").setAction(async (_, hre) => { diff --git a/tasks/index.ts b/tasks/index.ts index 73c5f20bea..68bb0d854d 100644 --- a/tasks/index.ts +++ b/tasks/index.ts @@ -6,3 +6,4 @@ import "./compile"; import "./check-interfaces"; import "./validate-configs"; import "./lint-solidity"; +import "./protocol-get-addresses"; diff --git a/tasks/protocol-get-addresses.ts b/tasks/protocol-get-addresses.ts new file mode 100644 index 0000000000..92b56e0bcb --- /dev/null +++ b/tasks/protocol-get-addresses.ts @@ -0,0 +1,7 @@ +import { task } from "hardhat/config"; + +task("protocol:get-addresses", "Get deployed protocol contract addresses").setAction(async () => { + const { readNetworkState } = await import("lib/state-file"); + const state = readNetworkState(); + console.log(JSON.stringify(state, null, 2)); +}); diff --git a/test/0.4.24/contracts/AccountingOracle__MockForStakingRouter.sol b/test/0.4.24/contracts/AccountingOracle__MockForStakingRouter.sol new file mode 100644 index 0000000000..7e980ffd3e --- /dev/null +++ b/test/0.4.24/contracts/AccountingOracle__MockForStakingRouter.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract AccountingOracle__MockForStakingRouter { + uint256 currentFrameRefSlot; + uint256 lastProcessingRefSlot; + bool mainDataSubmitted; + bool extraDataSubmitted; + + uint256 public constant SECONDS_PER_SLOT = 4; + uint256 public constant GENESIS_TIME = 100; + + constructor() { + currentFrameRefSlot = 0; + mainDataSubmitted = false; + extraDataSubmitted = false; + } + + struct ProcessingState { + uint256 currentFrameRefSlot; + uint256 processingDeadlineTime; + 
bytes32 mainDataHash; + bool mainDataSubmitted; + bytes32 extraDataHash; + uint256 extraDataFormat; + bool extraDataSubmitted; + uint256 extraDataItemsCount; + uint256 extraDataItemsSubmitted; + } + + function getProcessingState() external view returns (ProcessingState memory result) { + result.currentFrameRefSlot = currentFrameRefSlot; + result.mainDataSubmitted = mainDataSubmitted; + result.extraDataSubmitted = extraDataSubmitted; + } + + function getLastProcessingRefSlot() external view returns (uint256) { + return lastProcessingRefSlot; + } + + function getCurrentFrame() external view returns (uint256 refSlot, uint256 refSlotTimestamp) { + refSlot = currentFrameRefSlot; + refSlotTimestamp = GENESIS_TIME + refSlot * SECONDS_PER_SLOT; + } + + function mock_setProcessingState(uint256 _refSlot, bool _mainDataSubmitted, bool _extraDataSubmitted) external { + currentFrameRefSlot = _refSlot; + lastProcessingRefSlot = _refSlot; + mainDataSubmitted = _mainDataSubmitted; + extraDataSubmitted = _extraDataSubmitted; + } +} diff --git a/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV3.sol b/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV3.sol deleted file mode 100644 index c6ee34ced4..0000000000 --- a/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV3.sol +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: UNLICENSED -// for testing purposes only - -pragma solidity 0.4.24; - -import {Lido} from "contracts/0.4.24/Lido.sol"; -import {UnstructuredStorage} from "@aragon/os/contracts/apps/AragonApp.sol"; - -contract Lido__HarnessForFinalizeUpgradeV3 is Lido { - using UnstructuredStorage for bytes32; - - bytes32 constant LIDO_LOCATOR_POSITION = keccak256("lido.Lido.lidoLocator"); - bytes32 constant TOTAL_SHARES_POSITION = keccak256("lido.StETH.totalShares"); - bytes32 constant BUFFERED_ETHER_POSITION = keccak256("lido.Lido.bufferedEther"); - bytes32 constant CL_VALIDATORS_POSITION = keccak256("lido.Lido.beaconValidators"); - bytes32 constant 
CL_BALANCE_POSITION = keccak256("lido.Lido.beaconBalance"); - bytes32 constant DEPOSITED_VALIDATORS_POSITION = keccak256("lido.Lido.depositedValidators"); - - bytes32 internal constant TOTAL_SHARES_POSITION_V3 = - 0x6038150aecaa250d524370a0fdcdec13f2690e0723eaf277f41d7cae26b359e6; - - function harness_initialize_v2(address _lidoLocator) external payable { - _bootstrapInitialHolder(); // stone in the elevator - - initialized(); - - _resume(); - - _setContractVersion(2); - - BUFFERED_ETHER_POSITION.setStorageUint256(msg.value); - LIDO_LOCATOR_POSITION.setStorageAddress(_lidoLocator); - TOTAL_SHARES_POSITION.setStorageUint256(TOTAL_SHARES_POSITION_V3.getStorageUint256()); - CL_VALIDATORS_POSITION.setStorageUint256(100); - CL_BALANCE_POSITION.setStorageUint256(101); - DEPOSITED_VALIDATORS_POSITION.setStorageUint256(102); - } - - function harness_setContractVersion(uint256 _version) external { - _setContractVersion(_version); - } - - function harness_mintShares_v2(address _to, uint256 _sharesAmount) external { - _mintShares(_to, _sharesAmount); - _emitTransferAfterMintingShares(_to, _sharesAmount); - TOTAL_SHARES_POSITION.setStorageUint256(TOTAL_SHARES_POSITION_V3.getStorageUint256()); - } -} diff --git a/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV4.sol b/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV4.sol new file mode 100644 index 0000000000..be905985c7 --- /dev/null +++ b/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV4.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.4.24; + +import {Lido} from "contracts/0.4.24/Lido.sol"; +import {UnstructuredStorage} from "@aragon/os/contracts/apps/AragonApp.sol"; +import {UnstructuredStorageExt} from "contracts/0.4.24/utils/UnstructuredStorageExt.sol"; + +contract Lido__HarnessForFinalizeUpgradeV4 is Lido { + using UnstructuredStorage for bytes32; + using UnstructuredStorageExt for bytes32; + + // v3 storage positions + bytes32 internal 
constant BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION = + keccak256("lido.Lido.bufferedEtherAndDepositedValidators"); + bytes32 internal constant CL_BALANCE_AND_CL_VALIDATORS_POSITION = keccak256("lido.Lido.clBalanceAndClValidators"); + + function harness_initialize_v3(address _lidoLocator) external payable { + _bootstrapInitialHolder(); // stone in the elevator + + _setLidoLocator(_lidoLocator); + emit LidoLocatorSet(_lidoLocator); + + initialized(); + + _resume(); + + _setContractVersion(3); + + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setLowUint128(msg.value); + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setHighUint128(120); + + CL_BALANCE_AND_CL_VALIDATORS_POSITION.setLowUint128(100 * 32 ether); + CL_BALANCE_AND_CL_VALIDATORS_POSITION.setHighUint128(100); + } + + function harness_setContractVersion(uint256 _version) external { + _setContractVersion(_version); + } +} diff --git a/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol b/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol index 9b5e9b87e6..20abf3ec60 100644 --- a/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol +++ b/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol @@ -34,6 +34,20 @@ contract StakingRouter__MockForLidoAccounting { emit Mock__MintedRewardsReported(); } + function receiveDepositableEther() external payable { + // Mock implementation - no-op + } + + uint256 private depositAmountFromLastSlot__mocked; + + function onAccountingReport(uint256) external { + // Mock implementation - no-op + } + + function getDepositAmountFromLastSlot(uint256) external view returns (uint256) { + return depositAmountFromLastSlot__mocked; + } + function mock__getStakingRewardsDistribution( address[] calldata _recipients, uint256[] calldata _stakingModuleIds, @@ -47,4 +61,8 @@ contract StakingRouter__MockForLidoAccounting { totalFee__mocked = _totalFee; precisionPoint__mocked = _precisionPoints; } + + function 
mock__setDepositAmountFromLastSlot(uint256 _amount) external { + depositAmountFromLastSlot__mocked = _amount; + } } diff --git a/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol b/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol index d046ec24c9..36565a4cda 100644 --- a/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol +++ b/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol @@ -6,7 +6,10 @@ pragma solidity 0.8.9; contract StakingRouter__MockForLidoMisc { event Mock__DepositCalled(); + uint256 public constant INITIAL_DEPOSIT_SIZE = 32 ether; + uint256 private stakingModuleMaxDepositsCount; + uint256 private stakingModuleMaxInitialDepositsAmount; function getWithdrawalCredentials() external pure returns (bytes32) { return 0x010000000000000000000000b9d7934878b5fb9610b3fe8a5e441e8fad7e293f; // Lido Withdrawal Creds @@ -29,6 +32,13 @@ contract StakingRouter__MockForLidoMisc { modulesFee = 500; } + function getStakingModuleMaxInitialDepositsAmount( + uint256 stakingModuleId, + uint256 eth + ) external view returns (uint256, uint256) { + return (stakingModuleMaxInitialDepositsAmount, stakingModuleMaxDepositsCount); + } + function getStakingModuleMaxDepositsCount( uint256, // _stakingModuleId, uint256 // _maxDepositsValue @@ -37,14 +47,22 @@ contract StakingRouter__MockForLidoMisc { } function deposit( - uint256, // _depositsCount, uint256, // _stakingModuleId, bytes calldata // _depositCalldata ) external payable { emit Mock__DepositCalled(); } + function receiveDepositableEther() external payable { + // Mock function to receive ETH from Lido.withdrawDepositableEther + } + function mock__getStakingModuleMaxDepositsCount(uint256 newValue) external { stakingModuleMaxDepositsCount = newValue; + stakingModuleMaxInitialDepositsAmount = newValue * INITIAL_DEPOSIT_SIZE; + } + + function mock__setStakingModuleMaxInitialDepositsAmount(uint256 newValue) external { + stakingModuleMaxInitialDepositsAmount = newValue; } } diff --git 
a/test/0.4.24/contracts/StakingRouter__MockForLidoTopUp.sol b/test/0.4.24/contracts/StakingRouter__MockForLidoTopUp.sol new file mode 100644 index 0000000000..1f708a7dea --- /dev/null +++ b/test/0.4.24/contracts/StakingRouter__MockForLidoTopUp.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract StakingRouter__MockForLidoTopUp { + event Mock__TopUpCalled(uint256 stakingModuleId, bytes pubkeysPacked, uint256[] topUpLimitsGwei); + + uint256 private _amount; + bytes private _pubkeysPacked; + uint256[] private _topUpAmounts; + + uint256 public topUpCalls; + bool public shouldRevert; + + function getTopUpDepositAmount( + uint256, + uint256, + uint256[] calldata, + uint256[] calldata, + bytes calldata, + uint256[] calldata + ) external view returns (uint256 amount, bytes memory pubkeysPacked, uint256[] memory allocations) { + amount = _amount; + pubkeysPacked = _pubkeysPacked; + allocations = _topUpAmounts; + } + + function topUp( + uint256 stakingModuleId, + bytes calldata pubkeysPacked, + uint256[] calldata topUpAmountsGwei + ) external payable { + require(!shouldRevert, "StakingRouter: revert"); + ++topUpCalls; + emit Mock__TopUpCalled(stakingModuleId, pubkeysPacked, topUpAmountsGwei); + } + + function mock__setTopUpAmount( + uint256 topUpDepositAmount, + bytes calldata pubkeysPacked, + uint256[] calldata topUpAmounts + ) external { + _amount = topUpDepositAmount; + _pubkeysPacked = pubkeysPacked; + _topUpAmounts = topUpAmounts; + } + + function mock__setShouldRevert(bool value) external { + shouldRevert = value; + } +} diff --git a/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol b/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol index 6811039b20..5dd465c6ee 100644 --- a/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol +++ b/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol @@ -13,6 +13,8 @@ contract WithdrawalQueue__MockForAccounting { ); bool 
public isPaused; + bool public isBunkerModeActive; + uint256 public unfinalizedStETH; uint256 private ethToLock_; uint256 private sharesToBurn_; @@ -32,6 +34,12 @@ contract WithdrawalQueue__MockForAccounting { function finalize(uint256 _lastRequestIdToBeFinalized, uint256 _maxShareRate) external payable { _maxShareRate; + if (unfinalizedStETH > msg.value) { + unfinalizedStETH -= msg.value; + } else { + unfinalizedStETH = 0; + } + // some random fake event values uint256 firstRequestIdToFinalize = 0; uint256 sharesToBurn = msg.value; @@ -53,4 +61,8 @@ contract WithdrawalQueue__MockForAccounting { ethToLock_ = _ethToLock; sharesToBurn_ = _sharesToBurn; } + + function mock__unfinalizedStETH(uint256 _unfinalizedStETH) external { + unfinalizedStETH = _unfinalizedStETH; + } } diff --git a/test/0.4.24/lido/lido.accounting.test.ts b/test/0.4.24/lido/lido.accounting.test.ts index 0fda2c4d7f..63a4eea0e0 100644 --- a/test/0.4.24/lido/lido.accounting.test.ts +++ b/test/0.4.24/lido/lido.accounting.test.ts @@ -4,6 +4,8 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { + AccountingOracle__MockForStakingRouter, + AccountingOracle__MockForStakingRouter__factory, ACL, Burner__MockForAccounting, Burner__MockForAccounting__factory, @@ -37,16 +39,18 @@ describe("Lido:accounting", () => { let burner: Burner__MockForAccounting; let elRewardsVault: LidoExecutionLayerRewardsVault__MockForLidoAccounting; let withdrawalVault: WithdrawalVault__MockForLidoAccounting; + let accountingOracle: AccountingOracle__MockForStakingRouter; beforeEach(async () => { [deployer, stranger] = await ethers.getSigners(); - [stakingRouter, withdrawalQueue, burner, elRewardsVault, withdrawalVault] = await Promise.all([ + [stakingRouter, withdrawalQueue, burner, elRewardsVault, withdrawalVault, accountingOracle] = await Promise.all([ new StakingRouter__MockForLidoAccounting__factory(deployer).deploy(), new 
WithdrawalQueue__MockForAccounting__factory(deployer).deploy(), new Burner__MockForAccounting__factory(deployer).deploy(), new LidoExecutionLayerRewardsVault__MockForLidoAccounting__factory(deployer).deploy(), new WithdrawalVault__MockForLidoAccounting__factory(deployer).deploy(), + new AccountingOracle__MockForStakingRouter__factory(deployer).deploy(), ]); ({ lido, acl } = await deployLidoDao({ @@ -58,13 +62,13 @@ describe("Lido:accounting", () => { burner, elRewardsVault, withdrawalVault, + accountingOracle, }, })); locator = LidoLocator__factory.connect(await lido.getLidoLocator(), deployer); await acl.createPermission(deployer, lido, await lido.RESUME_ROLE(), deployer); await acl.createPermission(deployer, lido, await lido.PAUSE_ROLE(), deployer); - await acl.createPermission(deployer, lido, await lido.UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE(), deployer); await lido.resume(); }); @@ -84,36 +88,42 @@ describe("Lido:accounting", () => { await expect( lido.processClStateUpdate( ...args({ - postClValidators: 100n, - postClBalance: 100n, + clValidatorsBalance: 100n, + clPendingBalance: 50n, }), ), ) - .to.emit(lido, "CLValidatorsUpdated") - .withArgs(0n, 0n, 100n); + .to.emit(lido, "CLBalancesUpdated") + .withArgs(0n, 100n, 50n); }); - type ArgsTuple = [bigint, bigint, bigint, bigint]; + type ArgsTuple = [bigint, bigint, bigint]; interface Args { reportTimestamp: bigint; - preClValidators: bigint; - postClValidators: bigint; - postClBalance: bigint; + clValidatorsBalance: bigint; + clPendingBalance: bigint; } function args(overrides?: Partial): ArgsTuple { return Object.values({ reportTimestamp: 0n, - preClValidators: 0n, - postClValidators: 0n, - postClBalance: 0n, + clValidatorsBalance: 0n, + clPendingBalance: 0n, ...overrides, }) as ArgsTuple; } }); context("collectRewardsAndProcessWithdrawals", async () => { + async function getAccountingSigner() { + return impersonate(await locator.accounting(), ether("100.0")); + } + + async function getStakingRouterSigner() { 
+ return impersonate(await locator.stakingRouter(), ether("1.0")); + } + it("Reverts when contract is stopped", async () => { await lido.connect(deployer).stop(); await expect(lido.collectRewardsAndProcessWithdrawals(...args())).to.be.revertedWith("CONTRACT_IS_STOPPED"); @@ -134,7 +144,7 @@ describe("Lido:accounting", () => { expect(initialBufferedEther).greaterThanOrEqual(ethToLock); await withdrawalQueue.mock__prefinalizeReturn(ethToLock, 0n); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await lido.collectRewardsAndProcessWithdrawals(...args({ etherToLockOnWithdrawalQueue: ethToLock })); @@ -147,7 +157,7 @@ describe("Lido:accounting", () => { await updateBalance(await elRewardsVault.getAddress(), elRewardsToWithdraw); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect(lido.collectRewardsAndProcessWithdrawals(...args({ elRewardsToWithdraw }))) @@ -166,7 +176,7 @@ describe("Lido:accounting", () => { await updateBalance(await withdrawalVault.getAddress(), withdrawalsToWithdraw); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect(lido.collectRewardsAndProcessWithdrawals(...args({ withdrawalsToWithdraw }))) @@ -187,7 +197,7 @@ describe("Lido:accounting", () => { await updateBalance(await elRewardsVault.getAddress(), elRewardsToWithdraw); await updateBalance(await withdrawalVault.getAddress(), withdrawalsToWithdraw); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await 
expect(lido.collectRewardsAndProcessWithdrawals(...args({ elRewardsToWithdraw, withdrawalsToWithdraw }))) @@ -224,7 +234,7 @@ describe("Lido:accounting", () => { const precisionPoints = 10n ** 20n; await stakingRouter.mock__getStakingRewardsDistribution([], [], [], totalFee, precisionPoints); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect( lido.collectRewardsAndProcessWithdrawals( @@ -238,6 +248,155 @@ describe("Lido:accounting", () => { .withArgs(reportTimestamp, preCLBalance, clBalance, withdrawals, elRewards, bufferedEther); }); + it("Resyncs deposits reserve to target on report processing when reserve was spent", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("3.0"); + await lido.setDepositsReserveTarget(reserveTarget); + expect(await lido.getDepositsReserve()).to.equal(0n); + + await accountingOracle.mock_setProcessingState(1, true, true); + + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args()); + + const bufferedAfterSync = await lido.getBufferedEther(); + const expectedReserveAfterSync = bufferedAfterSync < reserveTarget ? 
bufferedAfterSync : reserveTarget; + + expect(await lido.getDepositsReserve()).to.equal(expectedReserveAfterSync); + await lido.submit(await deployer.getAddress(), { value: ether("10.0") }); + + const stakingRouterSigner = await getStakingRouterSigner(); + const spendAmount = ether("1.0"); + await lido.connect(stakingRouterSigner).withdrawDepositableEther(spendAmount, 1n); + + expect(await lido.getDepositsReserveTarget()).to.equal(reserveTarget); + expect(await lido.getDepositsReserve()).to.equal(reserveTarget - spendAmount); + + await expect(lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args())) + .to.emit(lido, "DepositsReserveSet") + .withArgs(reserveTarget); + + expect(await lido.getDepositsReserve()).to.equal(reserveTarget); + }); + + it("Does not emit DepositsReserveSet on report processing when reserve already matches target", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("2.0"); + await lido.setDepositsReserveTarget(reserveTarget); + expect(await lido.getDepositsReserveTarget()).to.equal(reserveTarget); + + const accountingSigner = await getAccountingSigner(); + // First report syncs reserve to target after target increase. + await expect(lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args())) + .to.emit(lido, "DepositsReserveSet") + .withArgs(reserveTarget); + + const bufferedAfterSync = await lido.getBufferedEther(); + const expectedReserveAfterSync = bufferedAfterSync < reserveTarget ? 
bufferedAfterSync : reserveTarget; + expect(await lido.getDepositsReserve()).to.equal(expectedReserveAfterSync); + + await expect(lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args())).not.to.emit( + lido, + "DepositsReserveSet", + ); + }); + + it("Keeps effective deposits reserve capped by buffered ether after report sync", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("100.0"); + await lido.setDepositsReserveTarget(reserveTarget); + expect(await lido.getDepositsReserveTarget()).to.equal(reserveTarget); + + const bufferedBefore = await lido.getBufferedEther(); + expect(bufferedBefore).to.be.gt(0n); + + const reserveBefore = await lido.getDepositsReserve(); + expect(reserveBefore).to.equal(0n); + + // Target increase is deferred until report processing. + expect(await lido.getDepositsReserve()).to.equal(reserveBefore); + + const submitted = ether("1.0"); + await lido.submit(await deployer.getAddress(), { value: submitted }); + + const bufferedAfterSubmit = await lido.getBufferedEther(); + expect(bufferedAfterSubmit).to.equal(bufferedBefore + submitted); + expect(await lido.getDepositsReserve()).to.equal(reserveBefore); + + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args()); + + const bufferedAfter = await lido.getBufferedEther(); + expect(bufferedAfter).to.equal(bufferedAfterSubmit); + const expectedReserveAfterSync = bufferedAfter < reserveTarget ? 
bufferedAfter : reserveTarget; + expect(await lido.getDepositsReserve()).to.equal(expectedReserveAfterSync); + }); + + it("Consumes withdrawals reserve on withdrawal finalization (when deposits reserve = 0)", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + await lido.setDepositsReserveTarget(0n); + + await lido.submit(await deployer.getAddress(), { value: ether("10.0") }); + + const unfinalizedBefore = ether("6.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalizedBefore); + + const bufferedBefore = await lido.getBufferedEther(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(unfinalizedBefore); + + const lockAmount = ether("2.0"); + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals( + ...args({ + lastWithdrawalRequestToFinalize: 1n, + simulatedShareRate: 1n, + etherToLockOnWithdrawalQueue: lockAmount, + }), + ); + + const bufferedAfter = await lido.getBufferedEther(); + expect(bufferedAfter).to.equal(bufferedBefore - lockAmount); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(unfinalizedBefore - lockAmount); + }); + + it("Consumes withdrawals reserve on withdrawal finalization (when deposits reserve > 0)", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("3.0"); + await lido.setDepositsReserveTarget(reserveTarget); + + await lido.submit(await deployer.getAddress(), { value: ether("10.0") }); + await withdrawalQueue.mock__unfinalizedStETH(ether("6.0")); + + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args()); + + const depositsReserveBefore = await lido.getDepositsReserve(); + const withdrawalsReserveBefore = 
await lido.getWithdrawalsReserve(); + const bufferedBefore = await lido.getBufferedEther(); + + expect(depositsReserveBefore).to.be.gt(0n); + expect(withdrawalsReserveBefore).to.be.gt(0n); + + const lockAmount = ether("2.0"); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals( + ...args({ + lastWithdrawalRequestToFinalize: 1n, + simulatedShareRate: 1n, + etherToLockOnWithdrawalQueue: lockAmount, + }), + ); + + expect(await lido.getBufferedEther()).to.equal(bufferedBefore - lockAmount); + expect(await lido.getDepositsReserve()).to.equal(depositsReserveBefore); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore - lockAmount); + }); + type ArgsTuple = [bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint]; interface Args { diff --git a/test/0.4.24/lido/lido.finalizeUpgrade_v3.test.ts b/test/0.4.24/lido/lido.finalizeUpgrade_v3.test.ts deleted file mode 100644 index 1914a98c3b..0000000000 --- a/test/0.4.24/lido/lido.finalizeUpgrade_v3.test.ts +++ /dev/null @@ -1,211 +0,0 @@ -import { expect } from "chai"; -import { MaxUint256, ZeroAddress } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { time } from "@nomicfoundation/hardhat-network-helpers"; - -import { - Burner, - Burner__MockForMigration, - ICSModule__factory, - Lido__HarnessForFinalizeUpgradeV3, - LidoLocator, - OssifiableProxy__factory, -} from "typechain-types"; - -import { certainAddress, ether, getStorageAtPosition, impersonate, proxify, TOTAL_BASIS_POINTS } from "lib"; - -import { deployLidoLocator } from "test/deploy"; -import { Snapshot } from "test/suite"; - -describe("Lido.sol:finalizeUpgrade_v3", () => { - let deployer: HardhatEthersSigner; - - let impl: Lido__HarnessForFinalizeUpgradeV3; - let lido: Lido__HarnessForFinalizeUpgradeV3; - let locator: LidoLocator; - - const initialValue = 1n; - const finalizeVersion = 3n; - - let withdrawalQueueAddress: 
string; - let burner: Burner; - let oldBurner: Burner__MockForMigration; - - const dummyLocatorAddress = certainAddress("dummy-locator"); - let simpleDvtAddress: string; - let nodeOperatorsRegistryAddress: string; - let csmAccountingAddress: string; - - const oldCoverSharesBurnRequested = 100n; - const oldNonCoverSharesBurnRequested = 200n; - const oldTotalCoverSharesBurnt = 300n; - const oldTotalNonCoverSharesBurnt = 400n; - const sharesOnOldBurner = 1000n; - - let originalState: string; - - before(async () => { - [deployer] = await ethers.getSigners(); - impl = await ethers.deployContract("Lido__HarnessForFinalizeUpgradeV3"); - [lido] = await proxify({ impl, admin: deployer }); - - burner = await ethers.deployContract("Burner", [dummyLocatorAddress, lido]); - - const proxyFactory = new OssifiableProxy__factory(deployer); - const burnerProxy = await proxyFactory.deploy(burner, deployer, new Uint8Array()); - burner = burner.attach(burnerProxy) as Burner; - - const isMigrationAllowed = true; - await burner.connect(deployer).initialize(deployer, isMigrationAllowed); - const stakingRouter = await ethers.deployContract("StakingRouter__MockForLidoUpgrade"); - - nodeOperatorsRegistryAddress = (await stakingRouter.getStakingModule(1)).stakingModuleAddress; - simpleDvtAddress = (await stakingRouter.getStakingModule(2)).stakingModuleAddress; - csmAccountingAddress = await ICSModule__factory.connect( - (await stakingRouter.getStakingModule(3)).stakingModuleAddress, - deployer, - ).accounting(); - - locator = await deployLidoLocator({ burner, stakingRouter }); - - withdrawalQueueAddress = await locator.withdrawalQueue(); - - oldBurner = await ethers.deployContract("Burner__MockForMigration", []); - await oldBurner - .connect(deployer) - .setSharesRequestedToBurn(oldCoverSharesBurnRequested, oldNonCoverSharesBurnRequested); - await oldBurner.connect(deployer).setSharesBurnt(oldTotalCoverSharesBurnt, oldTotalNonCoverSharesBurnt); - - await lido.connect(await 
impersonate(nodeOperatorsRegistryAddress, ether("1"))).approve(oldBurner, MaxUint256); - await lido.connect(await impersonate(simpleDvtAddress, ether("1"))).approve(oldBurner, MaxUint256); - await lido.connect(await impersonate(csmAccountingAddress, ether("1"))).approve(oldBurner, MaxUint256); - await lido.connect(await impersonate(withdrawalQueueAddress, ether("1"))).approve(oldBurner, MaxUint256); - }); - - beforeEach(async () => (originalState = await Snapshot.take())); - afterEach(async () => await Snapshot.restore(originalState)); - - it("Reverts if not initialized", async () => { - await expect(lido.finalizeUpgrade_v3(ZeroAddress, [], 0)).to.be.revertedWith("NOT_INITIALIZED"); - }); - - context("initialized", () => { - before(async () => { - const latestBlock = BigInt(await time.latestBlock()); - - await lido.connect(deployer).harness_initialize_v2(locator, { value: initialValue }); - - expect(await impl.getInitializationBlock()).to.equal(MaxUint256); - expect(await lido.getInitializationBlock()).to.equal(latestBlock + 1n); - }); - - it("Reverts if contract version does not equal 2", async () => { - const unexpectedVersion = 1n; - await lido.harness_setContractVersion(unexpectedVersion); - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - 0, - ), - ).to.be.revertedWith("UNEXPECTED_CONTRACT_VERSION"); - }); - - it("Reverts if old burner is the same as new burner", async () => { - await expect(lido.finalizeUpgrade_v3(burner, [], 0)).to.be.revertedWith("OLD_BURNER_SAME_AS_NEW"); - }); - - it("Reverts if old burner is zero address", async () => { - await expect(lido.finalizeUpgrade_v3(ZeroAddress, [], 0)).to.be.revertedWith("OLD_BURNER_ADDRESS_ZERO"); - }); - - it("Sets contract version to 3 and max external ratio to 10", async () => { - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, 
csmAccountingAddress, withdrawalQueueAddress], - 10, - ), - ) - .to.emit(lido, "ContractVersionSet") - .withArgs(finalizeVersion) - .and.emit(lido, "MaxExternalRatioBPSet") - .withArgs(10); - expect(await lido.getContractVersion()).to.equal(finalizeVersion); - expect(await lido.getMaxExternalRatioBP()).to.equal(10); - }); - - it("Reverts if initial max external ratio is greater than total basis points", async () => { - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - TOTAL_BASIS_POINTS + 1n, - ), - ).to.be.revertedWith("INVALID_MAX_EXTERNAL_RATIO"); - }); - - it("Migrates storage successfully", async () => { - const totalShares = await getStorageAtPosition(lido, "lido.StETH.totalShares"); - const bufferedEther = await getStorageAtPosition(lido, "lido.Lido.bufferedEther"); - - const beaconValidators = await getStorageAtPosition(lido, "lido.Lido.beaconValidators"); - const beaconBalance = await getStorageAtPosition(lido, "lido.Lido.beaconBalance"); - const depositedValidators = await getStorageAtPosition(lido, "lido.Lido.depositedValidators"); - - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - 0, - ), - ).to.not.be.reverted; - - expect(await lido.getLidoLocator()).to.equal(locator); - expect(await lido.getTotalShares()).to.equal(totalShares); - expect(await lido.getBufferedEther()).to.equal(bufferedEther); - - expect((await lido.getBeaconStat()).beaconBalance).to.equal(beaconBalance); - expect((await lido.getBeaconStat()).beaconValidators).to.equal(beaconValidators); - expect((await lido.getBeaconStat()).depositedValidators).to.equal(depositedValidators); - }); - - it("Migrates burner successfully", async () => { - await lido.harness_mintShares_v2(oldBurner, sharesOnOldBurner); - expect(await lido.sharesOf(oldBurner)).to.equal(sharesOnOldBurner); - - await 
expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - 0, - ), - ) - .to.emit(lido, "TransferShares") - .withArgs(oldBurner, burner, sharesOnOldBurner); - - expect(await lido.sharesOf(oldBurner)).to.equal(0n); - expect(await lido.sharesOf(burner)).to.equal(sharesOnOldBurner); - - expect(await burner.getCoverSharesBurnt()).to.equal(oldTotalCoverSharesBurnt); - expect(await burner.getNonCoverSharesBurnt()).to.equal(oldTotalNonCoverSharesBurnt); - const [coverShares, nonCoverShares] = await burner.getSharesRequestedToBurn(); - expect(coverShares).to.equal(oldCoverSharesBurnRequested); - expect(nonCoverShares).to.equal(oldNonCoverSharesBurnRequested); - - // Check old burner allowances are revoked - expect(await lido.allowance(nodeOperatorsRegistryAddress, oldBurner)).to.equal(0n); - expect(await lido.allowance(simpleDvtAddress, oldBurner)).to.equal(0n); - expect(await lido.allowance(csmAccountingAddress, oldBurner)).to.equal(0n); - expect(await lido.allowance(withdrawalQueueAddress, oldBurner)).to.equal(0n); - - // Check new burner allowances are set - expect(await lido.allowance(nodeOperatorsRegistryAddress, burner)).to.equal(MaxUint256); - expect(await lido.allowance(simpleDvtAddress, burner)).to.equal(MaxUint256); - expect(await lido.allowance(csmAccountingAddress, burner)).to.equal(MaxUint256); - expect(await lido.allowance(withdrawalQueueAddress, burner)).to.equal(MaxUint256); - }); - }); -}); diff --git a/test/0.4.24/lido/lido.finalizeUpgrade_v4.test.ts b/test/0.4.24/lido/lido.finalizeUpgrade_v4.test.ts new file mode 100644 index 0000000000..78e603d8dd --- /dev/null +++ b/test/0.4.24/lido/lido.finalizeUpgrade_v4.test.ts @@ -0,0 +1,102 @@ +import { expect } from "chai"; +import { MaxUint256 } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { time } from 
"@nomicfoundation/hardhat-network-helpers"; + +import { + AccountingOracle__MockForStakingRouter, + Lido__HarnessForFinalizeUpgradeV4, + LidoLocator, +} from "typechain-types"; + +import { ether, getStorageAtPositionAsUint128Pair, proxify } from "lib"; + +import { deployLidoLocator } from "test/deploy/locator"; +import { Snapshot } from "test/suite"; + +describe("Lido.sol:finalizeUpgrade_v4", () => { + let deployer: HardhatEthersSigner; + + let impl: Lido__HarnessForFinalizeUpgradeV4; + let lido: Lido__HarnessForFinalizeUpgradeV4; + let accountingOracle: AccountingOracle__MockForStakingRouter; + let locator: LidoLocator; + + const initialValue = 1n; + const finalizeVersion = 4n; + + let originalState: string; + + before(async () => { + [deployer] = await ethers.getSigners(); + impl = await ethers.deployContract("Lido__HarnessForFinalizeUpgradeV4", { + signer: deployer, + }); + [lido] = await proxify({ impl, admin: deployer }); + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + locator = await deployLidoLocator({ lido, accountingOracle }, deployer); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + afterEach(async () => await Snapshot.restore(originalState)); + + it("Reverts if not initialized", async () => { + await expect(lido.finalizeUpgrade_v4()).to.be.revertedWith("NOT_INITIALIZED"); + }); + + context("initialized", () => { + before(async () => { + const latestBlock = BigInt(await time.latestBlock()); + + await lido.connect(deployer).harness_initialize_v3(locator, { value: initialValue }); + + expect(await impl.getInitializationBlock()).to.equal(MaxUint256); + expect(await lido.getInitializationBlock()).to.equal(latestBlock + 1n); + }); + + it("Reverts if contract version does not equal 3", async () => { + const unexpectedVersion = 1n; + await lido.harness_setContractVersion(unexpectedVersion); + await 
expect(lido.finalizeUpgrade_v4()).to.be.revertedWith("UNEXPECTED_CONTRACT_VERSION"); + }); + + it("Sets contract version to 4", async () => { + await expect(lido.finalizeUpgrade_v4()).to.emit(lido, "ContractVersionSet").withArgs(finalizeVersion); + expect(await lido.getContractVersion()).to.equal(finalizeVersion); + }); + + it("Reverts upgrade if occurred before report", async () => { + // simulate no report + await accountingOracle.mock_setProcessingState(1, false, false); + await expect(lido.finalizeUpgrade_v4()).to.be.revertedWith("NO_REPORT"); + }); + + it("Migrates storage successfully after report and before next frame", async () => { + // simulate report + await accountingOracle.mock_setProcessingState(1, true, true); + const { low: bufferedEther, high: depositedValidators } = await getStorageAtPositionAsUint128Pair( + lido, + "lido.Lido.bufferedEtherAndDepositedValidators", + ); + const { low: clBalance, high: clValidators } = await getStorageAtPositionAsUint128Pair( + lido, + "lido.Lido.clBalanceAndClValidators", + ); + + const depositedBalance = (depositedValidators - clValidators) * ether("32"); + + await expect(lido.finalizeUpgrade_v4()).to.not.be.reverted; + + expect(await lido.getBufferedEther()).to.equal(bufferedEther); + expect((await lido.getBeaconStat()).beaconBalance).to.equal(clBalance); + expect((await lido.getBeaconStat()).beaconValidators).to.equal(depositedValidators); + expect((await lido.getBeaconStat()).depositedValidators).to.equal(depositedValidators); + expect((await lido.getBalanceStats()).clValidatorsBalanceAtLastReport).to.equal(clBalance); + expect((await lido.getBalanceStats()).clPendingBalanceAtLastReport).to.equal(0); + expect((await lido.getBalanceStats()).depositedSinceLastReport).to.equal(depositedBalance); + expect((await lido.getBalanceStats()).depositedForCurrentReport).to.equal(0); + }); + }); +}); diff --git a/test/0.4.24/lido/lido.initialize.test.ts b/test/0.4.24/lido/lido.initialize.test.ts index 4f2238b385..dda86a8aff 
100644 --- a/test/0.4.24/lido/lido.initialize.test.ts +++ b/test/0.4.24/lido/lido.initialize.test.ts @@ -21,7 +21,9 @@ describe("Lido.sol:initialize", () => { before(async () => { [deployer] = await ethers.getSigners(); - const impl = await ethers.deployContract("Lido", deployer); + const impl = await ethers.deployContract("Lido", { + signer: deployer, + }); expect(await impl.getInitializationBlock()).to.equal(MaxUint256); [lido] = await proxify({ impl, admin: deployer }); @@ -33,7 +35,7 @@ describe("Lido.sol:initialize", () => { context("initialize", () => { const initialValue = 1n; - const contractVersion = 3n; + const contractVersion = 4n; let withdrawalQueueAddress: string; let burnerAddress: string; diff --git a/test/0.4.24/lido/lido.misc.test.ts b/test/0.4.24/lido/lido.misc.test.ts index ac108f49a0..06a254de56 100644 --- a/test/0.4.24/lido/lido.misc.test.ts +++ b/test/0.4.24/lido/lido.misc.test.ts @@ -5,6 +5,8 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { + Accounting__MockForAccountingOracle, + AccountingOracle__MockForStakingRouter, ACL, Lido, LidoLocator, @@ -29,6 +31,8 @@ describe("Lido.sol:misc", () => { let locator: LidoLocator; let withdrawalQueue: WithdrawalQueue__MockForLidoMisc; let stakingRouter: StakingRouter__MockForLidoMisc; + let accounting: Accounting__MockForAccountingOracle; + let accountingOracle: AccountingOracle__MockForStakingRouter; const elRewardsVaultBalance = ether("100.0"); const withdrawalsVaultBalance = ether("100.0"); @@ -39,6 +43,8 @@ describe("Lido.sol:misc", () => { withdrawalQueue = await ethers.deployContract("WithdrawalQueue__MockForLidoMisc", deployer); stakingRouter = await ethers.deployContract("StakingRouter__MockForLidoMisc", deployer); + accounting = await ethers.deployContract("Accounting__MockForAccountingOracle", deployer); + accountingOracle = 
await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); ({ lido, acl } = await deployLidoDao({ rootAccount: deployer, @@ -47,12 +54,14 @@ describe("Lido.sol:misc", () => { withdrawalQueue, stakingRouter, depositSecurityModule, + accounting, + accountingOracle, }, })); + await acl.createPermission(user, lido, await lido.STAKING_CONTROL_ROLE(), deployer); await acl.createPermission(user, lido, await lido.RESUME_ROLE(), deployer); await acl.createPermission(user, lido, await lido.PAUSE_ROLE(), deployer); - await acl.createPermission(user, lido, await lido.UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE(), deployer); lido = lido.connect(user); locator = await ethers.getContractAt("LidoLocator", await lido.getLidoLocator(), user); @@ -143,9 +152,10 @@ describe("Lido.sol:misc", () => { }); context("canDeposit", () => { - it("Returns true if Lido is not stopped and bunkerMode is disabled", async () => { + it("Returns true if Lido is not stopped and bunkerMode is disabled, and report is submitted", async () => { await lido.resume(); await withdrawalQueue.mock__bunkerMode(false); + await accountingOracle.mock_setProcessingState(1, true, true); expect(await lido.canDeposit()).to.equal(true); }); @@ -168,23 +178,11 @@ describe("Lido.sol:misc", () => { expect(await lido.canDeposit()).to.equal(false); }); - }); - - context("unsafeChangeDepositedValidators", () => { - it("Sets the number of deposited validators", async () => { - const { depositedValidators } = await lido.getBeaconStat(); - - const updatedDepositedValidators = depositedValidators + 50n; - await expect(lido.unsafeChangeDepositedValidators(updatedDepositedValidators)) - .to.emit(lido, "DepositedValidatorsChanged") - .withArgs(updatedDepositedValidators); - - expect((await lido.getBeaconStat()).depositedValidators).to.equal(updatedDepositedValidators); - }); + it("Returns false if main phase of report is not submitted", async () => { + await accountingOracle.mock_setProcessingState(1, false, false); 
- it("Reverts if the caller is unauthorized", async () => { - await expect(lido.connect(stranger).unsafeChangeDepositedValidators(100n)).to.be.revertedWith("APP_AUTH_FAILED"); + expect(await lido.canDeposit()).to.equal(false); }); }); @@ -221,9 +219,11 @@ describe("Lido.sol:misc", () => { }); context("getDepositableEther", () => { - it("Returns the amount of ether eligible for deposits", async () => { + it("Returns the amount of ether eligible for deposits (deposits reserve = 0)", async () => { await lido.resume(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositsReserveTarget()).to.equal(0n); const bufferedEtherBefore = await lido.getBufferedEther(); // top up buffer @@ -233,9 +233,11 @@ describe("Lido.sol:misc", () => { expect(await lido.getDepositableEther()).to.equal(bufferedEtherBefore + deposit); }); - it("Returns 0 if reserved by the buffered ether is fully reserved for withdrawals", async () => { + it("Returns 0 if buffered ether is fully reserved for withdrawals (deposits reserve = 0)", async () => { await lido.resume(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositsReserveTarget()).to.equal(0n); const bufferedEther = await lido.getBufferedEther(); // reserve all buffered ether for withdrawals @@ -244,9 +246,11 @@ describe("Lido.sol:misc", () => { expect(await lido.getDepositableEther()).to.equal(0); }); - it("Returns the difference if the buffered ether is partially reserved", async () => { + it("Returns buffered-minus-withdrawals reserve (deposits reserve = 0)", async () => { await lido.resume(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositsReserveTarget()).to.equal(0n); const bufferedEther = await lido.getBufferedEther(); // reserve half of buffered ether for withdrawals @@ -255,56 +259,524 @@ describe("Lido.sol:misc", () => { expect(await lido.getDepositableEther()).to.equal(bufferedEther - reservedForWithdrawals); }); + + 
it("Spending depositable ether does not affect withdrawals reserve", async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + const stakingRouterSigner = await impersonate(await locator.stakingRouter(), ether("1.0")); + + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("25.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + + const unfinalized = ether("50.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalized); + + const bufferedBefore = await lido.getBufferedEther(); + const depositsReserveBefore = await lido.getDepositsReserve(); + const expectedWithdrawalsReserveBefore = + bufferedBefore - depositsReserveBefore < unfinalized ? bufferedBefore - depositsReserveBefore : unfinalized; + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveBefore).to.equal(expectedWithdrawalsReserveBefore); + expect(withdrawalsReserveBefore).to.be.gt(0n); + const depositableBefore = await lido.getDepositableEther(); + expect(depositableBefore).to.be.gt(1n); + + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore / 2n, 0n); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + + const remainingDepositable = await lido.getDepositableEther(); + await lido.connect(stakingRouterSigner).withdrawDepositableEther(remainingDepositable, 0n); + + expect(await lido.getDepositableEther()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + }); + + it("Returns deposits reserve when withdrawals demand saturates remaining buffer", async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + 
const accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("25.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + + const depositsReserve = await lido.getDepositsReserve(); + expect(depositsReserve).to.equal(ether("25.0")); + expect(await lido.getDepositableEther()).to.be.gt(depositsReserve); + + await withdrawalQueue.mock__unfinalizedStETH(ether("1000.0")); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + }); + + it("Keeps depositable unchanged on reserve target increase before report sync", async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("10.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(ether("100.0")); + + const depositableBefore = await lido.getDepositableEther(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositsReserveBefore = await lido.getDepositsReserve(); + + await lido.setDepositsReserveTarget(ether("50.0")); + + expect(await lido.getDepositsReserve()).to.equal(depositsReserveBefore); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + expect(await lido.getDepositableEther()).to.equal(depositableBefore); + }); }); - context("deposit", () => { - const maxDepositsCount = 100n; - const stakingModuleId = 1n; - const depositCalldata = new Uint8Array(); + context("depositsReserve", () => { + let stakingRouterSigner: HardhatEthersSigner; + let accountingSigner: HardhatEthersSigner; + + 
const syncReserveWithOracleReport = async () => { + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + }; + + const assertDepositsReserveInvariants = async () => { + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const depositable = await lido.getDepositableEther(); + + expect(depositsReserve).to.be.lte(buffered); + expect(withdrawalsReserve).to.be.lte(buffered); + expect(depositable).to.be.lte(buffered); + expect(depositable).to.equal(buffered - withdrawalsReserve); + expect(depositsReserve + withdrawalsReserve).to.be.lte(buffered); + }; beforeEach(async () => { await lido.resume(); - lido = lido.connect(depositSecurityModule); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + stakingRouterSigner = await impersonate(await locator.stakingRouter(), ether("1.0")); + accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + }); + + it("Reverts if caller has no BUFFER_RESERVE_MANAGER_ROLE", async () => { + await expect(lido.connect(stranger).setDepositsReserveTarget(ether("1.0"))).to.be.revertedWith("APP_AUTH_FAILED"); + }); + + it("Calculates allocation consistently with withdrawals reserve and target", async () => { + const deposit = ether("100.0"); + const reserveTarget = ether("30.0"); + await lido.submit(ZeroAddress, { value: deposit }); + await lido.setDepositsReserveTarget(reserveTarget); + await syncReserveWithOracleReport(); + + const buffered = await lido.getBufferedEther(); + const unfinalized = ether("40.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalized); + + const expectedDepositsReserve = buffered < reserveTarget ? buffered : reserveTarget; + const remainingAfterDeposits = buffered - expectedDepositsReserve; + const expectedWithdrawalsReserve = remainingAfterDeposits < unfinalized ? 
remainingAfterDeposits : unfinalized; + + expect(await lido.getDepositsReserve()).to.equal(expectedDepositsReserve); + expect(await lido.getWithdrawalsReserve()).to.equal(expectedWithdrawalsReserve); + expect(await lido.getDepositableEther()).to.equal(buffered - expectedWithdrawalsReserve); }); - it("Reverts if the caller is not `DepositSecurityModule`", async () => { - lido = lido.connect(stranger); + it("Does not increase current reserve immediately when target is increased", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("10.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("10.0")); + + await lido.setDepositsReserveTarget(ether("60.0")); + // Reserve increase is deferred until report processing. + expect(await lido.getDepositsReserve()).to.equal(ether("10.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("60.0")); + }); + + it("Keeps depositable unchanged on target increase before report sync", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("10.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("100.0")); + + const depositableBefore = await lido.getDepositableEther(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + + await lido.setDepositsReserveTarget(ether("50.0")); + + expect(await lido.getDepositableEther()).to.equal(depositableBefore); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + }); + + it("Caps current reserve immediately when target is lowered", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("70.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("70.0")); + + await 
lido.setDepositsReserveTarget(ether("20.0")); + expect(await lido.getDepositsReserve()).to.equal(ether("20.0")); + }); + + it("Decreases depositable immediately on target decrease in saturated withdrawals demand", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("40.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("1000.0")); + + const buffered = await lido.getBufferedEther(); + expect(await lido.getDepositableEther()).to.equal(ether("40.0")); + expect(await lido.getWithdrawalsReserve()).to.equal(buffered - ether("40.0")); + + await lido.setDepositsReserveTarget(ether("20.0")); + + expect(await lido.getDepositableEther()).to.equal(ether("20.0")); + expect(await lido.getWithdrawalsReserve()).to.equal(buffered - ether("20.0")); + }); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)).to.be.revertedWith( - "APP_AUTH_DSM_FAILED", + it("Updates depositable immediately when unfinalized withdrawals demand changes", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await syncReserveWithOracleReport(); + + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + + await withdrawalQueue.mock__unfinalizedStETH(ether("10.0")); + expect(await lido.getDepositableEther()).to.equal(buffered - ether("10.0")); + + await withdrawalQueue.mock__unfinalizedStETH(ether("50.0")); + expect(await lido.getDepositableEther()).to.equal(buffered - ether("50.0")); + + await withdrawalQueue.mock__unfinalizedStETH(ether("1000.0")); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + }); + + it("Keeps depositable at deposits reserve when unfinalized demand reaches allocation boundary", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await 
lido.setDepositsReserveTarget(ether("25.0")); + await syncReserveWithOracleReport(); + + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + const boundary = buffered - depositsReserve; + + await withdrawalQueue.mock__unfinalizedStETH(boundary); + expect(await lido.getWithdrawalsReserve()).to.equal(boundary); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + + await withdrawalQueue.mock__unfinalizedStETH(boundary + 1n); + expect(await lido.getWithdrawalsReserve()).to.equal(boundary); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + }); + + it("Handles setting reserve target to zero", async () => { + const deposit = ether("100.0"); + await lido.submit(ZeroAddress, { value: deposit }); + await lido.setDepositsReserveTarget(ether("40.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("40.0")); + + const unfinalized = ether("30.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalized); + + await lido.setDepositsReserveTarget(0n); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(unfinalized); + const buffered = await lido.getBufferedEther(); + expect(await lido.getDepositableEther()).to.equal(buffered - unfinalized); + }); + + it("Consumes deposits reserve once when CL-depositable ether is spent and reserve target exceeds buffer", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + const bufferedBefore = await lido.getBufferedEther(); + + // Keep all buffered ether depositable and make stored reserve larger than the buffer. 
+ await withdrawalQueue.mock__unfinalizedStETH(0n); + await lido.setDepositsReserveTarget(bufferedBefore + ether("100.0")); + await syncReserveWithOracleReport(); + + const spentDepositableEther = ether("10.0"); + await lido.connect(stakingRouterSigner).withdrawDepositableEther(spentDepositableEther, 1n); + + const bufferedAfter = await lido.getBufferedEther(); + expect(bufferedAfter).to.equal(bufferedBefore - spentDepositableEther); + expect(await lido.getDepositsReserve()).to.equal(bufferedAfter); + expect(await lido.getDepositableEther()).to.equal(bufferedAfter); + }); + + it("Does not decrease withdrawals reserve when all depositable ether is withdrawn", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("50.0")); + + const bufferedBefore = await lido.getBufferedEther(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositableBefore = await lido.getDepositableEther(); + expect(depositableBefore).to.equal(bufferedBefore - withdrawalsReserveBefore); + + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore, 0n); + + expect(await lido.getDepositableEther()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + }); + + it("Emits only target event on target increase and emits reserve update on target decrease", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + + const increasedTarget = ether("25.0"); + await expect(lido.setDepositsReserveTarget(increasedTarget)) + .to.emit(lido, "DepositsReserveTargetSet") + .withArgs(increasedTarget) + .and.not.to.emit(lido, "DepositsReserveSet"); + + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(increasedTarget); + + const loweredTarget = ether("10.0"); + await 
expect(lido.setDepositsReserveTarget(loweredTarget)) + .to.emit(lido, "DepositsReserveTargetSet") + .withArgs(loweredTarget) + .and.to.emit(lido, "DepositsReserveSet") + .withArgs(loweredTarget); + }); + + it("Keeps deposits reserve at zero when buffer is empty and target is positive", async () => { + await withdrawalQueue.mock__unfinalizedStETH(0n); + const depositableBefore = await lido.getDepositableEther(); + if (depositableBefore > 0n) { + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore, 0n); + } + + expect(await lido.getBufferedEther()).to.equal(0n); + + const target = ether("50.0"); + await lido.setDepositsReserveTarget(target); + expect(await lido.getDepositsReserveTarget()).to.equal(target); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositableEther()).to.equal(0n); + + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositableEther()).to.equal(0n); + }); + + it("Reverts withdraw when requested amount is above depositable with withdrawals reserve present", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("40.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("80.0")); + + const depositable = await lido.getDepositableEther(); + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(depositable + 1n, 0n)).to.be.revertedWith( + "NOT_ENOUGH_ETHER", ); }); - it("Reverts if the contract is stopped", async () => { - await lido.connect(user).stop(); + it("Syncs reserve to target after non-zero accounting buffer movements", async () => { + await lido.connect(elRewardsVault).receiveELRewards({ value: ether("3.0") }); + await lido.submit(ZeroAddress, { value: ether("100.0") }); + + await lido.setDepositsReserveTarget(ether("20.0")); + await syncReserveWithOracleReport(); + await 
lido.connect(stakingRouterSigner).withdrawDepositableEther(ether("5.0"), 0n); + expect(await lido.getDepositsReserve()).to.equal(ether("15.0")); + + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)).to.be.revertedWith( - "CAN_NOT_DEPOSIT", + expect(await lido.getDepositsReserveTarget()).to.equal(ether("20.0")); + expect(await lido.getDepositsReserve()).to.equal(ether("20.0")); + }); + + it("Exhausts CL-depositable ether via multiple withdrawDepositableEther() calls and then reverts", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("20.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("70.0")); + + const chunk = ether("5.0"); + + while ((await lido.getDepositableEther()) >= chunk) { + await lido.connect(stakingRouterSigner).withdrawDepositableEther(chunk, 0n); + await assertDepositsReserveInvariants(); + } + + const remaining = await lido.getDepositableEther(); + expect(remaining).to.be.lt(chunk); + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(chunk, 0n)).to.be.revertedWith( + "NOT_ENOUGH_ETHER", ); + await assertDepositsReserveInvariants(); + }); + + it("Preserves reserve invariants over submit/withdraw/target-update/report sequence", async () => { + await lido.submit(ZeroAddress, { value: ether("50.0") }); + await lido.setDepositsReserveTarget(ether("15.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("20.0")); + await assertDepositsReserveInvariants(); + + await lido.connect(stakingRouterSigner).withdrawDepositableEther(ether("10.0"), 0n); + await assertDepositsReserveInvariants(); + + await lido.connect(elRewardsVault).receiveELRewards({ value: ether("7.0") }); + await assertDepositsReserveInvariants(); + + await 
lido.setDepositsReserveTarget(ether("30.0")); + // target increased, reserve increase is deferred until report + await assertDepositsReserveInvariants(); + + await syncReserveWithOracleReport(); + await assertDepositsReserveInvariants(); }); + }); + + context("withdrawalsReserve", () => { + let stakingRouterSigner: HardhatEthersSigner; + let accountingSigner: HardhatEthersSigner; + + beforeEach(async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + stakingRouterSigner = await impersonate(await locator.stakingRouter(), ether("1.0")); + accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + }); + + it("Returns 0 when unfinalizedStETH is zero", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(0n); - it("Emits `Unbuffered` and `DepositedValidatorsChanged` events if there are deposits", async () => { + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + }); + + it("Is capped by remaining buffer after deposits reserve", async () => { + const deposit = ether("100.0"); + await lido.submit(ZeroAddress, { value: deposit }); + await lido.setDepositsReserveTarget(ether("40.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(ether("80.0")); + + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + expect(await lido.getWithdrawalsReserve()).to.equal(buffered - depositsReserve); + }); + + it("Decreases when deposits reserve target increases (priority to deposits reserve)", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + const buffered = await 
lido.getBufferedEther(); + // Make withdrawals demand effectively unbounded so withdrawalsReserve == buffered - depositsReserve. + await withdrawalQueue.mock__unfinalizedStETH(buffered); + + const lowTarget = ether("10.0"); + const highTarget = ether("50.0"); + + await lido.setDepositsReserveTarget(lowTarget); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + const withdrawalsReserveWithLowTarget = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveWithLowTarget).to.equal(buffered - lowTarget); + + await lido.setDepositsReserveTarget(highTarget); + // target increase is deferred until report, so withdrawals reserve is unchanged before sync + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveWithLowTarget); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + const withdrawalsReserveWithHighTarget = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveWithHighTarget).to.equal(buffered - highTarget); + expect(withdrawalsReserveWithHighTarget).to.be.lt(withdrawalsReserveWithLowTarget); + }); + + it("Does not change on oracle report when no withdrawals are finalized", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(ether("40.0")); + const before = await lido.getWithdrawalsReserve(); + + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + + expect(await lido.getWithdrawalsReserve()).to.equal(before); + }); + + it("Returns 0 when buffer is empty even if unfinalizedStETH is non-zero", async () => { + await withdrawalQueue.mock__unfinalizedStETH(0n); + const depositableBefore = await lido.getDepositableEther(); + if 
(depositableBefore > 0n) { + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore, 0n); + } + + expect(await lido.getBufferedEther()).to.equal(0n); + + await withdrawalQueue.mock__unfinalizedStETH(ether("100.0")); + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + }); + }); + + context("withdrawDepositableEther", () => { + let stakingRouterSigner: HardhatEthersSigner; + + beforeEach(async () => { + await lido.resume(); + // Get stakingRouter signer to call withdrawDepositableEther + const stakingRouterAddress = await locator.stakingRouter(); + stakingRouterSigner = await impersonate(stakingRouterAddress, ether("1.0")); + // simulate success report + await accountingOracle.mock_setProcessingState(1, true, true); + }); + + it("Reverts if the caller is not `StakingRouter`", async () => { const oneDepositWorthOfEther = ether("32.0"); - // top up Lido buffer enough for 1 deposit of 32 ether await lido.submit(ZeroAddress, { value: oneDepositWorthOfEther }); - expect(await lido.getDepositableEther()).to.be.greaterThanOrEqual(oneDepositWorthOfEther); + await expect(lido.connect(stranger).withdrawDepositableEther(oneDepositWorthOfEther, 1n)).to.be.revertedWith( + "APP_AUTH_FAILED", + ); + }); + + it("Reverts if amount is zero", async () => { + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(0n, 0n)).to.be.revertedWith( + "ZERO_AMOUNT", + ); + }); + + it("Reverts if not enough depositable ether", async () => { + const tooMuchEther = ether("1000.0"); + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(tooMuchEther, 1n)).to.be.revertedWith( + "NOT_ENOUGH_ETHER", + ); + }); - // mock StakingRouter.getStakingModuleMaxDepositsCount returning 1 deposit - await stakingRouter.mock__getStakingModuleMaxDepositsCount(1); + it("Emits `Unbuffered`, `DepositedValidatorsChanged` events when withdrawing ether", async () => { + const depositAmount = ether("32.0"); + // top up Lido buffer enough for deposit 
+ await lido.submit(ZeroAddress, { value: depositAmount }); + + // Get actual depositable ether which may be less due to withdrawal reservations + const depositableEther = await lido.getDepositableEther(); + expect(depositableEther).to.be.greaterThan(0n); const beforeDeposit = await batch({ lidoBalance: ethers.provider.getBalance(lido), stakingRouterBalance: ethers.provider.getBalance(stakingRouter), beaconStat: lido.getBeaconStat(), + balanceStats: lido.getBalanceStats(), }); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)) + // Use actual depositable amount + const amountToWithdraw = depositableEther; + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(amountToWithdraw, 1n)) .to.emit(lido, "Unbuffered") - .withArgs(oneDepositWorthOfEther) + .withArgs(amountToWithdraw) .and.to.emit(lido, "DepositedValidatorsChanged") - .withArgs(beforeDeposit.beaconStat.depositedValidators + 1n) - .and.to.emit(stakingRouter, "Mock__DepositCalled"); + .withArgs(beforeDeposit.beaconStat.depositedValidators + 1n); const afterDeposit = await batch({ lidoBalance: ethers.provider.getBalance(lido), @@ -313,19 +785,19 @@ describe("Lido.sol:misc", () => { }); expect(afterDeposit.beaconStat.depositedValidators).to.equal(beforeDeposit.beaconStat.depositedValidators + 1n); - expect(afterDeposit.lidoBalance).to.equal(beforeDeposit.lidoBalance - oneDepositWorthOfEther); - expect(afterDeposit.stakingRouterBalance).to.equal(beforeDeposit.stakingRouterBalance + oneDepositWorthOfEther); + // Verify ETH moved from Lido to StakingRouter + expect(afterDeposit.lidoBalance).to.be.lessThan(beforeDeposit.lidoBalance); + expect(afterDeposit.stakingRouterBalance).to.be.greaterThan(beforeDeposit.stakingRouterBalance); }); - it("Does not emit `Unbuffered` and `DepositedValidatorsChanged` events if the staking module cannot accomodate new deposit", async () => { - const oneDepositWorthOfEther = ether("32.0"); - // top up Lido buffer enough for 1 deposit of 
32 ether - await lido.submit(ZeroAddress, { value: oneDepositWorthOfEther }); - - expect(await lido.getDepositableEther()).to.be.greaterThanOrEqual(oneDepositWorthOfEther); + it("Does not emit `DepositedValidatorsChanged` event when depositsCount is 0 (top-up scenario)", async () => { + const depositAmount = ether("10.0"); + // top up Lido buffer + await lido.submit(ZeroAddress, { value: depositAmount }); - // mock StakingRouter.getStakingModuleMaxDepositsCount returning 1 deposit - await stakingRouter.mock__getStakingModuleMaxDepositsCount(0); + // Get actual depositable ether + const depositableEther = await lido.getDepositableEther(); + expect(depositableEther).to.be.greaterThan(0n); const beforeDeposit = await batch({ lidoBalance: ethers.provider.getBalance(lido), @@ -333,9 +805,13 @@ describe("Lido.sol:misc", () => { beaconStat: lido.getBeaconStat(), }); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)) - .to.emit(stakingRouter, "Mock__DepositCalled") - .not.to.emit(lido, "Unbuffered") + // Use a smaller amount that's definitely available + const amountToWithdraw = depositableEther < depositAmount ? 
depositableEther : depositAmount; + + // depositsCount = 0 for top-up scenario (existing validators, not new ones) + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(amountToWithdraw, 0n)) + .to.emit(lido, "Unbuffered") + .withArgs(amountToWithdraw) .and.not.to.emit(lido, "DepositedValidatorsChanged"); const afterDeposit = await batch({ @@ -344,9 +820,11 @@ describe("Lido.sol:misc", () => { beaconStat: lido.getBeaconStat(), }); + // depositedValidators should not change for top-ups expect(afterDeposit.beaconStat.depositedValidators).to.equal(beforeDeposit.beaconStat.depositedValidators); - expect(afterDeposit.lidoBalance).to.equal(beforeDeposit.lidoBalance); - expect(afterDeposit.stakingRouterBalance).to.equal(beforeDeposit.stakingRouterBalance); + // Verify ETH moved from Lido to StakingRouter + expect(afterDeposit.lidoBalance).to.be.lessThan(beforeDeposit.lidoBalance); + expect(afterDeposit.stakingRouterBalance).to.be.greaterThan(beforeDeposit.stakingRouterBalance); }); }); }); diff --git a/test/0.8.9/beaconChainDepositor.t.sol b/test/0.8.25/beaconChainDepositor.t.sol similarity index 95% rename from test/0.8.9/beaconChainDepositor.t.sol rename to test/0.8.25/beaconChainDepositor.t.sol index 93def7cada..0128fe2094 100644 --- a/test/0.8.9/beaconChainDepositor.t.sol +++ b/test/0.8.25/beaconChainDepositor.t.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; import {Test} from "forge-std/Test.sol"; import {CommonBase} from "forge-std/Base.sol"; @@ -9,7 +9,10 @@ import {StdUtils} from "forge-std/StdUtils.sol"; import {StdAssertions} from "forge-std/StdAssertions.sol"; import {IERC165} from "forge-std/interfaces/IERC165.sol"; -import {BeaconChainDepositor as BCDepositor} from "contracts/0.8.9/BeaconChainDepositor.sol"; +import { + BeaconChainDepositor as BCDepositor, + IDepositContract as IDepositContractLib +} from 
"contracts/0.8.25/lib/BeaconChainDepositor.sol"; // The following invariants are formulated and enforced for the `BeaconChainDepositor` contract: // - exactly 32 ETH gets attached with every single deposit @@ -171,8 +174,13 @@ contract BCDepositorHandler is CommonBase, StdAssertions, StdUtils { } } -contract BCDepositorHarness is BCDepositor { - constructor(address _depositContract) BCDepositor(_depositContract) {} +contract BCDepositorHarness { + IDepositContractLib public immutable DEPOSIT_CONTRACT; + uint256 internal constant INITIAL_DEPOSIT_SIZE = 32 ether; + + constructor(address _depositContract) { + DEPOSIT_CONTRACT = IDepositContractLib(_depositContract); + } /// @dev Exposed version of the _makeBeaconChainDeposits32ETH /// @param _keysCount amount of keys to deposit @@ -185,7 +193,13 @@ contract BCDepositorHarness is BCDepositor { bytes memory _publicKeysBatch, bytes memory _signaturesBatch ) external { - _makeBeaconChainDeposits32ETH(_keysCount, _withdrawalCredentials, _publicKeysBatch, _signaturesBatch); + BCDepositor.makeBeaconChainDeposits32ETH( + DEPOSIT_CONTRACT, + _keysCount, + _withdrawalCredentials, + _publicKeysBatch, + _signaturesBatch + ); } } diff --git a/test/0.8.25/consolidation-helpers.ts b/test/0.8.25/consolidation-helpers.ts new file mode 100644 index 0000000000..19e79f871e --- /dev/null +++ b/test/0.8.25/consolidation-helpers.ts @@ -0,0 +1,36 @@ +/** + * Shared test helpers for ConsolidationGateway, ConsolidationBus, and ConsolidationMigrator tests. + */ + +/** Sample 48-byte validator public keys for testing. 
*/ +export const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", +]; + +/** Creates dummy (empty-proof) validator witnesses for use in ConsolidationGateway tests. */ +export const witnessesForTargets = (targets: string[]) => + targets.map((pubkey) => ({ + proof: [] as string[], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, + })); + +/** Creates ConsolidationWitnessGroup[] for ConsolidationBus.executeConsolidation */ +export const buildWitnessGroups = (sourcePubkeysGroups: string[][], targetPubkeys: string[]) => + sourcePubkeysGroups.map((sourcePubkeys, i) => ({ + sourcePubkeys, + targetWitness: { + proof: [] as string[], + pubkey: targetPubkeys[i], + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, + }, + })); diff --git a/test/0.8.25/consolidationBus/consolidationBus.delay.test.ts b/test/0.8.25/consolidationBus/consolidationBus.delay.test.ts new file mode 100644 index 0000000000..76b92ba4a5 --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.delay.test.ts @@ -0,0 +1,263 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { advanceChainTime, getCurrentBlockTimestamp } from "lib"; +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: execution delay", () => { + let 
consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let executor: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + + const EXECUTION_DELAY = 3600; // 1 hour + + let originalState: string; + + before(async () => { + [admin, manager, publisher, executor, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 100, 100, EXECUTION_DELAY); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("constructor", () => { + it("should set the initial execution delay", async () => { + expect(await consolidationBus.executionDelay()).to.equal(EXECUTION_DELAY); + }); + + it("should emit ExecutionDelayUpdated during initialization", async () => { + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 100, 100, 7200)) + .to.emit(bus, "ExecutionDelayUpdated") + .withArgs(7200); + }); + + it("should allow zero execution delay in initializer", async () => { + const gatewayAddr = await 
consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + await bus.initialize(admin.address, 100, 100, 0); + expect(await bus.executionDelay()).to.equal(0); + }); + }); + + context("setExecutionDelay", () => { + it("should set execution delay", async () => { + await expect(consolidationBus.connect(manager).setExecutionDelay(7200)) + .to.emit(consolidationBus, "ExecutionDelayUpdated") + .withArgs(7200); + + expect(await consolidationBus.executionDelay()).to.equal(7200); + }); + + it("should allow setting delay to zero", async () => { + await expect(consolidationBus.connect(manager).setExecutionDelay(0)) + .to.emit(consolidationBus, "ExecutionDelayUpdated") + .withArgs(0); + + expect(await consolidationBus.executionDelay()).to.equal(0); + }); + + it("should revert without MANAGE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).setExecutionDelay(100)).to.be.reverted; + }); + }); + + context("execution delay enforcement", () => { + let sourcePubkeysGroups: string[][]; + let targetPubkeys: string[]; + let batchHash: string; + + beforeEach(async () => { + sourcePubkeysGroups = [[PUBKEYS[0]]]; + targetPubkeys = [PUBKEYS[1]]; + + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + }); + + it("should revert when execution delay has not passed", async () => { + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + const executeAfter = batchInfo.addedAt + BigInt(EXECUTION_DELAY); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ) + .to.be.revertedWithCustomError(consolidationBus, 
"ExecutionDelayNotPassed") + .withArgs((await getCurrentBlockTimestamp()) + 1n, executeAfter); + }); + + it("should allow execution after delay has passed", async () => { + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + + it("should allow execution exactly at the delay boundary", async () => { + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + const currentTimestamp = await getCurrentBlockTimestamp(); + const timeToAdvance = batchInfo.addedAt + BigInt(EXECUTION_DELAY) - currentTimestamp; + + await advanceChainTime(timeToAdvance); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + + it("should allow immediate execution when delay is zero", async () => { + await consolidationBus.connect(manager).setExecutionDelay(0); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + + it("should enforce delay per batch independently", async () => { + // Add second batch after some time + await advanceChainTime(BigInt(EXECUTION_DELAY / 2)); + + const sourcePubkeysGroups2 = [[PUBKEYS[2]]]; + const targetPubkeys2 = [PUBKEYS[3]]; + await consolidationBus + .connect(publisher) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[2]], targetPubkey: PUBKEYS[3] }]); + + // Advance enough for batch 1 but not batch 2 + await advanceChainTime(BigInt(EXECUTION_DELAY / 2)); + + // Batch 1 should be executable + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { 
value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + + // Batch 2 should still be blocked + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups2, targetPubkeys2), { value: 0 }), + ).to.be.revertedWithCustomError(consolidationBus, "ExecutionDelayNotPassed"); + }); + + it("should use the current delay setting at execution time", async () => { + // Increase delay after batch was added + const longerDelay = EXECUTION_DELAY * 2; + await consolidationBus.connect(manager).setExecutionDelay(longerDelay); + + // Advance the original delay + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + // Should still revert because the new longer delay hasn't passed + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.be.revertedWithCustomError(consolidationBus, "ExecutionDelayNotPassed"); + + // Advance the remaining time + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + // Now should succeed + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + }); + + context("getBatchInfo", () => { + it("should return zero values for non-existent batch", async () => { + const fakeBatchHash = ethers.keccak256(ethers.toUtf8Bytes("fake")); + const batchInfo = await consolidationBus.getBatchInfo(fakeBatchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + + it("should return correct info after adding batch", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes 
targetPubkey)[]"], [groups]), + ); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + const blockTimestamp = await getCurrentBlockTimestamp(); + expect(batchInfo.publisher).to.equal(publisher.address); + expect(batchInfo.addedAt).to.equal(blockTimestamp); + }); + + it("should return zero values after batch is executed", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + + // Advance past delay + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + await consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups([[PUBKEYS[0]]], [PUBKEYS[1]]), { value: 0 }); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.deploy.test.ts b/test/0.8.25/consolidationBus/consolidationBus.deploy.test.ts new file mode 100644 index 0000000000..f8784d548c --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.deploy.test.ts @@ -0,0 +1,98 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +describe("ConsolidationBus.sol: deployment", () => { + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + + before(async () => { + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + }); + + it("should deploy and initialize successfully with valid parameters", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await 
consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + await bus.initialize(admin.address, 100, 100, 0); + + const adminRole = await bus.DEFAULT_ADMIN_ROLE(); + expect(await bus.hasRole(adminRole, admin.address)).to.be.true; + expect(await bus.batchSize()).to.equal(100); + expect(await bus.maxGroupsInBatch()).to.equal(100); + expect(await bus.getConsolidationGateway()).to.equal(gatewayAddr); + }); + + it("should revert if admin is zero address on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(ethers.ZeroAddress, 100, 100, 0)).to.be.revertedWithCustomError( + bus, + "AdminCannotBeZero", + ); + }); + + it("should revert if consolidationGateway is zero address", async () => { + await expect(ethers.deployContract("ConsolidationBus", [ethers.ZeroAddress])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationBus"), "ZeroArgument") + .withArgs("consolidationGateway"); + }); + + it("should revert zero batch size on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 0, 100, 0)) + .to.be.revertedWithCustomError(bus, "ZeroArgument") + .withArgs("batchSizeLimit"); + }); + + it("should revert zero max groups in batch on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const 
[bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 100, 0, 0)) + .to.be.revertedWithCustomError(bus, "ZeroArgument") + .withArgs("maxGroupsInBatchLimit"); + }); + + it("should revert if maxGroupsInBatch exceeds batchSize on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 10, 20, 0)) + .to.be.revertedWithCustomError(bus, "MaxGroupsExceedsBatchSize") + .withArgs(20, 10); + }); + + it("should revert on double initialization", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + await bus.initialize(admin.address, 100, 100, 0); + + await expect(bus.initialize(admin.address, 100, 100, 0)).to.be.revertedWithCustomError( + bus, + "InvalidInitialization", + ); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.executor.test.ts b/test/0.8.25/consolidationBus/consolidationBus.executor.test.ts new file mode 100644 index 0000000000..1249eee69d --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.executor.test.ts @@ -0,0 +1,245 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: executor", () => { + let consolidationBus: ConsolidationBus; + let consolidationGateway: 
ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let executor: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + let REMOVE_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, manager, publisher, executor, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 100, 100, 0); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + + // Grant roles + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("executeConsolidation", () => { + let sourcePubkeysGroups: string[][]; + let targetPubkeys: string[]; + let batchHash: string; + + beforeEach(async () => { + sourcePubkeysGroups = [[PUBKEYS[0]]]; + targetPubkeys = [PUBKEYS[1]]; + + // Add a batch + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + }); + + it("should execute consolidation", async () => { + const fee = 10n; + + await expect( + consolidationBus + .connect(executor) + 
.executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: fee }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, fee); + + // Verify batch is removed from storage after execution + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + + it("should forward call to ConsolidationGateway", async () => { + const fee = 10n; + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: fee }), + ) + .to.emit(consolidationGateway, "AddConsolidationRequestsCalled") + .withArgs(sourcePubkeysGroups.length, executor.address, fee); + }); + + it("should allow anyone to execute consolidation", async () => { + const fee = 10n; + + await expect( + consolidationBus + .connect(stranger) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: fee }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, fee); + }); + + it("should revert if batch not found", async () => { + const fakeSources = [[PUBKEYS[2]]]; + const fakeTargets = [PUBKEYS[0]]; + + const fakeBatchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [PUBKEYS[2]], targetPubkey: PUBKEYS[0] }]], + ), + ); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(fakeSources, fakeTargets), { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(fakeBatchHash); + }); + + it("should revert if batch already executed", async () => { + // Execute first time + await consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }); + + // Try to execute again — batch was 
deleted, so it's not found + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(batchHash); + }); + + it("should revert if batch was removed", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + // Remove the batch + await consolidationBus.connect(manager).removeBatches([batchHash]); + + // Try to execute + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(batchHash); + }); + + it("should execute multiple batches sequentially", async () => { + // Add second batch + const sourcePubkeysGroups2 = [[PUBKEYS[1]]]; + const targetPubkeys2 = [PUBKEYS[2]]; + const groups2 = [{ sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[2] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups2); + + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups2]), + ); + + // Execute first batch + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, 10); + + // Execute second batch + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups2, targetPubkeys2), { value: 15 }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash2, 15); + }); + + it("should work with zero value (if gateway allows)", async () => { + await expect( + consolidationBus + .connect(executor) + 
.executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, 0); + }); + + it("should forward exact msg.value to gateway", async () => { + const exactValue = 12345n; + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: exactValue }), + ) + .to.emit(consolidationGateway, "AddConsolidationRequestsCalled") + .withArgs(sourcePubkeysGroups.length, executor.address, exactValue); + }); + + it("should pass caller as refundRecipient", async () => { + await expect( + consolidationBus + .connect(stranger) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.emit(consolidationGateway, "AddConsolidationRequestsCalled") + .withArgs(sourcePubkeysGroups.length, stranger.address, 10); + }); + }); + + context("ETH balance", () => { + it("should not hold ETH after execution", async () => { + const sourcePubkeysGroups = [[PUBKEYS[0]]]; + const targetPubkeys = [PUBKEYS[1]]; + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const balanceBefore = await ethers.provider.getBalance(await consolidationBus.getAddress()); + + await consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 100 }); + + const balanceAfter = await ethers.provider.getBalance(await consolidationBus.getAddress()); + + expect(balanceAfter).to.equal(balanceBefore); + }); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.management.test.ts b/test/0.8.25/consolidationBus/consolidationBus.management.test.ts new file mode 100644 index 0000000000..c14aca0990 --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.management.test.ts @@ -0,0 +1,198 @@ +import { expect 
} from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: management", () => { + let consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + let REMOVE_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, manager, publisher, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 100, 100, 0); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + + // Grant manager role + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("setBatchSize", () => { + it("should set batch size", async () => { + await expect(consolidationBus.connect(manager).setBatchSize(200)) + .to.emit(consolidationBus, "BatchLimitUpdated") + .withArgs(200); + + expect(await consolidationBus.batchSize()).to.equal(200); + }); + + it("should revert setting batch size to 
zero", async () => { + await expect(consolidationBus.connect(manager).setBatchSize(0)) + .to.be.revertedWithCustomError(consolidationBus, "ZeroArgument") + .withArgs("batchSizeLimit"); + }); + + it("should revert if new batch size is less than current maxGroupsInBatch", async () => { + // maxGroupsInBatch is 100, try to set batchSize to 50 + await expect(consolidationBus.connect(manager).setBatchSize(50)) + .to.be.revertedWithCustomError(consolidationBus, "MaxGroupsExceedsBatchSize") + .withArgs(100, 50); + }); + + it("should revert if caller does not have MANAGE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).setBatchSize(200)) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, MANAGE_ROLE); + }); + }); + + context("setMaxGroupsInBatch", () => { + it("should set max groups in batch", async () => { + await expect(consolidationBus.connect(manager).setMaxGroupsInBatch(50)) + .to.emit(consolidationBus, "MaxGroupsInBatchUpdated") + .withArgs(50); + + expect(await consolidationBus.maxGroupsInBatch()).to.equal(50); + }); + + it("should revert setting max groups in batch to zero", async () => { + await expect(consolidationBus.connect(manager).setMaxGroupsInBatch(0)) + .to.be.revertedWithCustomError(consolidationBus, "ZeroArgument") + .withArgs("maxGroupsInBatchLimit"); + }); + + it("should revert if maxGroupsInBatch exceeds batchSize", async () => { + // batchSize is 100, try to set maxGroupsInBatch to 200 + await expect(consolidationBus.connect(manager).setMaxGroupsInBatch(200)) + .to.be.revertedWithCustomError(consolidationBus, "MaxGroupsExceedsBatchSize") + .withArgs(200, 100); + }); + + it("should revert if caller does not have MANAGE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).setMaxGroupsInBatch(50)) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, MANAGE_ROLE); + }); + }); + + 
context("removeBatches", () => { + let batchHash: string; + + beforeEach(async () => { + // Register publisher and add a batch + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, publisher.address); + + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + // Compute batch hash + batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + }); + + it("should remove batches", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.not.equal(ethers.ZeroAddress); + + await expect(consolidationBus.connect(manager).removeBatches([batchHash])) + .to.emit(consolidationBus, "BatchesRemoved") + .withArgs([batchHash]); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + + it("should revert if caller does not have REMOVE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).removeBatches([batchHash])) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, REMOVE_ROLE); + }); + + it("should revert if batch not found", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const fakeBatchHash = ethers.keccak256(ethers.toUtf8Bytes("fake")); + + await expect(consolidationBus.connect(manager).removeBatches([fakeBatchHash])) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(fakeBatchHash); + }); + + it("should revert if batchHashes is empty", async () => { + await 
consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + await expect(consolidationBus.connect(manager).removeBatches([])).to.be.revertedWithCustomError( + consolidationBus, + "EmptyBatchHashes", + ); + }); + + it("should revert if batch already executed", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const sourcePubkeysGroups = [[PUBKEYS[0]]]; + const targetPubkeys = [PUBKEYS[1]]; + + await consolidationBus + .connect(manager) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }); + + // Try to remove the executed batch — batch was deleted, so it's not found + await expect(consolidationBus.connect(manager).removeBatches([batchHash])) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(batchHash); + }); + + it("should remove multiple batches", async () => { + // Add another batch + const groups2 = [{ sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[0] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups2); + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups2]), + ); + + await expect(consolidationBus.connect(manager).removeBatches([batchHash, batchHash2])) + .to.emit(consolidationBus, "BatchesRemoved") + .withArgs([batchHash, batchHash2]); + + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal(ethers.ZeroAddress); + expect((await consolidationBus.getBatchInfo(batchHash2)).publisher).to.equal(ethers.ZeroAddress); + }); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.publisher.test.ts b/test/0.8.25/consolidationBus/consolidationBus.publisher.test.ts new file mode 100644 index 0000000000..6605b35411 --- /dev/null +++ 
b/test/0.8.25/consolidationBus/consolidationBus.publisher.test.ts @@ -0,0 +1,325 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: publisher", () => { + let consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, manager, publisher, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 10, 10, 0); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + + // Grant roles + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("addConsolidationRequests", () => { + it("should add consolidation requests", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + const batchData = 
ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [groups], + ); + const batchHash = ethers.keccak256(batchData); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.emit(consolidationBus, "RequestsAdded") + .withArgs(publisher.address, batchData); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(publisher.address); + expect(batchInfo.addedAt).to.be.greaterThan(0); + }); + + it("should add multiple requests in a batch", async () => { + const groups = [ + { sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[2] }, + ]; + + const batchData = ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [groups], + ); + const batchHash = ethers.keccak256(batchData); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.emit(consolidationBus, "RequestsAdded") + .withArgs(publisher.address, batchData); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.not.equal(ethers.ZeroAddress); + }); + + it("should revert if caller does not have PUBLISH_ROLE", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await expect(consolidationBus.connect(stranger).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, PUBLISH_ROLE); + }); + + it("should revert if batch is empty", async () => { + await expect(consolidationBus.connect(publisher).addConsolidationRequests([])).to.be.revertedWithCustomError( + consolidationBus, + "EmptyBatch", + ); + }); + + it("should revert if a source group is empty", async () => { + // First group is non-empty, second group is empty + const groups = [ + { sourcePubkeys: 
[PUBKEYS[0]], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [] as string[], targetPubkey: PUBKEYS[2] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "EmptyGroup") + .withArgs(1); + }); + + it("should revert with EmptyGroup at first index if first group is empty", async () => { + const groups = [ + { sourcePubkeys: [] as string[], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[2] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "EmptyGroup") + .withArgs(0); + }); + + it("should revert if batch size exceeds limit", async () => { + // Create a batch with total source pubkeys exceeding the limit (10) + // Use fewer groups but with multiple source keys each to avoid TooManyGroups + const groups = [ + { sourcePubkeys: Array(6).fill(PUBKEYS[0]), targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: Array(6).fill(PUBKEYS[0]), targetPubkey: PUBKEYS[2] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(12, 10); + }); + + it("should allow batch at exact limit", async () => { + const groups = Array(10).fill({ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.not.be.reverted; + }); + + it("should revert if groups count exceeds max groups in batch", async () => { + // Set maxGroupsInBatch to 3 (batchSize stays at 10) + await consolidationBus.connect(manager).setMaxGroupsInBatch(3); + + // Create 4 groups, each with 1 source pubkey (total size 4 <= batchSize 10, but groups 4 > maxGroups 3) + const groups = Array(4).fill({ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }); + + await 
expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "TooManyGroups") + .withArgs(4, 3); + }); + + it("should allow batch at exact max groups limit", async () => { + // Set maxGroupsInBatch to 3 + await consolidationBus.connect(manager).setMaxGroupsInBatch(3); + + const groups = Array(3).fill({ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.not.be.reverted; + }); + + it("should check both batch size and max groups limits independently", async () => { + // Set maxGroupsInBatch to 5, batchSize stays at 10 + await consolidationBus.connect(manager).setMaxGroupsInBatch(5); + + // 3 groups with 4 source pubkeys each = 12 total > batchSize 10 + // but groups 3 <= maxGroups 5 + // TooManyGroups check comes first, but this should pass it and fail on BatchTooLarge + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2], PUBKEYS[0]], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2], PUBKEYS[0]], targetPubkey: PUBKEYS[2] }, + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2], PUBKEYS[0]], targetPubkey: PUBKEYS[0] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(12, 10); + }); + + it("should revert if batch already added", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + // Add first time + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + + // Try to add again + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, 
"BatchAlreadyPending") + .withArgs(batchHash); + }); + + it("should revert if source equals target pubkey", async () => { + const samePubkey = PUBKEYS[0]; + const groups = [{ sourcePubkeys: [samePubkey], targetPubkey: samePubkey }]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "SourceEqualsTarget") + .withArgs(0); + }); + + it("should revert if source equals target pubkey at any index", async () => { + // First group is valid, second group has source == target + const groups = [ + { sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[2] }, + { sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[1] }, // PUBKEYS[1] == PUBKEYS[1] at group index 1 + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "SourceEqualsTarget") + .withArgs(1); + }); + + it("should revert if any source in a multi-source group equals the target", async () => { + // Group has multiple sources, one of which matches the target + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetPubkey: PUBKEYS[1] }, // PUBKEYS[1] is both source and target + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "SourceEqualsTarget") + .withArgs(0); + }); + + it("should allow re-adding batch after removal", async () => { + const REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + // Add first time + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + + // Remove + await 
consolidationBus.connect(manager).removeBatches([batchHash]); + + // Batch should be cleared + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + + // Re-add should succeed + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.emit( + consolidationBus, + "RequestsAdded", + ); + }); + + it("should allow re-adding batch after execution", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + // Add first time + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + // Execute + await consolidationBus.executeConsolidation(buildWitnessGroups([[PUBKEYS[0]]], [PUBKEYS[1]]), { + value: 10, + }); + + // Re-add should succeed + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.emit( + consolidationBus, + "RequestsAdded", + ); + }); + + it("should revert if target pubkey length is not 48 bytes", async () => { + const invalidTargetPubkey = "0x1234"; + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: invalidTargetPubkey }]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "InvalidTargetPubkeyLength") + .withArgs(0, 2); + }); + + it("should revert if source pubkey length is not 48 bytes", async () => { + const invalidSourcePubkey = "0x1234"; + const groups = [{ sourcePubkeys: [invalidSourcePubkey], targetPubkey: PUBKEYS[1] }]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "InvalidSourcePubkeyLength") + .withArgs(0, 0, 2); + }); + + it("should allow different publishers to add different batches", async () => { + // Register another publisher + const [, , , , publisher2] = await ethers.getSigners(); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, 
publisher2.address); + + const groups1 = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + const groups2 = [{ sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[2] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups1); + await consolidationBus.connect(publisher2).addConsolidationRequests(groups2); + + const batchHash1 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups1]), + ); + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups2]), + ); + + expect((await consolidationBus.getBatchInfo(batchHash1)).publisher).to.equal(publisher.address); + expect((await consolidationBus.getBatchInfo(batchHash2)).publisher).to.equal(publisher2.address); + }); + }); + + context("view methods", () => { + it("getBatchInfo should return zero values for non-existent batch", async () => { + const fakeBatchHash = ethers.keccak256(ethers.toUtf8Bytes("fake")); + const batchInfo = await consolidationBus.getBatchInfo(fakeBatchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.addConsolidationRequests.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.addConsolidationRequests.test.ts new file mode 100644 index 0000000000..8cbbb93318 --- /dev/null +++ b/test/0.8.25/consolidationGateway/consolidationGateway.addConsolidationRequests.test.ts @@ -0,0 +1,554 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationGateway, + DepositSecurityModule__MockForConsolidationGateway, + Lido__MockForConsolidationGateway, + WithdrawalVault__MockForConsolidationGateway, +} from "typechain-types"; + +import { addressToWC, 
advanceChainTime, generateValidator, prepareLocalMerkleTree } from "lib"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +const ZERO_ADDRESS = ethers.ZeroAddress; + +// Helper to create a dummy witness (no real CL proof) for tests that don't need proof verification +const dummyWitness = (pubkey: string) => ({ + proof: [] as string[], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, +}); + +// Helper functions +const grantConsolidationRequestRole = async ( + consolidationGateway: ConsolidationGateway, + account: HardhatEthersSigner, +) => { + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + await consolidationGateway.grantRole(role, account); +}; + +const grantLimitManagerRole = async (consolidationGateway: ConsolidationGateway, account: HardhatEthersSigner) => { + const role = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await consolidationGateway.grantRole(role, account); +}; + +const setConsolidationLimit = async ( + consolidationGateway: ConsolidationGateway, + signer: HardhatEthersSigner, + maxRequests: number, + requestsPerFrame: number, + frameDuration: number, +) => { + return consolidationGateway + .connect(signer) + .setConsolidationRequestLimit(maxRequests, requestsPerFrame, frameDuration); +}; + +describe("ConsolidationGateway.sol: addConsolidationRequests", () => { + let consolidationGateway: ConsolidationGateway; + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + let dsm: DepositSecurityModule__MockForConsolidationGateway; + let lido: Lido__MockForConsolidationGateway; + let admin: HardhatEthersSigner; + let authorizedEntity: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + // Pre-built valid witnesses with CL proofs for target validators + let validWitnesses: { + proof: string[]; + pubkey: string; + 
validatorIndex: number; + childBlockTimestamp: number; + slot: number; + proposerIndex: number; + }[]; + let validatorPubkeys: string[]; + + let originalState: string; + + before(async () => { + [admin, authorizedEntity, stranger] = await ethers.getSigners(); + + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + dsm = await ethers.deployContract("DepositSecurityModule__MockForConsolidationGateway"); + lido = await ethers.deployContract("Lido__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + depositSecurityModule: await dsm.getAddress(), + lido: await lido.getAddress(), + }); + + // Set up merkle tree for CL proof verification + const localMerkle = await prepareLocalMerkleTree(); + const withdrawalCredentials = addressToWC(await withdrawalVault.getAddress(), 2); + + // Generate 3 validators with matching withdrawal credentials + const validators = []; + const validatorIndices: number[] = []; + for (let i = 0; i < 3; i++) { + const validator = generateValidator(withdrawalCredentials); + const { validatorIndex } = await localMerkle.addValidator(validator.container); + validators.push(validator); + validatorIndices.push(validatorIndex); + } + + // Commit merkle tree to beacon block root + const { childBlockTimestamp, beaconBlockHeader } = await localMerkle.commitChangesToBeaconRoot(); + + // Build valid witnesses for all validators + validWitnesses = []; + validatorPubkeys = []; + for (let i = 0; i < validators.length; i++) { + const proof = await localMerkle.buildProof(validatorIndices[i], beaconBlockHeader); + validWitnesses.push({ + proof, + pubkey: String(validators[i].container.pubkey), + validatorIndex: validatorIndices[i], + childBlockTimestamp, + slot: beaconBlockHeader.slot as number, + proposerIndex: 
beaconBlockHeader.proposerIndex as number, + }); + validatorPubkeys.push(String(validators[i].container.pubkey)); + } + + consolidationGateway = await ethers.deployContract("ConsolidationGateway", [ + admin, + locatorAddr, + 100, // maxConsolidationRequestsLimit + 1, // consolidationsPerFrame + 48, // frameDurationInSec + localMerkle.gIFirstValidator, + localMerkle.gIFirstValidator, + 0, + ]); + + await grantConsolidationRequestRole(consolidationGateway, authorizedEntity); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("authorization", () => { + it("should revert if caller does not have the ADD_CONSOLIDATION_REQUEST_ROLE", async () => { + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + + await expect( + consolidationGateway + .connect(stranger) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 2 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, role); + }); + }); + + context("input validation", () => { + it("should revert with ZeroArgument error if msg.value == 0", async () => { + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 0 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ZeroArgument") + .withArgs("msg.value"); + }); + + it("should revert with ZeroArgument error if groups count is zero", async () => { + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests([], ZERO_ADDRESS, { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ZeroArgument") + .withArgs("groups"); + }); + + it("should revert with EmptyGroup error if a source group is 
empty", async () => { + // Second group is empty + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }, + { sourcePubkeys: [], targetWitness: dummyWitness(PUBKEYS[2]) }, + ], + ZERO_ADDRESS, + { value: 10 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "EmptyGroup") + .withArgs(1); + }); + + it("should revert with EmptyGroup at first index if first group is empty", async () => { + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [], targetWitness: dummyWitness(PUBKEYS[1]) }, + { sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[2]) }, + ], + ZERO_ADDRESS, + { value: 10 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "EmptyGroup") + .withArgs(0); + }); + }); + + context("preconditions", () => { + it("should revert with DSMDepositsPaused error if DSM deposits are paused", async () => { + await dsm.mock__setDepositsPaused(true); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }), + ).to.be.revertedWithCustomError(consolidationGateway, "DSMDepositsPaused"); + }); + + it("should revert with LidoDepositsPaused error if Lido deposits are paused", async () => { + await lido.mock__setCanDeposit(false); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }), + ).to.be.revertedWithCustomError(consolidationGateway, "LidoDepositsPaused"); + }); + + it("should not revert when DSM deposits are not paused and Lido deposits are enabled", async () => { + await dsm.mock__setDepositsPaused(false); + await lido.mock__setCanDeposit(true); + + const tx = await 
consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + + await expect(tx).to.emit(withdrawalVault, "AddConsolidationRequestsCalled"); + }); + }); + + context("CL proof verification", () => { + it("should revert with RootNotFound when validator witness beacon root is missing", async () => { + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { + sourcePubkeys: [PUBKEYS[0]], + targetWitness: { + ...validWitnesses[0], + childBlockTimestamp: validWitnesses[0].childBlockTimestamp + 1, + }, + }, + ], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "RootNotFound"); + }); + + it("should revert with InvalidProof when validator witness proof is malformed", async () => { + // InvalidProof is defined in the SSZ library , not on ConsolidationGateway itself. + // The CLProofVerifier calls SSZ.verifyProof() which reverts with SSZ.InvalidProof(), + // but since the error is on the library, it doesn't appear in ConsolidationGateway's ABI. 
+ await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { + sourcePubkeys: [PUBKEYS[0]], + targetWitness: { + ...validWitnesses[0], + proof: [ + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ...validWitnesses[0].proof.slice(1), + ], + }, + }, + ], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError({ interface: ethers.Interface.from(["error InvalidProof()"]) }, "InvalidProof"); + }); + }); + + context("rate limiting", () => { + it("should consume limit when processing requests", async () => { + const dataBefore = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(dataBefore[4]).to.equal(100); // currentConsolidationRequestsLimit + + // 2 total requests: [source0, source1] -> target0 + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 3 }, + ); + + const dataAfter = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(dataAfter[3]).to.equal(98); // prevConsolidationRequestsLimit + expect(dataAfter[4]).to.equal(98); // currentConsolidationRequestsLimit + + await advanceChainTime(48n); + + const dataRestored = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(dataRestored[3]).to.equal(98); // prevConsolidationRequestsLimit + expect(dataRestored[4]).to.equal(99); // currentConsolidationRequestsLimit (restored by 1) + }); + + it("should revert if limit doesn't cover requests count", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + await setConsolidationLimit(consolidationGateway, authorizedEntity, 2, 1, 48); + + // 3 total requests across groups + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { 
sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ], + ZERO_ADDRESS, + { value: 4 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ConsolidationRequestsLimitExceeded") + .withArgs(3, 2); + }); + + it("should succeed when limit covers all requests and exhaust remaining limit", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + await setConsolidationLimit(consolidationGateway, authorizedEntity, 3, 1, 48); + + // 3 total requests: [source0, source1] -> target0, [source2] -> target1 + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ]; + + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }); + + const flatSources = [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2]]; + const flatTargets = [validatorPubkeys[0], validatorPubkeys[0], validatorPubkeys[1]]; + await expect(tx).to.emit(withdrawalVault, "AddConsolidationRequestsCalled").withArgs(flatSources, flatTargets); + + // Limit fully consumed — next request should fail + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ConsolidationRequestsLimitExceeded") + .withArgs(3, 0); + + // Restore limit after frame advancement + await advanceChainTime(48n * 3n); + + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }), + ) + .to.emit(withdrawalVault, "AddConsolidationRequestsCalled") + .withArgs(flatSources, flatTargets); + }); + }); + + context("fee handling", () => { + it("should revert if total fee is insufficient", async () => { + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], 
targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 1 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "InsufficientFee") + .withArgs(2, 1); + }); + + it("should use the current consolidation fee for insufficient fee checks", async () => { + await withdrawalVault.mock__setFee(3); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 5 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "InsufficientFee") + .withArgs(6, 5); + }); + + it("should forward the configured fee to withdrawal vault and refund the remainder", async () => { + await withdrawalVault.mock__setFee(4); + + const withdrawalVaultBalanceBefore = await ethers.provider.getBalance(withdrawalVault); + const recipientBalanceBefore = await ethers.provider.getBalance(stranger); + + await consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ], + stranger, + { value: 15 }, + ); + + const withdrawalVaultBalanceAfter = await ethers.provider.getBalance(withdrawalVault); + const recipientBalanceAfter = await ethers.provider.getBalance(stranger); + + expect(withdrawalVaultBalanceAfter).to.equal(withdrawalVaultBalanceBefore + 12n); + expect(recipientBalanceAfter).to.equal(recipientBalanceBefore + 3n); + }); + + it("should preserve gateway eth balance (no stuck funds)", async () => { + const balanceBefore = await ethers.provider.getBalance(consolidationGateway); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + + const balanceAfter = await ethers.provider.getBalance(consolidationGateway); + 
expect(balanceAfter).to.equal(balanceBefore); + }); + + it("should refund fee to recipient address", async () => { + const prevBalance = await ethers.provider.getBalance(stranger); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], stranger, { + value: 1 + 7, + }); + + const newBalance = await ethers.provider.getBalance(stranger); + + expect(newBalance).to.equal(prevBalance + 7n); + }); + + it("should refund fee to sender address when refundRecipient is zero", async () => { + const SENDER_ADDR = authorizedEntity.address; + const prevBalance = await ethers.provider.getBalance(SENDER_ADDR); + + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 1 + 7, + }); + + const receipt = await tx.wait(); + const gasUsed = receipt!.gasUsed * receipt!.gasPrice; + + const newBalance = await ethers.provider.getBalance(SENDER_ADDR); + expect(newBalance).to.equal(prevBalance - gasUsed - 1n); + }); + + it("should revert with FeeRefundFailed if refund recipient refuses ETH", async () => { + const refundReverter = await ethers.deployContract("RefundReverter"); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], + await refundReverter.getAddress(), + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "FeeRefundFailed"); + }); + + it("should not make refund if refund is zero", async () => { + const recipientBalanceBefore = await ethers.provider.getBalance(stranger); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], stranger, { + value: 1, + }); + + const recipientBalanceAfter = await 
ethers.provider.getBalance(stranger); + expect(recipientBalanceAfter).to.equal(recipientBalanceBefore); + }); + + it("should refund ETH if refund > 0", async () => { + const recipientBalanceBefore = await ethers.provider.getBalance(stranger); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], stranger, { + value: 5, + }); + + const recipientBalanceAfter = await ethers.provider.getBalance(stranger); + expect(recipientBalanceAfter).to.equal(recipientBalanceBefore + 4n); // 5 - 1 fee = 4 refund + }); + }); + + context("request forwarding", () => { + it("should expand grouped sources to flat source-target pairs", async () => { + // Grouped: [source0, source1] -> target0, i.e. two sources to one target + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 3 }, + ); + + const flatSources = [PUBKEYS[0], PUBKEYS[1]]; + const flatTargets = [validatorPubkeys[0], validatorPubkeys[0]]; + await expect(tx).to.emit(withdrawalVault, "AddConsolidationRequestsCalled").withArgs(flatSources, flatTargets); + }); + + it("should expand multiple groups with multiple sources each", async () => { + // Group 0: [source0, source1] -> target0 (2 pairs) + // Group 1: [source2] -> target1 (1 pair) + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ]; + + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }); + + const flatSources = [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2]]; + const flatTargets = [validatorPubkeys[0], validatorPubkeys[0], validatorPubkeys[1]]; + await expect(tx).to.emit(withdrawalVault, 
"AddConsolidationRequestsCalled").withArgs(flatSources, flatTargets); + }); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.deploy.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.deploy.test.ts new file mode 100644 index 0000000000..620383aba9 --- /dev/null +++ b/test/0.8.25/consolidationGateway/consolidationGateway.deploy.test.ts @@ -0,0 +1,121 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { WithdrawalVault__MockForConsolidationGateway } from "typechain-types"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; + +const DUMMY_GI = "0x0000000000000000000000000000000000000000000000000096000000000028"; + +describe("ConsolidationGateway.sol: deployment", () => { + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + + before(async () => { + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + }); + }); + + it("should deploy successfully with valid admin and verify initial state", async () => { + const [admin] = await ethers.getSigners(); + const locatorAddr = (await deployLidoLocator()).getAddress(); + + const gateway = await ethers.deployContract("ConsolidationGateway", [ + admin.address, + locatorAddr, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]); + + const adminRole = await gateway.DEFAULT_ADMIN_ROLE(); + expect(await gateway.hasRole(adminRole, admin.address)).to.be.true; + }); + + it("should initialize rate limit config during deployment", async () => { + const [admin] = await ethers.getSigners(); + const locatorAddr = (await deployLidoLocator()).getAddress(); + + const gateway = await ethers.deployContract("ConsolidationGateway", [ + admin.address, + locatorAddr, + 50, + 5, + 100, 
+ DUMMY_GI, + DUMMY_GI, + 0, + ]); + + const data = await gateway.getConsolidationRequestLimitFullInfo(); + expect(data[0]).to.equal(50); // maxConsolidationRequestsLimit + expect(data[1]).to.equal(5); // consolidationsPerFrame + expect(data[2]).to.equal(100); // frameDurationInSec + expect(data[3]).to.equal(50); // prevConsolidationRequestsLimit + expect(data[4]).to.equal(50); // currentConsolidationRequestsLimit + }); + + it("should emit ConsolidationRequestsLimitSet during deployment", async () => { + const [admin] = await ethers.getSigners(); + const locatorAddr = (await deployLidoLocator()).getAddress(); + + const gateway = await ethers.deployContract("ConsolidationGateway", [ + admin.address, + locatorAddr, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]); + + await expect(gateway.deploymentTransaction()) + .to.emit(gateway, "ConsolidationRequestsLimitSet") + .withArgs(100, 1, 48); + }); + + it("should revert if admin is zero address", async () => { + const locatorAddr = (await deployLidoLocator()).getAddress(); + + await expect( + ethers.deployContract("ConsolidationGateway", [ + ethers.ZeroAddress, + locatorAddr, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]), + ).to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationGateway"), "AdminCannotBeZero"); + }); + + it("should revert if lidoLocator is zero address", async () => { + const [admin] = await ethers.getSigners(); + + await expect( + ethers.deployContract("ConsolidationGateway", [ + admin.address, + ethers.ZeroAddress, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]), + ) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationGateway"), "ZeroArgument") + .withArgs("lidoLocator"); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.pausable.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.pausable.test.ts new file mode 100644 index 0000000000..3e3a9c4d5b --- /dev/null +++ 
b/test/0.8.25/consolidationGateway/consolidationGateway.pausable.test.ts @@ -0,0 +1,396 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationGateway, + DepositSecurityModule__MockForConsolidationGateway, + Lido__MockForConsolidationGateway, + WithdrawalVault__MockForConsolidationGateway, +} from "typechain-types"; + +import { + addressToWC, + advanceChainTime, + generateValidator, + getCurrentBlockTimestamp, + prepareLocalMerkleTree, +} from "lib"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +const dummyWitness = (pubkey: string) => ({ + proof: [] as string[], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, +}); + +const ZERO_ADDRESS = ethers.ZeroAddress; + +describe("ConsolidationGateway.sol: pausable", () => { + let consolidationGateway: ConsolidationGateway; + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + let dsm: DepositSecurityModule__MockForConsolidationGateway; + let admin: HardhatEthersSigner; + let authorizedEntity: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let PAUSE_ROLE: string; + let RESUME_ROLE: string; + + // Pre-built valid witnesses with CL proofs for target validators + let validWitnesses: { + proof: string[]; + pubkey: string; + validatorIndex: number; + childBlockTimestamp: number; + slot: number; + proposerIndex: number; + }[]; + + let originalState: string; + + before(async () => { + [admin, authorizedEntity, stranger] = await ethers.getSigners(); + + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + dsm = await 
ethers.deployContract("DepositSecurityModule__MockForConsolidationGateway"); + const lido: Lido__MockForConsolidationGateway = await ethers.deployContract("Lido__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + depositSecurityModule: await dsm.getAddress(), + lido: await lido.getAddress(), + }); + + // Set up merkle tree for CL proof verification + const localMerkle = await prepareLocalMerkleTree(); + const withdrawalCredentials = addressToWC(await withdrawalVault.getAddress(), 2); + + // Generate a validator with matching withdrawal credentials + const validator = generateValidator(withdrawalCredentials); + const { validatorIndex } = await localMerkle.addValidator(validator.container); + + // Commit merkle tree to beacon block root + const { childBlockTimestamp, beaconBlockHeader } = await localMerkle.commitChangesToBeaconRoot(); + + // Build valid witness + const proof = await localMerkle.buildProof(validatorIndex, beaconBlockHeader); + validWitnesses = [ + { + proof, + pubkey: String(validator.container.pubkey), + validatorIndex, + childBlockTimestamp, + slot: beaconBlockHeader.slot as number, + proposerIndex: beaconBlockHeader.proposerIndex as number, + }, + ]; + + consolidationGateway = await ethers.deployContract("ConsolidationGateway", [ + admin, + locatorAddr, + 100, + 1, + 48, + localMerkle.gIFirstValidator, + localMerkle.gIFirstValidator, + 0, + ]); + + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + await consolidationGateway.grantRole(role, authorizedEntity); + + PAUSE_ROLE = await consolidationGateway.PAUSE_ROLE(); + RESUME_ROLE = await consolidationGateway.RESUME_ROLE(); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("pausable until", () => { + beforeEach(async () => { + // set up necessary roles + await 
consolidationGateway.connect(admin).grantRole(PAUSE_ROLE, admin); + await consolidationGateway.connect(admin).grantRole(RESUME_ROLE, admin); + }); + + context("resume", () => { + it("should revert if the sender does not have the RESUME_ROLE", async () => { + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Try to resume without the RESUME_ROLE + await expect(consolidationGateway.connect(stranger).resume()) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, RESUME_ROLE); + }); + + it("should revert if the contract is not paused", async () => { + // Contract is initially not paused + await expect(consolidationGateway.connect(admin).resume()).to.be.revertedWithCustomError( + consolidationGateway, + "PausedExpected", + ); + }); + + it("should resume the contract when paused and emit Resumed event", async () => { + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Resume the contract + await expect(consolidationGateway.connect(admin).resume()).to.emit(consolidationGateway, "Resumed"); + + // Verify contract is resumed + expect(await consolidationGateway.isPaused()).to.equal(false); + }); + }); + + context("pauseFor", () => { + it("should revert if the sender does not have the PAUSE_ROLE", async () => { + await expect(consolidationGateway.connect(stranger).pauseFor(1000n)) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, PAUSE_ROLE); + }); + + it("should revert if the contract is already paused", async () => { + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Try to pause again + await expect(consolidationGateway.connect(admin).pauseFor(500n)).to.be.revertedWithCustomError( + consolidationGateway, + "ResumedExpected", + ); + }); + 
+ it("should revert if pause duration is zero", async () => { + await expect(consolidationGateway.connect(admin).pauseFor(0n)).to.be.revertedWithCustomError( + consolidationGateway, + "ZeroPauseDuration", + ); + }); + + it("should pause the contract for the specified duration and emit Paused event", async () => { + await expect(consolidationGateway.connect(admin).pauseFor(1000n)) + .to.emit(consolidationGateway, "Paused") + .withArgs(1000n); + + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should pause the contract indefinitely with PAUSE_INFINITELY", async () => { + const pauseInfinitely = await consolidationGateway.PAUSE_INFINITELY(); + + // Pause the contract indefinitely + await expect(consolidationGateway.connect(admin).pauseFor(pauseInfinitely)) + .to.emit(consolidationGateway, "Paused") + .withArgs(pauseInfinitely); + + // Verify contract is paused + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time significantly + await advanceChainTime(1_000_000_000n); + + // Contract should still be paused + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should automatically resume after the pause duration passes", async () => { + // Pause the contract for 100 seconds + await consolidationGateway.connect(admin).pauseFor(100n); + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Contract should be automatically resumed + expect(await consolidationGateway.isPaused()).to.equal(false); + }); + }); + + context("pauseUntil", () => { + it("should revert if the sender does not have the PAUSE_ROLE", async () => { + const timestamp = await getCurrentBlockTimestamp(); + await expect(consolidationGateway.connect(stranger).pauseUntil(timestamp + 1000n)) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, PAUSE_ROLE); + }); + + it("should 
revert if the contract is already paused", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Try to pause again with pauseUntil + await expect(consolidationGateway.connect(admin).pauseUntil(timestamp + 1000n)).to.be.revertedWithCustomError( + consolidationGateway, + "ResumedExpected", + ); + }); + + it("should revert if timestamp is in the past", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + await expect(consolidationGateway.connect(admin).pauseUntil(timestamp - 1000n)).to.be.revertedWithCustomError( + consolidationGateway, + "PauseUntilMustBeInFuture", + ); + }); + + it("should pause the contract until the specified timestamp and emit Paused event", async () => { + const timestamp = await getCurrentBlockTimestamp(); + const pauseUntil = timestamp + 1000n; + + await expect(consolidationGateway.connect(admin).pauseUntil(pauseUntil)) + .to.emit(consolidationGateway, "Paused") + .withArgs(pauseUntil - timestamp); + + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should pause the contract indefinitely with PAUSE_INFINITELY", async () => { + const pauseInfinitely = await consolidationGateway.PAUSE_INFINITELY(); + + // Pause the contract indefinitely + await expect(consolidationGateway.connect(admin).pauseUntil(pauseInfinitely)) + .to.emit(consolidationGateway, "Paused") + .withArgs(pauseInfinitely); + + // Verify contract is paused + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time significantly + await advanceChainTime(1_000_000_000n); + + // Contract should still be paused + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should automatically resume after the pause timestamp passes", async () => { + const timestamp = await getCurrentBlockTimestamp(); + const pauseUntil = timestamp + 100n; + + // Pause the contract until timestamp + 100 + 
await consolidationGateway.connect(admin).pauseUntil(pauseUntil); + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Contract should be automatically resumed + expect(await consolidationGateway.isPaused()).to.equal(false); + }); + }); + + context("Interaction with addConsolidationRequests", () => { + it("pauseFor: should prevent consolidation requests immediately after pausing", async () => { + // Pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Should prevent consolidation requests + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "ResumedExpected"); + }); + + it("pauseUntil: should prevent consolidation requests immediately after pausing", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // Pause the contract + await consolidationGateway.connect(admin).pauseUntil(timestamp + 1000n); + + // Should prevent consolidation requests + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "ResumedExpected"); + }); + + it("pauseFor: should allow consolidation requests immediately after resuming", async () => { + // Pause and then resume the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + await consolidationGateway.connect(admin).resume(); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + + 
it("pauseUntil: should allow consolidation requests immediately after resuming", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // Pause and then resume the contract + await consolidationGateway.connect(admin).pauseUntil(timestamp + 1000n); + await consolidationGateway.connect(admin).resume(); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + + it("pauseFor: should allow consolidation requests after pause duration automatically expires", async () => { + // Pause the contract for 100 seconds + await consolidationGateway.connect(admin).pauseFor(100n); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + + it("pauseUntil: should allow consolidation requests after pause duration automatically expires", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // Pause the contract until timestamp + 100 + await consolidationGateway.connect(admin).pauseUntil(timestamp + 100n); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + }); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.rateLimit.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.rateLimit.test.ts new file mode 100644 index 0000000000..b6ff43f5e4 --- /dev/null +++ b/test/0.8.25/consolidationGateway/consolidationGateway.rateLimit.test.ts @@ -0,0 
+1,251 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationGateway, + DepositSecurityModule__MockForConsolidationGateway, + Lido__MockForConsolidationGateway, + WithdrawalVault__MockForConsolidationGateway, +} from "typechain-types"; + +import { addressToWC, advanceChainTime, generateValidator, prepareLocalMerkleTree } from "lib"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +// Helper functions +const grantLimitManagerRole = async (consolidationGateway: ConsolidationGateway, account: HardhatEthersSigner) => { + const role = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await consolidationGateway.grantRole(role, account); +}; + +const setConsolidationLimit = async ( + consolidationGateway: ConsolidationGateway, + signer: HardhatEthersSigner, + maxRequests: number, + requestsPerFrame: number, + frameDuration: number, +) => { + return consolidationGateway + .connect(signer) + .setConsolidationRequestLimit(maxRequests, requestsPerFrame, frameDuration); +}; + +const expectLimitData = async ( + consolidationGateway: ConsolidationGateway, + expectedMaxRequests: number, + expectedPerFrame: number, + expectedFrameDuration: number, + expectedPrevLimit: number, + expectedCurrentLimit: number | typeof ethers.MaxUint256, +) => { + const data = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(data[0]).to.equal(expectedMaxRequests); // maxConsolidationRequestsLimit + expect(data[1]).to.equal(expectedPerFrame); // consolidationsPerFrame + expect(data[2]).to.equal(expectedFrameDuration); // frameDurationInSec + expect(data[3]).to.equal(expectedPrevLimit); // prevConsolidationRequestsLimit + expect(data[4]).to.equal(expectedCurrentLimit); // currentConsolidationRequestsLimit +}; + 
+describe("ConsolidationGateway.sol: rate limit management", () => { + let consolidationGateway: ConsolidationGateway; + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + let admin: HardhatEthersSigner; + let authorizedEntity: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let validWitnesses: { + proof: string[]; + pubkey: string; + validatorIndex: number; + childBlockTimestamp: number; + slot: number; + proposerIndex: number; + }[]; + + let originalState: string; + + before(async () => { + [admin, authorizedEntity, stranger] = await ethers.getSigners(); + + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + const dsm: DepositSecurityModule__MockForConsolidationGateway = await ethers.deployContract( + "DepositSecurityModule__MockForConsolidationGateway", + ); + const lido: Lido__MockForConsolidationGateway = await ethers.deployContract("Lido__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + depositSecurityModule: await dsm.getAddress(), + lido: await lido.getAddress(), + }); + + // Set up merkle tree for CL proof verification + const localMerkle = await prepareLocalMerkleTree(); + const withdrawalCredentials = addressToWC(await withdrawalVault.getAddress(), 2); + + // Generate validators with matching withdrawal credentials + const validators = []; + const validatorIndices: number[] = []; + for (let i = 0; i < 3; i++) { + const validator = generateValidator(withdrawalCredentials); + const { validatorIndex } = await localMerkle.addValidator(validator.container); + validators.push(validator); + validatorIndices.push(validatorIndex); + } + + const { childBlockTimestamp, beaconBlockHeader } = await localMerkle.commitChangesToBeaconRoot(); + + validWitnesses = []; + for (let i = 0; i < 
validators.length; i++) { + const proof = await localMerkle.buildProof(validatorIndices[i], beaconBlockHeader); + validWitnesses.push({ + proof, + pubkey: String(validators[i].container.pubkey), + validatorIndex: validatorIndices[i], + childBlockTimestamp, + slot: beaconBlockHeader.slot as number, + proposerIndex: beaconBlockHeader.proposerIndex as number, + }); + } + + consolidationGateway = await ethers.deployContract("ConsolidationGateway", [ + admin, + locatorAddr, + 100, // maxConsolidationRequestsLimit + 1, // consolidationsPerFrame + 48, // frameDurationInSec + localMerkle.gIFirstValidator, + localMerkle.gIFirstValidator, + 0, + ]); + + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + await consolidationGateway.grantRole(role, authorizedEntity); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("setConsolidationRequestLimit", () => { + it("should revert without EXIT_LIMIT_MANAGER_ROLE", async () => { + const limitManagerRole = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + + await expect(consolidationGateway.connect(stranger).setConsolidationRequestLimit(4, 1, 48)) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(await stranger.getAddress(), limitManagerRole); + }); + + it("should set consolidation limit and emit event", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + const limitTx = await setConsolidationLimit(consolidationGateway, authorizedEntity, 4, 1, 48); + await expect(limitTx).to.emit(consolidationGateway, "ConsolidationRequestsLimitSet").withArgs(4, 1, 48); + }); + + it("should revert if consolidationsPerFrame bigger than maxConsolidationRequestsLimit", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + await expect( + setConsolidationLimit(consolidationGateway, authorizedEntity, 0, 
1, 48), + ).to.be.revertedWithCustomError(consolidationGateway, "TooLargeItemsPerFrame"); + }); + + it("should update limit config values", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + await setConsolidationLimit(consolidationGateway, authorizedEntity, 50, 5, 100); + + await expectLimitData(consolidationGateway, 50, 5, 100, 50, 50); + }); + + it("should allow decreasing limit mid-usage", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + // Consume some limit + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], + ethers.ZeroAddress, + { value: 2 }, + ); + + // Decrease limit — should succeed + await setConsolidationLimit(consolidationGateway, authorizedEntity, 10, 1, 48); + await expectLimitData(consolidationGateway, 10, 1, 48, 10, 10); + }); + }); + + context("getConsolidationRequestLimitFullInfo", () => { + it("should return initial limit data", async () => { + await expectLimitData(consolidationGateway, 100, 1, 48, 100, 100); + }); + + it("should reflect limit consumption after requests", async () => { + // 2 total requests: [source0, source1] -> target0 + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ethers.ZeroAddress, + { value: 3 }, + ); + + await expectLimitData(consolidationGateway, 100, 1, 48, 98, 98); + }); + + it("should restore limit after frame advancement", async () => { + // Consume 2 + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ethers.ZeroAddress, + { value: 3 }, + ); + + await expectLimitData(consolidationGateway, 100, 1, 48, 98, 98); + + // Advance one frame → restores 1 + await advanceChainTime(48n); + await 
expectLimitData(consolidationGateway, 100, 1, 48, 98, 99); + + // Advance another frame → restores another 1 + await advanceChainTime(48n); + await expectLimitData(consolidationGateway, 100, 1, 48, 98, 100); + }); + + it("should return currentConsolidationRequestsLimit as MaxUint256 when limit is 0 (unlimited)", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + await setConsolidationLimit(consolidationGateway, authorizedEntity, 0, 0, 48); + + await expectLimitData(consolidationGateway, 0, 0, 48, 0, ethers.MaxUint256); + }); + + it("should allow unlimited consolidation requests when limit is 0", async () => { + // Default limit is 100, but limit 0 means unlimited — deploy with 0 + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + await setConsolidationLimit(consolidationGateway, authorizedEntity, 0, 0, 48); + + // 3 total requests grouped into pairs + const groups = Array(3) + .fill(0) + .map((_, i) => ({ sourcePubkeys: [PUBKEYS[i % 3]], targetWitness: validWitnesses[i % 3] })); + + // Should not revert even with many requests when limit is 0 (unlimited) + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests(groups, ethers.ZeroAddress, { value: 10 }); + }); + }); +}); diff --git a/test/0.8.25/consolidationMigrator/consolidationMigrator.allowlist.test.ts b/test/0.8.25/consolidationMigrator/consolidationMigrator.allowlist.test.ts new file mode 100644 index 0000000000..1d7a055b7b --- /dev/null +++ b/test/0.8.25/consolidationMigrator/consolidationMigrator.allowlist.test.ts @@ -0,0 +1,276 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationBus__MockForConsolidationMigrator, + ConsolidationMigrator, + StakingRouter__MockForConsolidationMigrator, +} from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + 
+describe("ConsolidationMigrator.sol: allowlist", () => { + let consolidationMigrator: ConsolidationMigrator; + let stakingRouter: StakingRouter__MockForConsolidationMigrator; + let consolidationBus: ConsolidationBus__MockForConsolidationMigrator; + let admin: HardhatEthersSigner; + let allowPairManager: HardhatEthersSigner; + let disallowPairManager: HardhatEthersSigner; + let submitter: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let ALLOW_PAIR_ROLE: string; + let DISALLOW_PAIR_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, allowPairManager, disallowPairManager, submitter, stranger] = await ethers.getSigners(); + + stakingRouter = await ethers.deployContract("StakingRouter__MockForConsolidationMigrator"); + consolidationBus = await ethers.deployContract("ConsolidationBus__MockForConsolidationMigrator"); + + const impl = await ethers.deployContract("ConsolidationMigrator", [ + await stakingRouter.getAddress(), + await consolidationBus.getAddress(), + 1, // sourceModuleId + 2, // targetModuleId + ]); + [consolidationMigrator] = await proxify({ impl, admin }); + await consolidationMigrator.initialize(admin.address); + + ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + + // Grant roles + await consolidationMigrator.connect(admin).grantRole(ALLOW_PAIR_ROLE, allowPairManager.address); + await consolidationMigrator.connect(admin).grantRole(DISALLOW_PAIR_ROLE, disallowPairManager.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("allowPair", () => { + it("should allow a pair with submitter", async () => { + const sourceOpId = 1; + const targetOpId = 10; + + await expect(consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, submitter.address)) + .to.emit(consolidationMigrator, 
"ConsolidationPairAllowed") + .withArgs(sourceOpId, targetOpId, submitter.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.true; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + }); + + it("should revert if caller does not have ALLOW_PAIR_ROLE", async () => { + await expect(consolidationMigrator.connect(stranger).allowPair(1, 10, submitter.address)) + .to.be.revertedWithCustomError(consolidationMigrator, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, ALLOW_PAIR_ROLE); + }); + + it("should revert if submitter is zero address", async () => { + await expect(consolidationMigrator.connect(allowPairManager).allowPair(1, 10, ethers.ZeroAddress)) + .to.be.revertedWithCustomError(consolidationMigrator, "ZeroArgument") + .withArgs("submitter"); + }); + + it("should allow updating submitter for existing pair (idempotent)", async () => { + const sourceOpId = 1; + const targetOpId = 10; + + // First allow with submitter + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, submitter.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + + // Update submitter to stranger + await expect(consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, stranger.address)) + .to.emit(consolidationMigrator, "ConsolidationPairAllowed") + .withArgs(sourceOpId, targetOpId, stranger.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.true; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(stranger.address); + }); + + it("should allow multiple targets for same source with different submitters", async () => { + const sourceOpId = 1; + const targetOpId1 = 10; + const targetOpId2 = 20; + const targetOpId3 = 30; + + await 
consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId1, submitter.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId2, stranger.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId3, admin.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId1)).to.be.true; + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId2)).to.be.true; + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId3)).to.be.true; + + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId1)).to.equal(submitter.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId2)).to.equal(stranger.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId3)).to.equal(admin.address); + + const targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(3); + expect(targets).to.include(BigInt(targetOpId1)); + expect(targets).to.include(BigInt(targetOpId2)); + expect(targets).to.include(BigInt(targetOpId3)); + }); + }); + + context("disallowPair", () => { + beforeEach(async () => { + await consolidationMigrator.connect(allowPairManager).allowPair(1, 10, submitter.address); + }); + + it("should disallow a pair and clear submitter", async () => { + const sourceOpId = 1; + const targetOpId = 10; + + // Verify submitter is set before disallow + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + + await expect(consolidationMigrator.connect(disallowPairManager).disallowPair(sourceOpId, targetOpId)) + .to.emit(consolidationMigrator, "ConsolidationPairDisallowed") + .withArgs(sourceOpId, targetOpId, submitter.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.false; + expect(await consolidationMigrator.getSubmitter(sourceOpId, 
targetOpId)).to.equal(ethers.ZeroAddress); + }); + + it("should revert if caller does not have DISALLOW_PAIR_ROLE", async () => { + await expect(consolidationMigrator.connect(stranger).disallowPair(1, 10)) + .to.be.revertedWithCustomError(consolidationMigrator, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, DISALLOW_PAIR_ROLE); + }); + + it("should revert if caller has ALLOW_PAIR_ROLE but not DISALLOW_PAIR_ROLE", async () => { + await expect(consolidationMigrator.connect(allowPairManager).disallowPair(1, 10)) + .to.be.revertedWithCustomError(consolidationMigrator, "AccessControlUnauthorizedAccount") + .withArgs(allowPairManager.address, DISALLOW_PAIR_ROLE); + }); + + it("should revert if pair not in allowlist", async () => { + const sourceOpId = 999; + const targetOpId = 888; + + await expect(consolidationMigrator.connect(disallowPairManager).disallowPair(sourceOpId, targetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "PairNotInAllowlist") + .withArgs(sourceOpId, targetOpId); + }); + }); + + context("selfDisallowPair", () => { + const sourceOpId = 1; + const targetOpId = 10; + + beforeEach(async () => { + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, submitter.address); + }); + + it("should allow submitter to self-disallow their pair", async () => { + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.true; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + + await expect(consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId)) + .to.emit(consolidationMigrator, "ConsolidationPairDisallowed") + .withArgs(sourceOpId, targetOpId, submitter.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.false; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(ethers.ZeroAddress); + }); + + it("should revert if caller is 
not the submitter", async () => { + await expect(consolidationMigrator.connect(stranger).selfDisallowPair(sourceOpId, targetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(stranger.address, sourceOpId, targetOpId); + }); + + it("should revert if pair does not exist", async () => { + const unknownSourceOpId = 999; + const unknownTargetOpId = 888; + + await expect(consolidationMigrator.connect(submitter).selfDisallowPair(unknownSourceOpId, unknownTargetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, unknownSourceOpId, unknownTargetOpId); + }); + + it("should remove pair from getAllowedTargets", async () => { + // Add another pair + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 20, submitter.address); + + let targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(2); + + await consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId); + + targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(1); + expect(targets[0]).to.be.equal(20n); + }); + + it("should revert if called twice for the same pair", async () => { + await consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId); + + await expect(consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, sourceOpId, targetOpId); + }); + + it("should not require any role", async () => { + // submitter has no roles granted, but is the designated submitter for the pair + expect(await consolidationMigrator.hasRole(ALLOW_PAIR_ROLE, submitter.address)).to.be.false; + expect(await consolidationMigrator.hasRole(DISALLOW_PAIR_ROLE, submitter.address)).to.be.false; + + await 
expect(consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId)).to.emit( + consolidationMigrator, + "ConsolidationPairDisallowed", + ); + }); + }); + + context("view methods", () => { + it("isPairAllowed should return false for non-existent pair", async () => { + expect(await consolidationMigrator.isPairAllowed(999, 888)).to.be.false; + }); + + it("getAllowedTargets should return empty array for new source", async () => { + const targets = await consolidationMigrator.getAllowedTargets(999); + expect(targets.length).to.equal(0); + }); + + it("getSubmitter should return zero address for non-existent pair", async () => { + expect(await consolidationMigrator.getSubmitter(999, 888)).to.equal(ethers.ZeroAddress); + }); + + it("getAllowedTargets should return correct list after adding and removing", async () => { + const sourceOpId = 1; + + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 10, submitter.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 20, stranger.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 30, admin.address); + + let targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(3); + + await consolidationMigrator.connect(disallowPairManager).disallowPair(sourceOpId, 20); + + targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(2); + expect(targets).to.include(BigInt(10)); + expect(targets).to.include(BigInt(30)); + expect(targets).to.not.include(BigInt(20)); + + // Verify submitter was cleared for removed pair + expect(await consolidationMigrator.getSubmitter(sourceOpId, 20)).to.equal(ethers.ZeroAddress); + // Verify remaining submitters are intact + expect(await consolidationMigrator.getSubmitter(sourceOpId, 10)).to.equal(submitter.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, 30)).to.equal(admin.address); 
+ }); + }); +}); diff --git a/test/0.8.25/consolidationMigrator/consolidationMigrator.deploy.test.ts b/test/0.8.25/consolidationMigrator/consolidationMigrator.deploy.test.ts new file mode 100644 index 0000000000..7bed7404bb --- /dev/null +++ b/test/0.8.25/consolidationMigrator/consolidationMigrator.deploy.test.ts @@ -0,0 +1,93 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { + ConsolidationBus__MockForConsolidationMigrator, + StakingRouter__MockForConsolidationMigrator, +} from "typechain-types"; + +import { proxify } from "lib/proxy"; + +describe("ConsolidationMigrator.sol: deployment", () => { + let stakingRouter: StakingRouter__MockForConsolidationMigrator; + let consolidationBus: ConsolidationBus__MockForConsolidationMigrator; + + before(async () => { + stakingRouter = await ethers.deployContract("StakingRouter__MockForConsolidationMigrator"); + consolidationBus = await ethers.deployContract("ConsolidationBus__MockForConsolidationMigrator"); + }); + + it("should deploy and initialize successfully with valid parameters", async () => { + const [admin] = await ethers.getSigners(); + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + const impl = await ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 2]); + const [migrator] = await proxify({ impl, admin }); + await migrator.initialize(admin.address); + + const adminRole = await migrator.DEFAULT_ADMIN_ROLE(); + expect(await migrator.hasRole(adminRole, admin.address)).to.be.true; + expect(await migrator.getStakingRouter()).to.equal(stakingRouterAddr); + expect(await migrator.getConsolidationBus()).to.equal(consolidationBusAddr); + expect(await migrator.sourceModuleId()).to.equal(1); + expect(await migrator.targetModuleId()).to.equal(2); + }); + + it("should revert if admin is zero address on initialize", async () => { + const [admin] = await ethers.getSigners(); 
+ const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + const impl = await ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 2]); + const [migrator] = await proxify({ impl, admin }); + + await expect(migrator.initialize(ethers.ZeroAddress)).to.be.revertedWithCustomError(migrator, "AdminCannotBeZero"); + }); + + it("should revert if stakingRouter is zero address", async () => { + const consolidationBusAddr = await consolidationBus.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [ethers.ZeroAddress, consolidationBusAddr, 1, 2])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("stakingRouter"); + }); + + it("should revert if consolidationBus is zero address", async () => { + const stakingRouterAddr = await stakingRouter.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, ethers.ZeroAddress, 1, 2])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("consolidationBus"); + }); + + it("should revert if sourceModuleId is zero", async () => { + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 0, 2])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("sourceModuleId"); + }); + + it("should revert if targetModuleId is zero", async () => { + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 0])) + 
.to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("targetModuleId"); + }); + + it("should revert on double initialization", async () => { + const [admin] = await ethers.getSigners(); + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + const impl = await ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 2]); + const [migrator] = await proxify({ impl, admin }); + await migrator.initialize(admin.address); + + await expect(migrator.initialize(admin.address)).to.be.revertedWithCustomError(migrator, "InvalidInitialization"); + }); +}); diff --git a/test/0.8.25/consolidationMigrator/consolidationMigrator.submit.test.ts b/test/0.8.25/consolidationMigrator/consolidationMigrator.submit.test.ts new file mode 100644 index 0000000000..8ba422ccaa --- /dev/null +++ b/test/0.8.25/consolidationMigrator/consolidationMigrator.submit.test.ts @@ -0,0 +1,282 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationBus__MockForConsolidationMigrator, + ConsolidationMigrator, + SourceModule__MockForConsolidationMigrator, + StakingRouter__MockForConsolidationMigrator, + TargetModule__MockForConsolidationMigrator, +} from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationMigrator.sol: submit", () => { + let consolidationMigrator: ConsolidationMigrator; + let stakingRouter: StakingRouter__MockForConsolidationMigrator; + let sourceModule: SourceModule__MockForConsolidationMigrator; + let targetModule: TargetModule__MockForConsolidationMigrator; + let consolidationBus: ConsolidationBus__MockForConsolidationMigrator; + let admin: HardhatEthersSigner; + let 
allowPairManager: HardhatEthersSigner; + let submitter: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + const SOURCE_MODULE_ID = 1; + const TARGET_MODULE_ID = 2; + const SOURCE_OPERATOR_ID = 100; + const TARGET_OPERATOR_ID = 200; + + let originalState: string; + + before(async () => { + [admin, allowPairManager, submitter, stranger] = await ethers.getSigners(); + + // Deploy mocks + stakingRouter = await ethers.deployContract("StakingRouter__MockForConsolidationMigrator"); + sourceModule = await ethers.deployContract("SourceModule__MockForConsolidationMigrator"); + targetModule = await ethers.deployContract("TargetModule__MockForConsolidationMigrator"); + consolidationBus = await ethers.deployContract("ConsolidationBus__MockForConsolidationMigrator"); + + // Set up staking router to return module addresses + await stakingRouter.mock__setStakingModule(SOURCE_MODULE_ID, await sourceModule.getAddress()); + await stakingRouter.mock__setStakingModule(TARGET_MODULE_ID, await targetModule.getAddress()); + + // Deploy ConsolidationMigrator + const impl = await ethers.deployContract("ConsolidationMigrator", [ + await stakingRouter.getAddress(), + await consolidationBus.getAddress(), + SOURCE_MODULE_ID, + TARGET_MODULE_ID, + ]); + [consolidationMigrator] = await proxify({ impl, admin }); + await consolidationMigrator.initialize(admin.address); + + const ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + const DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + await consolidationMigrator.connect(admin).grantRole(ALLOW_PAIR_ROLE, allowPairManager.address); + await consolidationMigrator.connect(admin).grantRole(DISALLOW_PAIR_ROLE, allowPairManager.address); + + // Allow the test pair with submitter + await consolidationMigrator + .connect(allowPairManager) + .allowPair(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, submitter.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => 
await Snapshot.restore(originalState)); + + context("submitConsolidationBatch", () => { + beforeEach(async () => { + // Set up source module with deposited keys (totalDeposited=2) + await sourceModule.mock__setOperatorData(SOURCE_OPERATOR_ID, 2, [PUBKEYS[0], PUBKEYS[1]]); + + // Set up target module with deposited keys (totalDeposited=2) + await targetModule.mock__setOperatorData(TARGET_OPERATOR_ID, 2, [PUBKEYS[2], PUBKEYS[3]]); + }); + + it("should submit consolidation batch from designated submitter", async () => { + const groups = [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + { sourceKeyIndices: [1], targetKeyIndex: 1 }, + ]; + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups), + ) + .to.emit(consolidationMigrator, "ConsolidationSubmitted") + .withArgs( + SOURCE_OPERATOR_ID, + TARGET_OPERATOR_ID, + groups.map((g) => [g.sourceKeyIndices, g.targetKeyIndex]), + ); + + // Verify ConsolidationBus was called + expect(await consolidationBus.callCount()).to.equal(1); + expect(await consolidationBus.lastCaller()).to.equal(await consolidationMigrator.getAddress()); + expect(await consolidationBus.getLastTotalPairsCount()).to.equal(2); + }); + + it("should forward correct pubkeys to ConsolidationBus", async () => { + const groups = [{ sourceKeyIndices: [0], targetKeyIndex: 0 }]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups); + + // Verify the pubkeys + const sourcePubkey = await consolidationBus.getLastSourcePubkeyFromGroup(0, 0); + const targetPubkey = await consolidationBus.getLastTargetPubkey(0); + + expect(sourcePubkey.toLowerCase()).to.equal(PUBKEYS[0].toLowerCase()); + expect(targetPubkey.toLowerCase()).to.equal(PUBKEYS[2].toLowerCase()); + }); + + it("should revert if caller is not the designated submitter", async () => { + await expect( + consolidationMigrator + .connect(stranger) + 
.submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(stranger.address, SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + }); + + it("should revert if pair is not allowed (no submitter set)", async () => { + const unknownTargetOpId = 999; + + // When pair is not allowed, there's no submitter set (address(0)) + // So caller will fail authorization check first + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, unknownTargetOpId, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, SOURCE_OPERATOR_ID, unknownTargetOpId); + }); + + it("should revert if source key is not deposited", async () => { + // Key at index 2 exists but is not deposited (totalDeposited=2, 3 keys total) + await sourceModule.mock__setOperatorData(SOURCE_OPERATOR_ID, 2, [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2]]); + // Add more target keys and make index 2 deposited + await targetModule.mock__setOperatorData(TARGET_OPERATOR_ID, 3, [PUBKEYS[2], PUBKEYS[3], PUBKEYS[0]]); + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [2], targetKeyIndex: 2 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(SOURCE_MODULE_ID, SOURCE_OPERATOR_ID, 2); + }); + + it("should revert if target key is not deposited", async () => { + // totalDepositedValidators = 1, so key at index 0 is deposited, but index 1 is NOT + await targetModule.mock__setOperatorData(TARGET_OPERATOR_ID, 1, [PUBKEYS[2], PUBKEYS[3]]); + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], 
targetKeyIndex: 1 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(TARGET_MODULE_ID, TARGET_OPERATOR_ID, 1); + }); + + it("should emit ConsolidationBus event", async () => { + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ).to.emit(consolidationBus, "AddConsolidationRequestsCalled"); + }); + + it("should handle multiple validators in a batch", async () => { + const groups = [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + { sourceKeyIndices: [1], targetKeyIndex: 1 }, + ]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups); + + expect(await consolidationBus.getLastTotalPairsCount()).to.equal(2); + + const sourcePubkey0 = await consolidationBus.getLastSourcePubkeyFromGroup(0, 0); + const sourcePubkey1 = await consolidationBus.getLastSourcePubkeyFromGroup(1, 0); + const targetPubkey0 = await consolidationBus.getLastTargetPubkey(0); + const targetPubkey1 = await consolidationBus.getLastTargetPubkey(1); + + expect(sourcePubkey0.toLowerCase()).to.equal(PUBKEYS[0].toLowerCase()); + expect(sourcePubkey1.toLowerCase()).to.equal(PUBKEYS[1].toLowerCase()); + expect(targetPubkey0.toLowerCase()).to.equal(PUBKEYS[2].toLowerCase()); + expect(targetPubkey1.toLowerCase()).to.equal(PUBKEYS[3].toLowerCase()); + }); + + it("should handle multi-source group consolidation (multiple sources to one target)", async () => { + // Two source keys consolidated to one target + const groups = [{ sourceKeyIndices: [0, 1], targetKeyIndex: 0 }]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups); + + // Should produce 2 pairs in 1 group + expect(await consolidationBus.getLastTotalPairsCount()).to.equal(2); + expect(await 
consolidationBus.getLastGroupsCount()).to.equal(1); + expect(await consolidationBus.getLastGroupSize(0)).to.equal(2); + + const sourcePubkey0 = await consolidationBus.getLastSourcePubkeyFromGroup(0, 0); + const sourcePubkey1 = await consolidationBus.getLastSourcePubkeyFromGroup(0, 1); + const targetPubkey = await consolidationBus.getLastTargetPubkey(0); + + expect(sourcePubkey0.toLowerCase()).to.equal(PUBKEYS[0].toLowerCase()); + expect(sourcePubkey1.toLowerCase()).to.equal(PUBKEYS[1].toLowerCase()); + expect(targetPubkey.toLowerCase()).to.equal(PUBKEYS[2].toLowerCase()); + }); + + it("should allow new submitter to submit after allowPair update", async () => { + // Update the pair with a new submitter (stranger) + await consolidationMigrator + .connect(allowPairManager) + .allowPair(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, stranger.address); + + // Old submitter should now fail + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + + // New submitter should succeed + await expect( + consolidationMigrator + .connect(stranger) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ).to.emit(consolidationMigrator, "ConsolidationSubmitted"); + }); + + it("should revert after pair is disallowed", async () => { + // Disallow the pair + await consolidationMigrator.connect(allowPairManager).disallowPair(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + + // Submitter should no longer be able to submit + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + 
.to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + }); + }); +}); diff --git a/test/0.8.25/contracts/ConsolidationBus__MockForConsolidationMigrator.sol b/test/0.8.25/contracts/ConsolidationBus__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..dae1c7a90f --- /dev/null +++ b/test/0.8.25/contracts/ConsolidationBus__MockForConsolidationMigrator.sol @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +/** + * @dev Mock for ConsolidationBus for ConsolidationMigrator tests + */ +contract ConsolidationBus__MockForConsolidationMigrator { + struct ConsolidationGroup { + bytes[] sourcePubkeys; + bytes targetPubkey; + } + + event AddConsolidationRequestsCalled(uint256 groupsCount, address caller); + + ConsolidationGroup[] internal _lastGroups; + address public lastCaller; + uint256 public callCount; + + bool internal _shouldRevert; + string internal _revertReason; + + function addConsolidationRequests(ConsolidationGroup[] calldata groups) external { + if (_shouldRevert) { + revert(_revertReason); + } + + delete _lastGroups; + + for (uint256 i = 0; i < groups.length; ++i) { + _lastGroups.push(); + _lastGroups[i].targetPubkey = groups[i].targetPubkey; + for (uint256 j = 0; j < groups[i].sourcePubkeys.length; ++j) { + _lastGroups[i].sourcePubkeys.push(groups[i].sourcePubkeys[j]); + } + } + lastCaller = msg.sender; + callCount++; + + emit AddConsolidationRequestsCalled(groups.length, msg.sender); + } + + function mock__setRevert(bool shouldRevert, string calldata reason) external { + _shouldRevert = shouldRevert; + _revertReason = reason; + } + + function getLastSourcePubkeyFromGroup(uint256 groupIndex, uint256 keyIndex) external view returns (bytes memory) { + return _lastGroups[groupIndex].sourcePubkeys[keyIndex]; + } + + function getLastTargetPubkey(uint256 index) external view returns (bytes 
memory) { + return _lastGroups[index].targetPubkey; + } + + function getLastGroupsCount() external view returns (uint256) { + return _lastGroups.length; + } + + function getLastGroupSize(uint256 groupIndex) external view returns (uint256) { + return _lastGroups[groupIndex].sourcePubkeys.length; + } + + function getLastTotalPairsCount() external view returns (uint256) { + uint256 total = 0; + for (uint256 i = 0; i < _lastGroups.length; ++i) { + total += _lastGroups[i].sourcePubkeys.length; + } + return total; + } +} diff --git a/test/0.8.25/contracts/ConsolidationGateway__MockForConsolidationBus.sol b/test/0.8.25/contracts/ConsolidationGateway__MockForConsolidationBus.sol new file mode 100644 index 0000000000..49d2def147 --- /dev/null +++ b/test/0.8.25/contracts/ConsolidationGateway__MockForConsolidationBus.sol @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +import {IPredepositGuarantee} from "contracts/0.8.25/vaults/interfaces/IPredepositGuarantee.sol"; + +interface IConsolidationGateway { + struct ConsolidationWitnessGroup { + bytes[] sourcePubkeys; + IPredepositGuarantee.ValidatorWitness targetWitness; + } + + function addConsolidationRequests( + ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable; +} + +contract ConsolidationGateway__MockForConsolidationBus { + event AddConsolidationRequestsCalled(uint256 groupsCount, address refundRecipient, uint256 value); + + uint256 internal _fee; + bool internal _shouldRevert; + string internal _revertReason; + + constructor() { + _fee = 1; + } + + function addConsolidationRequests( + IConsolidationGateway.ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable { + if (_shouldRevert) { + revert(_revertReason); + } + + emit AddConsolidationRequestsCalled(groups.length, refundRecipient, msg.value); + + // Count total requests and simulate refund if excess ETH was sent + uint256 
totalRequests = 0; + for (uint256 i = 0; i < groups.length; ++i) { + totalRequests += groups[i].sourcePubkeys.length; + } + uint256 totalFee = totalRequests * _fee; + if (msg.value > totalFee) { + (bool success, ) = refundRecipient.call{value: msg.value - totalFee}(""); + require(success, "Refund failed"); + } + } + + function mock__setFee(uint256 fee) external { + _fee = fee; + } + + function mock__setRevert(bool shouldRevert, string calldata reason) external { + _shouldRevert = shouldRevert; + _revertReason = reason; + } + + function mock__getFee() external view returns (uint256) { + return _fee; + } +} diff --git a/test/0.8.25/contracts/DepositCallerWrapper__MockForStakingRouter.sol b/test/0.8.25/contracts/DepositCallerWrapper__MockForStakingRouter.sol new file mode 100644 index 0000000000..1c77fc0975 --- /dev/null +++ b/test/0.8.25/contracts/DepositCallerWrapper__MockForStakingRouter.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity ^0.8.25; + +interface IStakingRouter { + function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external payable; + function getStakingModuleMaxDepositsCount( + uint256 _stakingModuleId, + uint256 _depositableEth + ) external view returns (uint256); } + +/// @notice Test-only wrapper that must be set as the authorized Lido caller in the router. +contract DepositCallerWrapper__MockForStakingRouter { + IStakingRouter public immutable stakingRouter; + + constructor(IStakingRouter _router) { + stakingRouter = _router; + } + + /// @notice Forwards the call and the entire msg.value to StakingRouter.deposit with empty deposit calldata. + /// No refund logic; requires exact msg.value.
+ function deposit(uint256 stakingModuleId) external payable { + stakingRouter.deposit{value: msg.value}(stakingModuleId, bytes("")); + } +} diff --git a/test/0.8.25/contracts/DepositSecurityModule__MockForConsolidationGateway.sol b/test/0.8.25/contracts/DepositSecurityModule__MockForConsolidationGateway.sol new file mode 100644 index 0000000000..6a36208138 --- /dev/null +++ b/test/0.8.25/contracts/DepositSecurityModule__MockForConsolidationGateway.sol @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract DepositSecurityModule__MockForConsolidationGateway { + bool public isDepositsPaused; + + function mock__setDepositsPaused(bool _paused) external { + isDepositsPaused = _paused; + } +} diff --git a/test/0.8.25/contracts/Lido__MockForConsolidationGateway.sol b/test/0.8.25/contracts/Lido__MockForConsolidationGateway.sol new file mode 100644 index 0000000000..2b898c487d --- /dev/null +++ b/test/0.8.25/contracts/Lido__MockForConsolidationGateway.sol @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract Lido__MockForConsolidationGateway { + bool public canDepositFlag = true; + + function mock__setCanDeposit(bool _value) external { + canDepositFlag = _value; + } + + function canDeposit() external view returns (bool) { + return canDepositFlag; + } +} diff --git a/test/0.8.25/contracts/Lido__MockForStakingRouter.sol b/test/0.8.25/contracts/Lido__MockForStakingRouter.sol new file mode 100644 index 0000000000..7f3b95e29f --- /dev/null +++ b/test/0.8.25/contracts/Lido__MockForStakingRouter.sol @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +interface IStakingRouter { + function receiveDepositableEther() external payable; +} + +contract Lido__MockForStakingRouter { + uint256 internal depositableEther__mocked; + address public stakingRouter; + + event 
WithdrawDepositableEtherCalled(uint256 amount, uint256 depositsCount); + + constructor() payable {} + + receive() external payable {} + + function setStakingRouter(address _stakingRouter) external { + stakingRouter = _stakingRouter; + } + + function setDepositableEther(uint256 _depositableEther) external { + depositableEther__mocked = _depositableEther; + } + + function getDepositableEther() external view returns (uint256) { + return depositableEther__mocked; + } + + function withdrawDepositableEther(uint256 _amount, uint256 _depositsCount) external { + require(msg.sender == stakingRouter, "ONLY_STAKING_ROUTER"); + require(_amount <= depositableEther__mocked, "NOT_ENOUGH_ETHER"); + + depositableEther__mocked -= _amount; + + emit WithdrawDepositableEtherCalled(_amount, _depositsCount); + + // Send ETH to staking router via receiveDepositableEther + IStakingRouter(stakingRouter).receiveDepositableEther{value: _amount}(); + } + + // Utility to fund the mock with ETH + function fund() external payable {} +} diff --git a/test/0.8.25/contracts/Lido__MockForTopUpGateway.sol b/test/0.8.25/contracts/Lido__MockForTopUpGateway.sol new file mode 100644 index 0000000000..2ce776a5fa --- /dev/null +++ b/test/0.8.25/contracts/Lido__MockForTopUpGateway.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +contract Lido__MockForTopUpGateway { + bool public canDepositFlag = true; + + function setCanDeposit(bool value) external { + canDepositFlag = value; + } + + function canDeposit() external view returns (bool) { + return canDepositFlag; + } +} diff --git a/test/0.8.25/contracts/SourceModule__MockForConsolidationMigrator.sol b/test/0.8.25/contracts/SourceModule__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..a13dca1151 --- /dev/null +++ b/test/0.8.25/contracts/SourceModule__MockForConsolidationMigrator.sol @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +/** 
+ * @dev Mock for source staking module for ConsolidationMigrator tests. + * Implements the IStakingModule interface (getSigningKeys + getNodeOperatorSummary). + */ +contract SourceModule__MockForConsolidationMigrator { + uint256 public constant PUBKEY_LENGTH = 48; + uint256 public constant SIGNATURE_LENGTH = 96; + + struct NodeOperatorData { + uint256 totalDepositedValidators; + bytes[] pubkeys; + } + + // operatorId => data + mapping(uint256 => NodeOperatorData) internal _operators; + + function mock__setOperatorData( + uint256 operatorId, + uint256 totalDepositedValidators, + bytes[] calldata pubkeys + ) external { + _operators[operatorId].totalDepositedValidators = totalDepositedValidators; + delete _operators[operatorId].pubkeys; + for (uint256 i = 0; i < pubkeys.length; ++i) { + _operators[operatorId].pubkeys.push(pubkeys[i]); + } + } + + function getNodeOperatorSummary( + uint256 _nodeOperatorId + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) + { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + totalDepositedValidators = op.totalDepositedValidators; + return (0, 0, 0, 0, 0, 0, totalDepositedValidators, 0); + } + + // NOR interface + function getSigningKeys( + uint256 _nodeOperatorId, + uint256 _offset, + uint256 _limit + ) external view returns (bytes memory pubkeys, bytes memory signatures, bool[] memory used) { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + + pubkeys = new bytes(_limit * PUBKEY_LENGTH); + signatures = new bytes(_limit * SIGNATURE_LENGTH); + used = new bool[](_limit); + + for (uint256 i = 0; i < _limit; ++i) { + uint256 keyIndex = _offset + i; + if (keyIndex < op.pubkeys.length) { + bytes storage key = op.pubkeys[keyIndex]; + for (uint256 j = 0; j < 
PUBKEY_LENGTH; ++j) { + pubkeys[i * PUBKEY_LENGTH + j] = key[j]; + } + used[i] = keyIndex < op.totalDepositedValidators; + } + } + + return (pubkeys, signatures, used); + } +} diff --git a/test/0.8.25/contracts/StakingModuleV2__MockForStakingRouter.sol b/test/0.8.25/contracts/StakingModuleV2__MockForStakingRouter.sol new file mode 100644 index 0000000000..0f1654dbcf --- /dev/null +++ b/test/0.8.25/contracts/StakingModuleV2__MockForStakingRouter.sol @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; +import {IStakingModuleV2} from "contracts/common/interfaces/IStakingModuleV2.sol"; + +contract StakingModuleV2__MockForStakingRouter is IStakingModule, IStakingModuleV2 { + event Mock__TargetValidatorsLimitsUpdated(uint256 _nodeOperatorId, uint256 _targetLimitMode, uint256 _targetLimit); + event Mock__RefundedValidatorsCountUpdated(uint256 _nodeOperatorId, uint256 _refundedValidatorsCount); + event Mock__OnRewardsMinted(uint256 _totalShares); + event Mock__ExitedValidatorsCountUpdated(bytes _nodeOperatorIds, bytes _stuckValidatorsCounts); + + event Mock__reportValidatorExitDelay( + uint256 nodeOperatorId, + uint256 proofSlotTimestamp, + bytes publicKeys, + uint256 eligibleToExitInSec + ); + + event Mock__onValidatorExitTriggered( + uint256 _nodeOperatorId, + bytes publicKeys, + uint256 withdrawalRequestPaidFee, + uint256 exitType + ); + + function getType() external view returns (bytes32) { + return keccak256(abi.encodePacked("staking.module")); + } + + uint256 private totalExitedValidators__mocked; + uint256 private totalDepositedValidators__mocked; + uint256 private depositableValidatorsCount__mocked; + + function getStakingModuleSummary() + external + view + returns (uint256 totalExitedValidators, uint256 totalDepositedValidators, uint256 depositableValidatorsCount) + { + totalExitedValidators = 
totalExitedValidators__mocked; + totalDepositedValidators = totalDepositedValidators__mocked; + depositableValidatorsCount = depositableValidatorsCount__mocked; + } + + function mock__getStakingModuleSummary( + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) external { + totalExitedValidators__mocked = totalExitedValidators; + totalDepositedValidators__mocked = totalDepositedValidators; + depositableValidatorsCount__mocked = depositableValidatorsCount; + } + + uint256 private nodeOperatorTargetLimitMode__mocked; + uint256 private nodeOperatorTargetValidatorsCount__mocked; + uint256 private nodeOperatorStuckValidatorsCount__mocked; + uint256 private nodeOperatorRefundedValidatorsCount__mocked; + uint256 private nodeOperatorStuckPenaltyEndTimestamp__mocked; + uint256 private nodeOperatorNodeOperatorTotalExitedValidators__mocked; + uint256 private nodeOperatorNodeOperatorTotalDepositedValidators__mocked; + uint256 private nodeOperatorNodeOperatorDepositableValidatorsCount__mocked; + + function getNodeOperatorSummary( + uint256 + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) + { + targetLimitMode = nodeOperatorTargetLimitMode__mocked; + targetValidatorsCount = nodeOperatorTargetValidatorsCount__mocked; + stuckValidatorsCount = nodeOperatorStuckValidatorsCount__mocked; + refundedValidatorsCount = nodeOperatorRefundedValidatorsCount__mocked; + stuckPenaltyEndTimestamp = nodeOperatorStuckPenaltyEndTimestamp__mocked; + totalExitedValidators = nodeOperatorNodeOperatorTotalExitedValidators__mocked; + totalDepositedValidators = nodeOperatorNodeOperatorTotalDepositedValidators__mocked; + depositableValidatorsCount = 
nodeOperatorNodeOperatorDepositableValidatorsCount__mocked; + } + + function mock__getNodeOperatorSummary( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) external { + nodeOperatorTargetLimitMode__mocked = targetLimitMode; + nodeOperatorTargetValidatorsCount__mocked = targetValidatorsCount; + nodeOperatorStuckValidatorsCount__mocked = stuckValidatorsCount; + nodeOperatorRefundedValidatorsCount__mocked = refundedValidatorsCount; + nodeOperatorStuckPenaltyEndTimestamp__mocked = stuckPenaltyEndTimestamp; + nodeOperatorNodeOperatorTotalExitedValidators__mocked = totalExitedValidators; + nodeOperatorNodeOperatorTotalDepositedValidators__mocked = totalDepositedValidators; + nodeOperatorNodeOperatorDepositableValidatorsCount__mocked = depositableValidatorsCount; + } + + uint256 private nonce; + + function getNonce() external view returns (uint256) { + return nonce; + } + + function mock__getNonce(uint256 newNonce) external { + nonce = newNonce; + } + + uint256 private nodeOperatorsCount__mocked; + uint256 private activeNodeOperatorsCount__mocked; + + function getNodeOperatorsCount() external view returns (uint256) { + return nodeOperatorsCount__mocked; + } + + function getActiveNodeOperatorsCount() external view returns (uint256) { + return activeNodeOperatorsCount__mocked; + } + + function mock__nodeOperatorsCount(uint256 total, uint256 active) external { + nodeOperatorsCount__mocked = total; + activeNodeOperatorsCount__mocked = active; + } + + function getNodeOperatorIsActive(uint256) external view returns (bool) { + return true; + } + + uint256[] private nodeOperatorsIds__mocked; + + function getNodeOperatorIds(uint256, uint256) external view returns (uint256[] memory nodeOperatorIds) { + return nodeOperatorsIds__mocked; + } + + function 
mock__getNodeOperatorIds(uint256[] calldata nodeOperatorsIds) external { + nodeOperatorsIds__mocked = nodeOperatorsIds; + } + + bool private onRewardsMintedShouldRevert = false; + bool private onRewardsMintedShouldRunOutGas = false; + + function onRewardsMinted(uint256 _totalShares) external { + require(!onRewardsMintedShouldRevert, "revert reason"); + + if (onRewardsMintedShouldRunOutGas) { + revert(); + } + + emit Mock__OnRewardsMinted(_totalShares); + } + + function mock__revertOnRewardsMinted(bool shouldRevert, bool shouldRunOutOfGas) external { + onRewardsMintedShouldRevert = shouldRevert; + onRewardsMintedShouldRunOutGas = shouldRunOutOfGas; + } + + event Mock__VettedSigningKeysCountDecreased(bytes _nodeOperatorIds, bytes _stuckValidatorsCounts); + + function decreaseVettedSigningKeysCount( + bytes calldata _nodeOperatorIds, + bytes calldata _vettedSigningKeysCounts + ) external { + emit Mock__VettedSigningKeysCountDecreased(_nodeOperatorIds, _vettedSigningKeysCounts); + } + + event Mock__StuckValidatorsCountUpdated(bytes _nodeOperatorIds, bytes _stuckValidatorsCounts); + + function updateStuckValidatorsCount( + bytes calldata _nodeOperatorIds, + bytes calldata _stuckValidatorsCounts + ) external { + emit Mock__StuckValidatorsCountUpdated(_nodeOperatorIds, _stuckValidatorsCounts); + } + + function updateExitedValidatorsCount( + bytes calldata _nodeOperatorIds, + bytes calldata _stuckValidatorsCounts + ) external { + emit Mock__ExitedValidatorsCountUpdated(_nodeOperatorIds, _stuckValidatorsCounts); + } + + function updateTargetValidatorsLimits( + uint256 _nodeOperatorId, + uint256 _targetLimitMode, + uint256 _targetLimit + ) external { + emit Mock__TargetValidatorsLimitsUpdated(_nodeOperatorId, _targetLimitMode, _targetLimit); + } + + event Mock__ValidatorsCountUnsafelyUpdated(uint256 _nodeOperatorId, uint256 _exitedValidatorsCount); + + function unsafeUpdateValidatorsCount(uint256 _nodeOperatorId, uint256 _exitedValidatorsCount) external { + emit 
Mock__ValidatorsCountUnsafelyUpdated(_nodeOperatorId, _exitedValidatorsCount); + } + + function obtainDepositData( + uint256 _depositsCount, + bytes calldata + ) external returns (bytes memory publicKeys, bytes memory signatures) { + publicKeys = new bytes(48 * _depositsCount); + signatures = new bytes(96 * _depositsCount); + } + + // --- Top-up mock data --- + + bytes[] private topUpPubkeys__mocked; + uint256[] private topUpAmounts__mocked; + bool private useCustomTopUpData__mocked; + bool private shouldRevert__mocked; + + function mock__setShouldRevert(bool shouldRevert) external { + shouldRevert__mocked = shouldRevert; + } + + function mock__setTopUpDepositData(uint256[] calldata amounts) external { + delete topUpAmounts__mocked; + + for (uint256 i = 0; i < amounts.length; ++i) { + topUpAmounts__mocked.push(amounts[i]); + } + + useCustomTopUpData__mocked = true; + } + + function mock__clearTopUpDepositData() external { + delete topUpAmounts__mocked; + useCustomTopUpData__mocked = false; + } + + // *** TOP-UP (used by topUp()) *** + function allocateDeposits( + uint256, + bytes[] calldata, + uint256[] calldata, + uint256[] calldata, + uint256[] calldata _topUpLimits + ) external returns (uint256[] memory topUpAmounts) { + require(!shouldRevert__mocked, "Mock: revert requested"); + + if (useCustomTopUpData__mocked) { + return topUpAmounts__mocked; + } + + return _topUpLimits; + } + + event Mock__onExitedAndStuckValidatorsCountsUpdated(); + + bool private onExitedAndStuckValidatorsCountsUpdatedShouldRevert = false; + bool private onExitedAndStuckValidatorsCountsUpdatedShouldRunOutGas = false; + + function onExitedAndStuckValidatorsCountsUpdated() external { + require(!onExitedAndStuckValidatorsCountsUpdatedShouldRevert, "revert reason"); + + if (onExitedAndStuckValidatorsCountsUpdatedShouldRunOutGas) { + revert(); + } + + emit Mock__onExitedAndStuckValidatorsCountsUpdated(); + } + + function mock__onExitedAndStuckValidatorsCountsUpdated(bool shouldRevert, bool 
shouldRunOutGas) external { + onExitedAndStuckValidatorsCountsUpdatedShouldRevert = shouldRevert; + onExitedAndStuckValidatorsCountsUpdatedShouldRunOutGas = shouldRunOutGas; + } + + event Mock__WithdrawalCredentialsChanged(); + + bool private onWithdrawalCredentialsChangedShouldRevert = false; + bool private onWithdrawalCredentialsChangedShouldRunOutGas = false; + + function onWithdrawalCredentialsChanged() external { + require(!onWithdrawalCredentialsChangedShouldRevert, "revert reason"); + + if (onWithdrawalCredentialsChangedShouldRunOutGas) { + revert(); + } + + emit Mock__WithdrawalCredentialsChanged(); + } + + function mock__onWithdrawalCredentialsChanged(bool shouldRevert, bool shouldRunOutGas) external { + onWithdrawalCredentialsChangedShouldRevert = shouldRevert; + onWithdrawalCredentialsChangedShouldRunOutGas = shouldRunOutGas; + } + + bool private shouldBePenalized__mocked; + + function reportValidatorExitDelay( + uint256 _nodeOperatorId, + uint256 _proofSlotTimestamp, + bytes calldata _publicKeys, + uint256 _eligibleToExitInSec + ) external { + emit Mock__reportValidatorExitDelay(_nodeOperatorId, _proofSlotTimestamp, _publicKeys, _eligibleToExitInSec); + } + + function onValidatorExitTriggered( + uint256 _nodeOperatorId, + bytes calldata _publicKeys, + uint256 _withdrawalRequestPaidFee, + uint256 _exitType + ) external { + emit Mock__onValidatorExitTriggered(_nodeOperatorId, _publicKeys, _withdrawalRequestPaidFee, _exitType); + } + + function isValidatorExitDelayPenaltyApplicable( + uint256 _nodeOperatorId, + uint256 _proofSlotTimestamp, + bytes calldata _publicKey, + uint256 _eligibleToExitInSec + ) external view returns (bool) { + return shouldBePenalized__mocked; + } + + function mock__isValidatorExitDelayPenaltyApplicable(bool _shouldBePenalized) external { + shouldBePenalized__mocked = _shouldBePenalized; + } + + uint256 private exitDeadlineThreshold__mocked; + + function exitDeadlineThreshold(uint256 _nodeOperatorId) external view returns (uint256) 
{ + return exitDeadlineThreshold__mocked; + } + + function mock__exitDeadlineThreshold(uint256 _threshold) external { + exitDeadlineThreshold__mocked = _threshold; + } + + uint256 private totalModuleStake_mocked; + + function getTotalModuleStake() external view returns (uint256) { + return totalModuleStake_mocked; + } + + function mock__getTotalModuleStake(uint256 _totalModuleStake) external { + totalModuleStake_mocked = _totalModuleStake; + } +} diff --git a/test/0.8.9/contracts/StakingModule__MockForStakingRouter.sol b/test/0.8.25/contracts/StakingModule__MockForStakingRouter.sol similarity index 92% rename from test/0.8.9/contracts/StakingModule__MockForStakingRouter.sol rename to test/0.8.25/contracts/StakingModule__MockForStakingRouter.sol index c7371a9268..65f3653a04 100644 --- a/test/0.8.9/contracts/StakingModule__MockForStakingRouter.sol +++ b/test/0.8.25/contracts/StakingModule__MockForStakingRouter.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; @@ -205,12 +205,31 @@ contract StakingModule__MockForStakingRouter is IStakingModule { emit Mock__ValidatorsCountUnsafelyUpdated(_nodeOperatorId, _exitedValidatorsCount); } + bytes private obtainDepositData_publicKeys__mocked; + bytes private obtainDepositData_signatures__mocked; + bool private obtainDepositData_useCustom__mocked; + function obtainDepositData( uint256 _depositsCount, bytes calldata ) external returns (bytes memory publicKeys, bytes memory signatures) { - publicKeys = new bytes(48 * _depositsCount); - signatures = new bytes(96 * _depositsCount); + if (obtainDepositData_useCustom__mocked) { + publicKeys = obtainDepositData_publicKeys__mocked; + signatures = obtainDepositData_signatures__mocked; + } else { + publicKeys = new bytes(48 * _depositsCount); + signatures = new bytes(96 * _depositsCount); + } + } + + function 
mock__obtainDepositData(bytes calldata publicKeys, bytes calldata signatures) external { + obtainDepositData_publicKeys__mocked = publicKeys; + obtainDepositData_signatures__mocked = signatures; + obtainDepositData_useCustom__mocked = true; + } + + function mock__obtainDepositDataReset() external { + obtainDepositData_useCustom__mocked = false; } event Mock__onExitedAndStuckValidatorsCountsUpdated(); diff --git a/test/0.8.25/contracts/StakingRouter__Harness.sol b/test/0.8.25/contracts/StakingRouter__Harness.sol new file mode 100644 index 0000000000..e8ac5a8d6b --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__Harness.sol @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {StakingRouter} from "contracts/0.8.25/sr/StakingRouter.sol"; +import {SRLib} from "contracts/0.8.25/sr/SRLib.sol"; +import {SRStorage} from "contracts/0.8.25/sr/SRStorage.sol"; +import {StakingModuleStatus, ModuleStateAccounting, RouterStateAccounting} from "contracts/0.8.25/sr/SRTypes.sol"; +import {StorageSlot} from "@openzeppelin/contracts-v5.2/utils/StorageSlot.sol"; +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; + +contract StakingRouter__Harness is StakingRouter { + using StorageSlot for bytes32; + using EnumerableSet for EnumerableSet.AddressSet; + + // Old storage slots (must match constants in old 0.8.9 StakingRouter and SRLib) + bytes32 internal constant WITHDRAWAL_CREDENTIALS_POSITION = keccak256("lido.StakingRouter.withdrawalCredentials"); + bytes32 internal constant LIDO_POSITION = keccak256("lido.StakingRouter.lido"); + bytes32 internal constant LAST_STAKING_MODULE_ID_POSITION = keccak256("lido.StakingRouter.lastStakingModuleId"); + bytes32 internal constant STAKING_MODULES_COUNT_POSITION = keccak256("lido.StakingRouter.stakingModulesCount"); + bytes32 internal constant CONTRACT_VERSION_POSITION = keccak256("lido.Versioned.contractVersion"); + + // New 
storage slots + // keccak256(abi.encode(uint256(keccak256("openzeppelin.storage.Initializable")) - 1)) & ~bytes32(uint256(0xff)) + bytes32 private constant INITIALIZABLE_STORAGE = 0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00; + + /// Mock values matching old 0.8.9 StakingRouter state + bytes32 public constant WC_01_MOCK = bytes32(0x0100000000000000000000001111111111111111111111111111111111111111); + address public constant LIDO_ADDRESS_MOCK = 0x2222222222222222222222222222222222222222; + uint256 public constant LAST_STAKING_MODULE_ID_MOCK = 1; + uint256 public constant STAKING_MODULES_COUNT_MOCK = 0; + uint256 public constant CONTRACT_VERSION_V3 = 3; + + constructor( + address _depositContract, + address _lido, + address _lidoLocator, + uint256 _maxEBType1, + uint256 _maxEBType2 + ) StakingRouter(_depositContract, _lido, _lidoLocator, _maxEBType1, _maxEBType2) {} + + /// @notice Simulates old 0.8.9 StakingRouter state before v4 migration. + /// Sets all old unstructured storage slots that _migrateStorage() reads and cleans up. 
+ function testing_initializeV3() external { + WITHDRAWAL_CREDENTIALS_POSITION.getBytes32Slot().value = WC_01_MOCK; + LIDO_POSITION.getAddressSlot().value = LIDO_ADDRESS_MOCK; + LAST_STAKING_MODULE_ID_POSITION.getUint256Slot().value = LAST_STAKING_MODULE_ID_MOCK; + STAKING_MODULES_COUNT_POSITION.getUint256Slot().value = STAKING_MODULES_COUNT_MOCK; + CONTRACT_VERSION_POSITION.getUint256Slot().value = CONTRACT_VERSION_V3; + } + + /// @notice Checks that old storage slots are cleaned up after migration + function testing_getOldLidoPosition() external view returns (address) { + return LIDO_POSITION.getAddressSlot().value; + } + + function testing_getOldWcPosition() external view returns (bytes32) { + return WITHDRAWAL_CREDENTIALS_POSITION.getBytes32Slot().value; + } + + function testing_getOldContractVersion() external view returns (uint256) { + return CONTRACT_VERSION_POSITION.getUint256Slot().value; + } + + function testing_getOldLastModuleIdPosition() external view returns (uint256) { + return LAST_STAKING_MODULE_ID_POSITION.getUint256Slot().value; + } + + function testing_getOldModulesCountPosition() external view returns (uint256) { + return STAKING_MODULES_COUNT_POSITION.getUint256Slot().value; + } + + /// @notice Grant a role inside the OLD AccessControl storage (OZ v4.4) + function testing_grantRoleOld(bytes32 role, address account) external { + _storageRoles()[role].members[account] = true; + _storageRoleMembers()[role].add(account); + } + + /// @notice Read a role grant from the OLD AccessControl storage (OZ v4.4) + function testing_hasRoleOld(bytes32 role, address account) external view returns (bool) { + return _storageRoles()[role].members[account]; + } + + function testing_getLastModuleId() public view returns (uint256) { + return SRStorage.getRouterState().lastModuleId; + } + + function testing_setVersion(uint256 version) public { + _getInitializableStorage_Mock()._initialized = uint64(version); + } + + function testing_setStakingModuleStatus(uint256 
_stakingModuleId, StakingModuleStatus _status) external { + SRLib._setModuleStatus(_stakingModuleId, _status); + } + + function testing_setStakingModuleAccounting( + uint256 _stakingModuleId, + uint64 validatorsBalanceGwei, + uint64 exitedValidatorsCount + ) external { + ModuleStateAccounting storage moduleAcc = SRStorage.getModuleState(_stakingModuleId).accounting; + RouterStateAccounting storage routerAcc = SRStorage.getRouterState().accounting; + + uint64 totalValidatorsBalanceGwei = routerAcc.validatorsBalanceGwei; + + // update totals incrementally as we iterate through the part of modules in general case + // 1. subtract old values + unchecked { + totalValidatorsBalanceGwei -= moduleAcc.validatorsBalanceGwei; + } + // 2. validate and add new values + + unchecked { + totalValidatorsBalanceGwei += validatorsBalanceGwei; + } + + routerAcc.validatorsBalanceGwei = totalValidatorsBalanceGwei; + + moduleAcc.validatorsBalanceGwei = validatorsBalanceGwei; + moduleAcc.exitedValidatorsCount = exitedValidatorsCount; + } + + function _getInitializableStorage_Mock() private pure returns (InitializableStorage storage $) { + assembly { + $.slot := INITIALIZABLE_STORAGE + } + } + + // OZ AccessControl v.4.4 + + struct RoleDataOld { + mapping(address => bool) members; + bytes32 adminRole; + } + + /// @dev OZ AccessControlEnumerable _roleMembers mapping storage reference + function _storageRoleMembers() private pure returns (mapping(bytes32 => EnumerableSet.AddressSet) storage $) { + bytes32 position = keccak256("openzeppelin.AccessControlEnumerable._roleMembers"); + assembly { + $.slot := position + } + } + + /// @dev OZ AccessControl _roles mapping storage reference + function _storageRoles() private pure returns (mapping(bytes32 => RoleDataOld) storage $) { + bytes32 position = keccak256("openzeppelin.AccessControl._roles"); + assembly { + $.slot := position + } + } +} diff --git a/test/0.8.25/contracts/StakingRouter__MockForConsolidationMigrator.sol 
b/test/0.8.25/contracts/StakingRouter__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..44fb9b29f6 --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__MockForConsolidationMigrator.sol @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract StakingRouter__MockForConsolidationMigrator { + struct StakingModule { + uint24 id; + address stakingModuleAddress; + uint16 stakingModuleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint8 status; + string name; + uint64 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint16 priorityExitShareThreshold; + uint64 maxDepositsPerBlock; + uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; + } + + mapping(uint256 => StakingModule) internal _modules; + + function mock__setStakingModule(uint256 moduleId, address moduleAddress) external { + _modules[moduleId] = StakingModule({ + id: uint24(moduleId), + stakingModuleAddress: moduleAddress, + stakingModuleFee: 0, + treasuryFee: 0, + stakeShareLimit: 0, + status: 0, + name: "", + lastDepositAt: 0, + lastDepositBlock: 0, + exitedValidatorsCount: 0, + priorityExitShareThreshold: 0, + maxDepositsPerBlock: 0, + minDepositBlockDistance: 0, + withdrawalCredentialsType: 0 + }); + } + + function getStakingModule(uint256 _stakingModuleId) external view returns (StakingModule memory) { + return _modules[_stakingModuleId]; + } +} diff --git a/test/0.8.25/contracts/StakingRouter__MockForTopUpGateway.sol b/test/0.8.25/contracts/StakingRouter__MockForTopUpGateway.sol new file mode 100644 index 0000000000..949b272031 --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__MockForTopUpGateway.sol @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +contract StakingRouter__MockForTopUpGateway { + mapping(uint256 => bytes32) internal withdrawalCredentials; + mapping(uint256 => bool) internal moduleExists; + mapping(uint256 => 
bool) internal moduleIsActive; + + event TopUpCalled( + uint256 stakingModuleId, + uint256[] keyIndices, + uint256[] operatorIds, + bytes[] pubkeys, + uint256[] topUpLimits + ); + + uint256 public topUpCalls; + + function setWithdrawalCredentials(uint256 moduleId, bytes32 wc) external { + withdrawalCredentials[moduleId] = wc; + moduleExists[moduleId] = true; + moduleIsActive[moduleId] = true; + } + + function setModuleActive(uint256 moduleId, bool active) external { + moduleExists[moduleId] = true; + moduleIsActive[moduleId] = active; + } + + function getStakingModuleWithdrawalCredentials(uint256 moduleId) external view returns (bytes32) { + return withdrawalCredentials[moduleId]; + } + + function hasStakingModule(uint256 moduleId) public view returns (bool) { + return moduleExists[moduleId]; + } + + function canDeposit(uint256 _stakingModuleId) external view returns (bool) { + return hasStakingModule(_stakingModuleId) && getStakingModuleIsActive(_stakingModuleId); + } + + function getStakingModuleIsActive(uint256 moduleId) public view returns (bool) { + return moduleIsActive[moduleId]; + } + + function topUp( + uint256 _stakingModuleId, + uint256[] calldata _keyIndices, + uint256[] calldata _operatorIds, + bytes[] calldata _pubkeys, + uint256[] calldata _topUpLimits + ) external { + unchecked { + ++topUpCalls; + } + + emit TopUpCalled(_stakingModuleId, _keyIndices, _operatorIds, _pubkeys, _topUpLimits); + } +} diff --git a/test/0.8.25/contracts/TargetModule__MockForConsolidationMigrator.sol b/test/0.8.25/contracts/TargetModule__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..1b53e90d8e --- /dev/null +++ b/test/0.8.25/contracts/TargetModule__MockForConsolidationMigrator.sol @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +/** + * @dev Mock for target staking module (CMv2/NOR) for ConsolidationMigrator tests + */ +contract TargetModule__MockForConsolidationMigrator { + 
uint256 public constant PUBKEY_LENGTH = 48; + + struct NodeOperatorData { + uint256 totalDepositedValidators; + bytes[] pubkeys; + } + + // operatorId => data + mapping(uint256 => NodeOperatorData) internal _operators; + + function mock__setOperatorData( + uint256 operatorId, + uint256 totalDepositedValidators, + bytes[] calldata pubkeys + ) external { + _operators[operatorId].totalDepositedValidators = totalDepositedValidators; + delete _operators[operatorId].pubkeys; + for (uint256 i = 0; i < pubkeys.length; ++i) { + _operators[operatorId].pubkeys.push(pubkeys[i]); + } + } + + function getNodeOperatorSummary( + uint256 _nodeOperatorId + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) + { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + totalDepositedValidators = op.totalDepositedValidators; + // Other values are zero by default + return (0, 0, 0, 0, 0, 0, totalDepositedValidators, 0); + } + + // CMv2 interface + function getSigningKeys( + uint256 _nodeOperatorId, + uint256 _offset, + uint256 _limit + ) external view returns (bytes memory pubkeys) { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + + pubkeys = new bytes(_limit * PUBKEY_LENGTH); + + for (uint256 i = 0; i < _limit; ++i) { + uint256 keyIndex = _offset + i; + if (keyIndex < op.pubkeys.length) { + bytes storage key = op.pubkeys[keyIndex]; + for (uint256 j = 0; j < PUBKEY_LENGTH; ++j) { + pubkeys[i * PUBKEY_LENGTH + j] = key[j]; + } + } + } + + return pubkeys; + } +} diff --git a/test/0.8.25/contracts/TopUpGateway__Harness.sol b/test/0.8.25/contracts/TopUpGateway__Harness.sol new file mode 100644 index 0000000000..cd4026e308 --- /dev/null +++ b/test/0.8.25/contracts/TopUpGateway__Harness.sol @@ -0,0 +1,49 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +import {TopUpGateway} from "contracts/0.8.25/TopUpGateway.sol"; +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/TopUpWitness.sol"; + +contract TopUpGateway__Harness is TopUpGateway { + constructor( + address _lidoLocator, + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot, + uint256 _slotsPerEpoch + ) TopUpGateway(_lidoLocator, _gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot, _slotsPerEpoch) {} + + function harness_setLastTopUpData() external { + _setLastTopUpData(); + } + + function harness_setLastTopUpTimestamp(uint256 _timestamp) external { + _gatewayStorage().lastTopUpTimestamp = uint32(_timestamp); + } + + function harness_setLastTopUpBlock(uint256 _block) external { + _gatewayStorage().lastTopUpBlock = uint32(_block); + } + + function harness_setMaxValidatorsPerTopUp(uint256 newValue) external { + _setMaxValidatorsPerTopUp(newValue); + } + + function harness_setMinBlockDistance(uint256 newValue) external { + _setMinBlockDistance(newValue); + } + + function harness_getLocator() external view returns (address) { + return address(LOCATOR); + } + + function _verifyValidator( + BeaconRootData calldata, + ValidatorWitness calldata, + uint256, + bytes32 + ) internal view override { + // no-op for harness; verification is covered separately + } +} diff --git a/test/0.8.25/contracts/WithdrawalVault__MockForConsolidationGateway.sol b/test/0.8.25/contracts/WithdrawalVault__MockForConsolidationGateway.sol new file mode 100644 index 0000000000..bc4c4b0c8f --- /dev/null +++ b/test/0.8.25/contracts/WithdrawalVault__MockForConsolidationGateway.sol @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract WithdrawalVault__MockForConsolidationGateway { + event AddConsolidationRequestsCalled(bytes[] sourcePubkeys, bytes[] 
targetPubkeys); + + uint256 internal _fee; + + constructor() { + _fee = 1; + } + + function addConsolidationRequests(bytes[] calldata sourcePubkeys, bytes[] calldata targetPubkeys) external payable { + emit AddConsolidationRequestsCalled(sourcePubkeys, targetPubkeys); + } + + function getConsolidationRequestFee() external view returns (uint256) { + return _fee; + } + + function mock__setFee(uint256 fee) external { + _fee = fee; + } +} diff --git a/test/0.8.25/srv3/clValidatorProofVerifier.test.ts b/test/0.8.25/srv3/clValidatorProofVerifier.test.ts new file mode 100644 index 0000000000..c7f880f96b --- /dev/null +++ b/test/0.8.25/srv3/clValidatorProofVerifier.test.ts @@ -0,0 +1,523 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { CLValidatorVerifier__Harness, SSZValidatorsMerkleTree } from "typechain-types"; + +import { generateBeaconHeader, generateValidator, randomBytes32, setBeaconBlockRoot } from "lib/pdg"; +import { prepareLocalMerkleTree } from "lib/top-ups"; + +const STATIC_VALIDATOR = { + blockRoot: "0xbe928e3a9fa76b916df79d78a8b67237f9b133269bb421f37490b7624abad452", + gIFirstValidator: "0x0000000000000000000000000000000000000000000000000096000000000028", + beaconRootData: { + childBlockTimestamp: 1769723675n, + slot: 13574970n, + proposerIndex: 1704508n, + }, + validators: [ + { + index: 12345, + witness: { + proofValidator: [ + "0x216b6e8fa6cc4f005b56c12afdf98fad45ece56133c8e460fa4141d4003776aa", + "0xc9cd3df16c39ee2ab805653e93aa7c66dfa8b4313b42367e0e2c93b97c467a7c", + "0xbccc857f25b04e4ffbfb3bb4a739f2ee21668f9ac5e6d6ffe243a83bd53773dd", + "0x9428eb489f519010c69549cec7acc9e93ed5be99de26feda1434d36821ae325d", + "0x286483026731535ec459bbe6299db5d838261f2da5cbafd85630bca4e8ebebb0", + "0x8b6f3cb97fe65b7cdbda7ee19b403bc148a6f4c185b2e06aa24f26696edf9274", + "0x2502294ada8a819553c36c45e10f6d37230b1bfe4a60c3b122e25ec7687e7b06", + "0x71d33773e8b437e94c30b30980472ef59686fc07c79eb513dae455c2b3feeddb", + 
"0x4fa851a66a442c140c6cb5d038ee03e9f2538780d455c57cb752eca56e874f2a", + "0x9e4db5b11d21e0d57de169caa2a129555275cf59e612cefea0da82d9f2a9b56a", + "0x7d95d434555b5cbfac0c34585b232314a53cc11a3f80cab5a3bd3c8824247e08", + "0x94d35de0bc90861fef95220f1dc8bd90738f2057ac801454ca7a81ecdc2f5a0e", + "0xf9161f3c69d468aae3ae78deae56e59e4a9722dae2dab2d8427e16ec401acafa", + "0xfdfde41dd4fbee3943abf104c54689f1587e821f018ddee1ce6838d4f1fe3024", + "0x6f4a3562c9b16e8e63d5b956a3305b37efb4e716777867bbf22825e9185b67b4", + "0x2ba6010fd77fc624970171c55647a13f75680122802f168fe16553a4bf251d33", + "0xb8a2b7d9d041154028a82877bdc2b4adb76475d4797f3ac586d319c76644309e", + "0xfb2f06c2b4c43f7252844db5fff60e0bfc207bdcecaae2fc53c37f1b9e03e50a", + "0x32c59b5c8c804d2a3c4c72415f1afc3d0db5c80c0bd5a8e404150121ad340abe", + "0x9a07eeffcc8578a939d457d107ec733bf3b121a7ff9f84e179931ee9237be7cb", + "0xf302fc1c45667fe834ab5537774ae4679dd6d9d4fca3e0a6b6dc6d6dd84d48ba", + "0xff6fa857e6a6b00c6f71ea4c5bf522535561ca25abd32389677b26c5a4b140df", + "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7", + "0xc6f67e02e6e4e1bdefb994c6098953f34636ba2b6ca20a4721d2b26a886722ff", + "0x1c9a7e5ff1cf48b4ad1582d3f4e4a1004f3b20d8c5a2b71387a4254ad933ebc5", + "0x2f075ae229646b6f6aed19a5e372cf295081401eb893ff599b3f9acc0c0d3e7d", + 
"0x328921deb59612076801e8cd61592107b5c67c79b846595cc6320c395b46362c", + "0xbfb909fdb236ad2411b4e4883810a074b840464689986c3f8a8091827e17c327", + "0x55d8fb3687ba3ba49f342c77f5a1f89bec83d811446e1a467139213d640b6a74", + "0xf7210d4f8e7e1039790e7bf4efa207555a10a6db1dd4b95da313aaa88b88fe76", + "0xad21b516cbc645ffe34ab5de1c8aef8cd4e7f8d2b51e8e1456adc7563cda206f", + "0x5e8d210000000000000000000000000000000000000000000000000000000000", + "0xc6341f0000000000000000000000000000000000000000000000000000000000", + "0x797d496cea42b783b4ada624d44fa8d0fd7ff09214509c1fab9dd7618dec8db3", + "0xd492b2a4246027ef1a1fa848bbae345f077680e86b5fe0a394251b63da1f9381", + "0x044cd392c78edc7bbda4544fa482c11effa29ac38ea4c87a4dd11bb0b4f5e0b5", + "0x4db7cb7fae529d04f8c42467d5ab71190aba9ef6982e8c5aecfb682eaaf0024e", + "0x79a3cf55bfd7c33308555b76aec5b6b6dfa1c4b628773cd0657a5bd00c9255d5", + "0xa78bc2eae77405eb3badf1a31e7c5b46cf44e0fb90b25c1ac3e39d9368c73ac3", + "0x4a4eb09f597003c58696430554b7154a878f31c09422870e70aa3b34c928e30c", + "0x420db8b9116cae945235fb92dd224c30bda527f31b71e859e8be5ad8b33f83ba", + ], + pubkey: "0x80773a007f9e496a196b8f28fae04ddaa72fa65c0f8a98145a1e192082c3edcf7cee891ccf1d6b6fee0abe0045b9f61b", + effectiveBalance: 0n, + activationEligibilityEpoch: 0n, + activationEpoch: 0n, + exitEpoch: 400136n, + withdrawableEpoch: 400392n, + slashed: false, + }, + }, + { + index: 67890, + witness: { + proofValidator: [ + "0x48c9ab2d18314cc8b31d343abaf430e32165ffece1333c4b30598c3e653bb8c4", + "0xe1150bdb10f20186ac2d48c874bcd8ee07201d1082351e56ad6b232b6ede0ed9", + "0x0e51272c8f40696dd2a1af27f1b4941676e778bd015fe03aee65901ba576da74", + "0x8ee28dca9c22ac9c0ffe72524012dde0e36ae3b421768fa08bcfb78231515aeb", + "0x6ac30c0e3188ecdb2c7f7ad4c27e936bb449d0c2d89d43a6f0c4348b5d3f8da9", + "0x2ef2d2287454ef3bb5066e139c4dfb3042b423c3c05e379d8885fd5feb205827", + "0x9ac6b7bba6408f9e9be2f9a0fca03771d3a5f1c817314c90a0402b868319fb5f", + "0x00acd7dfea9c4c686a6ccd697ddd9d3dbe44b771c6bc7fa73943b23256320b34", + 
"0x88e66715b98a5621b58dc5d9baa5fb4a9c07cd8e28a2ff771e02d1ddc7b1af52", + "0xc3c233aa48ac7d546942cd42a162e12276fe8a061df5bb07adc8c0d1f1e5f94a", + "0xdc76934849f2b932a88326e3ff1aa140c042bc541a3453e5d0909c2f4378850a", + "0x57455c42a8f749c6c849c9ba01b2279d192ab1e18ee8319ace30bd549c375349", + "0x99276db9294041d58d89ce524a33a4d60bc2b5019bf6bc3d6de02e952f6e34bc", + "0xfb73dc74945a6f29dd4b3f1b0fa938f77926e4a1256ea3407067cf66ff284af4", + "0x19641f85d86a45b1bff5d9792c0a622328f645815e32184e1b0c13bd2eedd0f0", + "0x261abd8edbccfb08a970621e9330115f97177619f9f657c091d6b05b2056a59d", + "0x2f015c6b4fc7f03cbd3366bbbf96574901b81742af5e98b0f01cc50705b25ceb", + "0xfb2f06c2b4c43f7252844db5fff60e0bfc207bdcecaae2fc53c37f1b9e03e50a", + "0x32c59b5c8c804d2a3c4c72415f1afc3d0db5c80c0bd5a8e404150121ad340abe", + "0x9a07eeffcc8578a939d457d107ec733bf3b121a7ff9f84e179931ee9237be7cb", + "0xf302fc1c45667fe834ab5537774ae4679dd6d9d4fca3e0a6b6dc6d6dd84d48ba", + "0xff6fa857e6a6b00c6f71ea4c5bf522535561ca25abd32389677b26c5a4b140df", + "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7", + "0xc6f67e02e6e4e1bdefb994c6098953f34636ba2b6ca20a4721d2b26a886722ff", + "0x1c9a7e5ff1cf48b4ad1582d3f4e4a1004f3b20d8c5a2b71387a4254ad933ebc5", + "0x2f075ae229646b6f6aed19a5e372cf295081401eb893ff599b3f9acc0c0d3e7d", + 
"0x328921deb59612076801e8cd61592107b5c67c79b846595cc6320c395b46362c", + "0xbfb909fdb236ad2411b4e4883810a074b840464689986c3f8a8091827e17c327", + "0x55d8fb3687ba3ba49f342c77f5a1f89bec83d811446e1a467139213d640b6a74", + "0xf7210d4f8e7e1039790e7bf4efa207555a10a6db1dd4b95da313aaa88b88fe76", + "0xad21b516cbc645ffe34ab5de1c8aef8cd4e7f8d2b51e8e1456adc7563cda206f", + "0x5e8d210000000000000000000000000000000000000000000000000000000000", + "0xc6341f0000000000000000000000000000000000000000000000000000000000", + "0x797d496cea42b783b4ada624d44fa8d0fd7ff09214509c1fab9dd7618dec8db3", + "0xd492b2a4246027ef1a1fa848bbae345f077680e86b5fe0a394251b63da1f9381", + "0x044cd392c78edc7bbda4544fa482c11effa29ac38ea4c87a4dd11bb0b4f5e0b5", + "0x4db7cb7fae529d04f8c42467d5ab71190aba9ef6982e8c5aecfb682eaaf0024e", + "0x79a3cf55bfd7c33308555b76aec5b6b6dfa1c4b628773cd0657a5bd00c9255d5", + "0xa78bc2eae77405eb3badf1a31e7c5b46cf44e0fb90b25c1ac3e39d9368c73ac3", + "0x4a4eb09f597003c58696430554b7154a878f31c09422870e70aa3b34c928e30c", + "0x420db8b9116cae945235fb92dd224c30bda527f31b71e859e8be5ad8b33f83ba", + ], + pubkey: "0x85c12b9cd79c0fd7712db78245d14583c465e7c4cf4045b83ca34b1f148d85a1fe16dd2004f3332e8dc6312793f5db4a", + effectiveBalance: 0n, + activationEligibilityEpoch: 7074n, + activationEpoch: 11751n, + exitEpoch: 195058n, + withdrawableEpoch: 195314n, + slashed: false, + }, + }, + ], +}; + +describe("CLTopUpProofVerifier", () => { + let sszMerkleTree: SSZValidatorsMerkleTree; + let gIFirstValidator: string; + let firstValidatorLeafIndex: bigint; + let verifier: CLValidatorVerifier__Harness; + + before(async () => { + // 1) Build a local SSZ tree once + const localTree = await prepareLocalMerkleTree(); + sszMerkleTree = localTree.stateTree; + gIFirstValidator = localTree.gIFirstValidator; + firstValidatorLeafIndex = localTree.firstValidatorLeafIndex; + + // populate merkle tree with validators + for (let i = 1; i < 100; i++) { + const v = generateValidator().container; + await 
sszMerkleTree.addValidatorsLeaf(v); + } + + // 2) Deploy the verifier (same GI for prev/curr, no pivot) + verifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + gIFirstValidator, // GI_FIRST_VALIDATOR_PREV + gIFirstValidator, // GI_FIRST_VALIDATOR_CURR + 0, // PIVOT_SLOT + ]); + }); + + it("verifies full Validator container at head under EIP-4788", async () => { + // 1) Create an 'active' validator at the target epoch + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 2n; + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + + const expectedWC = v.container.withdrawalCredentials; + + // Insert validator into the local SSZ tree + await sszMerkleTree.addValidatorsLeaf(v.container); + + // Compute its index in validators[i] + const leafCount = await sszMerkleTree.leafCount(); + // Index = (current leaves - 1) - firstValidatorLeafIndex + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + + // Anchor the current state_root into EIP-4788 via a header at SLOT + const SLOT = 3200; // epoch = 100 (greater than activationEpoch) + const stateRoot = await sszMerkleTree.getStateRoot(); + const beaconBlockHeader = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(beaconBlockHeader); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + // Build proof: + // - stateProof: validators[i] → validators_root → state_root + // - headerProof: state_root → … → beacon_block_root (contains parent(slot, proposer) node) + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + + // state_root -> beacon_block_root + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(beaconBlockHeader); + const proofValidator = [...validator_proofs, 
...headerMerkle.proof]; + + const beaconRootData = { + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + }; + + // 2) Validator witness (validator container only) + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + // 4) Call harness + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + + // 5) Negative: wrong WC must fail + const wrongWC = "0x" + "11".repeat(32); + await expect(verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, wrongWC)).to.be + .reverted; + }); + + it("don't revert with ValidatorIsSlashed when slashed = true", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + + v.container.slashed = true; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 2n; + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + + const expectedWC = v.container.withdrawalCredentials; + + await sszMerkleTree.addValidatorsLeaf(v.container); + + const leafCount = await sszMerkleTree.leafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + + const SLOT = 3200; // epoch = 100 + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + // validator[i] -> validators_root -> state_root' + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + + 
const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + + const beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await expect(verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC)).to.not.be + .rejected; + }); + + it("don't revert when activationEpoch > epoch(slot)", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 101n; // > epoch(slot=3200)=100 + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + + const expectedWC = v.container.withdrawalCredentials; + + await sszMerkleTree.addValidatorsLeaf(v.container); + + const leafCount = await sszMerkleTree.leafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + + const SLOT = 3200; + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + + const beaconRootData = { + 
childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + }); + + it("don't reverts when activationEpoch == epoch(slot)", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 100n; // == epoch(slot) + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + const expectedWC = v.container.withdrawalCredentials; + await sszMerkleTree.addValidatorsLeaf(v.container); + const leafCount = await sszMerkleTree.leafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + const SLOT = 3200; // epoch=100 + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + const beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: 
v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + }); + + it("don't revert when a validator with non-FAR_FUTURE exitEpoch (proof mismatch)", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + v.container.slashed = false; + v.container.activationEligibilityEpoch = 70n; + v.container.activationEpoch = 90n; + const SLOT = 3200; // epoch(slot) = 100 + v.container.exitEpoch = 101n; // + v.container.withdrawableEpoch = FAR_FUTURE; + const expectedWC = v.container.withdrawalCredentials; + await sszMerkleTree.addValidatorsLeaf(v.container); + const leafCount = await sszMerkleTree.leafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + const beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: 
v.container.withdrawableEpoch, + }; + + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + }); + + it("should verify static validator 12345 with real mainnet proof", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[0]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + await staticVerifier.TEST_verifyValidator( + beaconRootData, + v.witness, + v.index, + "0x010000000000000000000000ddc6ed6e6a9c1e55c87b155b9a40bac4721a6dac", + ); + }); + + it("should verify static validator 67890 with real mainnet proof", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[1]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + await staticVerifier.TEST_verifyValidator( + beaconRootData, + v.witness, + v.index, + "0x010000000000000000000000210b3cb99fa1de0a64085fa80e18c22fe4722a1b", + ); + }); + + it("should reject static validator with wrong withdrawal credentials", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[0]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + const wrongWC = "0x" + "11".repeat(32); + await 
expect(staticVerifier.TEST_verifyValidator(beaconRootData, v.witness, v.index, wrongWC)).to.be.reverted; + }); + + it("should reject static validator with fake proof", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[0]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + const tamperedWitness = { + ...v.witness, + proofValidator: [...v.witness.proofValidator], + }; + tamperedWitness.proofValidator[0] = "0x" + "aa".repeat(32); + + await expect( + staticVerifier.TEST_verifyValidator( + beaconRootData, + tamperedWitness, + v.index, + "0x010000000000000000000000ddc6ed6e6a9c1e55c87b155b9a40bac4721a6dac", + ), + ).to.be.reverted; + }); + + it("should change gIndex on pivot slot", async () => { + const pivotSlot = 1000; + const giPrev = randomBytes32(); + const giCurr = randomBytes32(); + + const proofVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [giPrev, giCurr, pivotSlot], {}); + expect(await proofVerifier.TEST_getValidatorGI(0n, pivotSlot - 1)).to.equal(giPrev); + expect(await proofVerifier.TEST_getValidatorGI(0n, pivotSlot)).to.equal(giCurr); + expect(await proofVerifier.TEST_getValidatorGI(0n, pivotSlot + 1)).to.equal(giCurr); + }); +}); diff --git a/test/0.8.25/srv3/contracts/CLValidatorVerifier__Harness.sol b/test/0.8.25/srv3/contracts/CLValidatorVerifier__Harness.sol new file mode 100644 index 0000000000..129600062a --- /dev/null +++ b/test/0.8.25/srv3/contracts/CLValidatorVerifier__Harness.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {CLValidatorVerifier} from 
"contracts/0.8.25/CLValidatorVerifier.sol"; +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/TopUpWitness.sol"; + +contract CLValidatorVerifier__Harness is CLValidatorVerifier { + constructor( + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot + ) CLValidatorVerifier(_gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot) {} + + function TEST_verifyValidator( + BeaconRootData calldata beaconData, + ValidatorWitness calldata vw, + uint256 validatorIndex, + bytes32 withdrawalCredentials + ) public view { + _verifyValidator(beaconData, vw, validatorIndex, withdrawalCredentials); + } + + function TEST_getParentBlockRoot(uint64 parentBlockTimestamp) public view returns (bytes32) { + return _getParentBlockRoot(parentBlockTimestamp); + } + + function TEST_getValidatorGI(uint256 offset, uint64 slot) public view returns (GIndex) { + return _getValidatorGI(offset, slot); + } +} diff --git a/test/0.8.25/srv3/contracts/SSZValidatorsMerkleTree.sol b/test/0.8.25/srv3/contracts/SSZValidatorsMerkleTree.sol new file mode 100644 index 0000000000..29155bf3d1 --- /dev/null +++ b/test/0.8.25/srv3/contracts/SSZValidatorsMerkleTree.sol @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {GIndex, pack, concat, fls} from "contracts/common/lib/GIndex.sol"; +import {SSZ} from "contracts/common/lib/SSZ.sol"; +import {BLS12_381} from "contracts/common/lib/BLS.sol"; +import {SSZBLSHelpers} from "../../vaults/predepositGuarantee/contracts/SSZBLSHelpers.sol"; + +/// Merkle tree implementation aligned with CL state tree structure +/// NOT gas optimized, for testing purposes only +contract SSZValidatorsMerkleTree is SSZBLSHelpers { + uint256 public immutable TREE_DEPTH; + + uint256 public leafCount = 0; + + uint256 public immutable VALIDATORS_BASE_INDEX; + + mapping(uint256 => bytes32) public nodes; + + /// @notice Initializes the Merkle tree with a given depth and 
pre-filled nodes so GIndex can closely match CL + constructor(GIndex validatorsBase) { + TREE_DEPTH = depth(validatorsBase); + + // offset to the start of validators field in the state tree + leafCount = validatorsBase.index() - (1 << TREE_DEPTH); + + VALIDATORS_BASE_INDEX = validatorsBase.index(); + } + + /// @notice Adds a new leaf to the validators tree + /// @param validator The leaf value + /// @return index The index of the added leaf + function addValidatorsLeaf(SSZBLSHelpers.Validator calldata validator) public returns (uint256) { + bytes32 leaf = validatorHashTreeRootCalldata(validator); + + require(leafCount < (1 << TREE_DEPTH), "Tree is full"); + + uint256 gi = (1 << TREE_DEPTH) + leafCount; + nodes[gi] = leaf; + leafCount++; + + _updateTree(gi); + + return gi; + } + + function getStateRoot() public view returns (bytes32) { + return nodes[1]; + } + + function getValidatorProof(uint256 leafIndex) public view returns (bytes32[] memory) { + require(leafIndex < leafCount, "Invalid leaf index"); + uint256 gi = (1 << TREE_DEPTH) + leafIndex; + return _getMerkleProof(gi); + } + + /// generalized index for validators[position] + function getValidatorGeneralizedIndex(uint256 position) public view returns (GIndex) { + require(position < (1 << TREE_DEPTH), "Invalid position"); + uint256 gi = (1 << TREE_DEPTH) + position; + return pack(gi, uint8(TREE_DEPTH)); + } + + /// @notice Computes and returns the Merkle proof for a given *global* index + function _getMerkleProof(uint256 index) internal view returns (bytes32[] memory) { + bytes32[] memory proof = new bytes32[](TREE_DEPTH); + + for (uint256 i = 0; i < TREE_DEPTH; ++i) { + uint256 siblingIndex = index % 2 == 0 ? 
index + 1 : index - 1; + proof[i] = nodes[siblingIndex]; + index /= 2; + } + + return proof; + } + + /// @dev Updates the tree after adding a leaf + /// @param index The index of the new leaf + function _updateTree(uint256 index) internal { + while (index > 1) { + uint256 parentIndex = index / 2; + uint256 siblingIndex = index % 2 == 0 ? index + 1 : index - 1; + + bytes32 left = nodes[index % 2 == 0 ? index : siblingIndex]; + bytes32 right = nodes[index % 2 == 0 ? siblingIndex : index]; + + nodes[parentIndex] = sha256(abi.encodePacked(left, right)); + + index = parentIndex; + } + } +} diff --git a/test/0.8.25/stakingRouter/helpers/index.ts b/test/0.8.25/stakingRouter/helpers/index.ts new file mode 100644 index 0000000000..6d2d2b69d2 --- /dev/null +++ b/test/0.8.25/stakingRouter/helpers/index.ts @@ -0,0 +1,127 @@ +import { expect } from "chai"; +import { randomBytes } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + StakingModule__MockForStakingRouter, + StakingModuleV2__MockForStakingRouter, + StakingRouter__Harness, +} from "typechain-types"; + +import { wcTypeMaxEB } from "lib"; +import { ONE_GWEI, StakingModuleStatus, TOTAL_BASIS_POINTS, WithdrawalCredentialsType } from "lib/constants"; + +export const DEFAULT_CONFIG: ModuleConfig = { + stakeShareLimit: TOTAL_BASIS_POINTS, + priorityExitShareThreshold: TOTAL_BASIS_POINTS, + moduleFee: 5_00n, + treasuryFee: 5_00n, + maxDepositsPerBlock: 150n, + minDepositBlockDistance: 25n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, +}; +export const DEFAULT_MEB = wcTypeMaxEB(DEFAULT_CONFIG.withdrawalCredentialsType); + +type SetupModuleResult = T extends WithdrawalCredentialsType.WC0x02 + ? [StakingModuleV2__MockForStakingRouter, bigint] + : T extends WithdrawalCredentialsType.WC0x01 + ? 
[StakingModule__MockForStakingRouter, bigint] + : [StakingModule__MockForStakingRouter | StakingModuleV2__MockForStakingRouter, bigint]; + +export async function setupModule( + ctx: CtxConfig, + cfg: ModuleConfig & { withdrawalCredentialsType: T }, +): Promise>; + +export async function setupModule( + { stakingRouter, admin, deployer }: CtxConfig, + { + stakeShareLimit, + priorityExitShareThreshold, + moduleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + exited = 0n, + deposited = 0n, + depositable = 0n, + status = StakingModuleStatus.Active, + withdrawalCredentialsType = WithdrawalCredentialsType.WC0x01, + validatorsBalanceGwei = 0n, + totalModuleStake = 0n, + }: ModuleConfig, +): Promise<[StakingModule__MockForStakingRouter | StakingModuleV2__MockForStakingRouter, bigint]> { + const modulesCount = await stakingRouter.getStakingModulesCount(); + const moduleId = modulesCount + 1n; + + const stakingModuleConfig = { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee: moduleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType, + }; + + const initializeModule = async ( + module: StakingModule__MockForStakingRouter | StakingModuleV2__MockForStakingRouter, + ) => { + await stakingRouter + .connect(admin) + .addStakingModule(randomBytes(8).toString(), await module.getAddress(), stakingModuleConfig); + + expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCount + 1n); + + await module.mock__getStakingModuleSummary(exited, deposited, depositable); + if (validatorsBalanceGwei == 0n && deposited > 0n) { + validatorsBalanceGwei = (deposited * wcTypeMaxEB(withdrawalCredentialsType)) / ONE_GWEI; + } + await stakingRouter.testing_setStakingModuleAccounting(moduleId, validatorsBalanceGwei, exited); + + if (status != StakingModuleStatus.Active) { + await stakingRouter.setStakingModuleStatus(moduleId, status); + } + }; + + if (withdrawalCredentialsType === 
WithdrawalCredentialsType.WC0x02) { + const module = await ethers.deployContract("StakingModuleV2__MockForStakingRouter", deployer); + await initializeModule(module); + + if (totalModuleStake == 0n && deposited > 0n) { + totalModuleStake = deposited * wcTypeMaxEB(WithdrawalCredentialsType.WC0x01); + } + await module.mock__getTotalModuleStake(totalModuleStake); + + return [module, moduleId]; + } + + const module = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); + await initializeModule(module); + + return [module, moduleId]; +} + +export interface CtxConfig { + deployer: HardhatEthersSigner; + admin: HardhatEthersSigner; + stakingRouter: StakingRouter__Harness; +} + +export interface ModuleConfig { + stakeShareLimit: bigint; + priorityExitShareThreshold: bigint; + moduleFee: bigint; + treasuryFee: bigint; + maxDepositsPerBlock: bigint; + minDepositBlockDistance: bigint; + withdrawalCredentialsType: WithdrawalCredentialsType; + exited?: bigint; + deposited?: bigint; + depositable?: bigint; + status?: StakingModuleStatus; + validatorsBalanceGwei?: bigint; + totalModuleStake?: bigint; +} diff --git a/test/0.8.9/stakingRouter/stakingRouter.exit.test.ts b/test/0.8.25/stakingRouter/stakingRouter.exit.test.ts similarity index 63% rename from test/0.8.9/stakingRouter/stakingRouter.exit.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.exit.test.ts index bf5e78656d..d4746ac8c7 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.exit.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.exit.test.ts @@ -1,34 +1,34 @@ import { expect } from "chai"; -import { hexlify, randomBytes } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { - DepositContract__MockForBeaconChainDepositor, - StakingModule__MockForTriggerableWithdrawals, - StakingRouter__Harness, -} from "typechain-types"; +import { LidoLocator, StakingModule__MockForTriggerableWithdrawals, 
StakingRouter__Harness } from "typechain-types"; -import { certainAddress, ether, proxify, randomString } from "lib"; +import { certainAddress, ether, randomString, randomWCType1, WithdrawalCredentialsType } from "lib"; +import { deployLidoLocator } from "test/deploy"; import { Snapshot } from "test/suite"; +import { deployStakingRouter } from "../../deploy/stakingRouter"; + describe("StakingRouter.sol:exit", () => { let deployer: HardhatEthersSigner; - let proxyAdmin: HardhatEthersSigner; + let admin: HardhatEthersSigner; let stakingRouterAdmin: HardhatEthersSigner; let user: HardhatEthersSigner; let reporter: HardhatEthersSigner; - let depositContract: DepositContract__MockForBeaconChainDepositor; + let locator: LidoLocator; let stakingRouter: StakingRouter__Harness; let stakingModule: StakingModule__MockForTriggerableWithdrawals; let originalState: string; const lido = certainAddress("test:staking-router:lido"); - const withdrawalCredentials = hexlify(randomBytes(32)); + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + const withdrawalCredentials = randomWCType1(); const STAKE_SHARE_LIMIT = 1_00n; const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; const MODULE_FEE = 5_00n; @@ -39,21 +39,19 @@ describe("StakingRouter.sol:exit", () => { const NODE_OPERATOR_ID = 1n; before(async () => { - [deployer, proxyAdmin, stakingRouterAdmin, user, reporter] = await ethers.getSigners(); - - depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + [deployer, admin, stakingRouterAdmin, user, 
reporter] = await ethers.getSigners(); + + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - [stakingRouter] = await proxify({ impl, admin: proxyAdmin, caller: user }); + // deploy staking router + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); // Initialize StakingRouter - await stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials); + await stakingRouter.initialize(stakingRouterAdmin.address, withdrawalCredentials); // Deploy mock staking module stakingModule = await ethers.deployContract("StakingModule__MockForTriggerableWithdrawals", deployer); @@ -63,19 +61,36 @@ describe("StakingRouter.sol:exit", () => { .connect(stakingRouterAdmin) .grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), stakingRouterAdmin); + const stakingModuleConfig = { + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%). + stakeShareLimit: STAKE_SHARE_LIMIT, + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%) and + /// greater than or equal to `stakeShareLimit`. + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + /// @notice Part of the fee taken from staking rewards that goes to the staking module, in BP. + /// @dev Together with `treasuryFee`, must not exceed TOTAL_BASIS_POINTS. + stakingModuleFee: MODULE_FEE, + /// @notice Part of the fee taken from staking rewards that goes to the treasury, in BP. + /// @dev Together with `stakingModuleFee`, must not exceed TOTAL_BASIS_POINTS. + treasuryFee: TREASURY_FEE, + /// @notice The maximum number of validators that can be deposited in a single block. 
+ /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must not exceed type(uint64).max. + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must be > 0 and ≤ type(uint64).max. + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + /// @notice Withdrawal credential type used by the module. + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + // Add staking module await stakingRouter .connect(stakingRouterAdmin) - .addStakingModule( - randomString(8), - await stakingModule.getAddress(), - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); + .addStakingModule(randomString(8), await stakingModule.getAddress(), stakingModuleConfig); // Grant necessary roles to reporter await stakingRouter @@ -94,7 +109,7 @@ describe("StakingRouter.sol:exit", () => { context("reportValidatorExitDelay", () => { const proofSlotTimestamp = Math.floor(Date.now() / 1000); const eligibleToExitInSec = 86400; // 1 day - const publicKey = hexlify(randomBytes(48)); + const publicKey = randomString(48); it("calls reportValidatorExitDelay on the staking module", async () => { await expect( @@ -125,16 +140,14 @@ describe("StakingRouter.sol:exit", () => { publicKey, eligibleToExitInSec, ), - ).to.be.revertedWith( - `AccessControl: account ${user.address.toLowerCase()} is missing role ${await stakingRouter.REPORT_VALIDATOR_EXITING_STATUS_ROLE()}`, - ); + ).to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount"); }); }); context("onValidatorExitTriggered", () => { const withdrawalRequestPaidFee = ether("0.01"); const exitType = 1n; - const publicKey = hexlify(randomBytes(48)); + const publicKey = randomString(48); it("calls onValidatorExitTriggered on the 
staking module for each validator", async () => { const validatorExitData = [ @@ -199,9 +212,7 @@ describe("StakingRouter.sol:exit", () => { await expect( stakingRouter.connect(user).onValidatorExitTriggered(validatorExitData, withdrawalRequestPaidFee, exitType), - ).to.be.revertedWith( - `AccessControl: account ${user.address.toLowerCase()} is missing role ${await stakingRouter.REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE()}`, - ); + ).to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount"); }); }); }); diff --git a/test/0.8.25/stakingRouter/stakingRouter.getDepositAllocations.test.ts b/test/0.8.25/stakingRouter/stakingRouter.getDepositAllocations.test.ts new file mode 100644 index 0000000000..d948c4d403 --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.getDepositAllocations.test.ts @@ -0,0 +1,767 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + AccountingOracle__MockForStakingRouter, + Lido__MockForStakingRouter, + LidoLocator, + StakingRouter__Harness, +} from "typechain-types"; + +import { randomWCType1 } from "lib"; +import { ONE_GWEI, StakingModuleStatus, WithdrawalCredentialsType } from "lib/constants"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { CtxConfig, DEFAULT_CONFIG, DEFAULT_MEB, setupModule } from "./helpers"; + +describe("StakingRouter.sol:getDepositAllocations", () => { + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + + let locator: LidoLocator; + let stakingRouter: StakingRouter__Harness; + let lidoMock: Lido__MockForStakingRouter; + let accountingOracle: AccountingOracle__MockForStakingRouter; + + let originalState: string; + + let ctx: CtxConfig; + + const withdrawalCredentials = randomWCType1(); + const depositSecurityModule = "0x0000000000000000000000000000000000000002"; + + before(async () => { + 
[deployer, admin] = await ethers.getSigners(); + + lidoMock = await ethers.deployContract("Lido__MockForStakingRouter", deployer); + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + + locator = await deployLidoLocator({ + lido: await lidoMock.getAddress(), + depositSecurityModule, + accountingOracle: await accountingOracle.getAddress(), + }); + + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator, lido: lidoMock })); + + await lidoMock.setStakingRouter(await stakingRouter.getAddress()); + await stakingRouter.initialize(admin, withdrawalCredentials); + await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); + + ctx = { + deployer, + admin, + stakingRouter, + }; + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("getDepositAllocations with _isTopUp = false (initial deposits)", () => { + it("Returns empty arrays when there are no modules registered", async () => { + const result = await stakingRouter.getDepositAllocations(100n, false); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([]); + expect(result.newAllocations).to.deep.equal([]); + }); + + it("Returns all allocations to a single module if there is only one", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 100n, + }; + + await setupModule(ctx, config); + + const ethToDeposit = 150n * DEFAULT_MEB; + const moduleAllocation = config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([moduleAllocation]); + expect(result.allocated).to.deep.equal([moduleAllocation]); + }); + + it("Allocates evenly if target shares are equal and capacities allow for that", async () => { + 
const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + const ethToDeposit = 200n * DEFAULT_MEB; + const moduleAllocation = config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(moduleAllocation * 2n); + expect(result.newAllocations).to.deep.equal([moduleAllocation, moduleAllocation]); + expect(result.allocated).to.deep.equal([moduleAllocation, moduleAllocation]); + }); + + it("Does not allocate to non-Active modules", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, { ...config, status: StakingModuleStatus.DepositsPaused }); + + const ethToDeposit = 200n * DEFAULT_MEB; + const moduleAllocation = config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([moduleAllocation, 0n]); + expect(result.allocated).to.deep.equal([moduleAllocation, 0n]); + }); + + it("Allocates according to capacities at equal target shares", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 100n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Allocation = module1Config.depositable * DEFAULT_MEB; + const module2Allocation = module2Config.depositable * DEFAULT_MEB; + + const result = await 
stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([module1Allocation, module2Allocation]); + expect(result.allocated).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Allocates according to target shares", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 60_00n, + priorityExitShareThreshold: 60_00n, + depositable: 100n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 40_00n, + priorityExitShareThreshold: 40_00n, + depositable: 100n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Allocation = 100n * DEFAULT_MEB; + const module2Allocation = 80n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Allocates with unlimited (100%) and 20% limited share modules", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + depositable: 200n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + depositable: 200n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // totalValidators = 0 + 0 + 200 = 200 + // Module 1 target: (10000 * 200) / 10000 = 200, cap = min(200, 200) = 200 + // Module 2 target: (2000 * 200) / 10000 = 40, cap = min(40, 200) = 40 + // MinFirst: [0,0] caps [200,40] + // fill both to 40: cost 80, remaining 120 + // module 2 at cap, module 1 gets 120 + // result: [160, 40], total = 200 + const ethToDeposit = 200n * DEFAULT_MEB; + const 
module1Allocation = 160n * DEFAULT_MEB; + const module2Allocation = 40n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([module1Allocation, module2Allocation]); + expect(result.allocated).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Unlimited module absorbs excess when 20% module hits share limit with pre-existing deposits", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + depositable: 100n, + deposited: 50n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + depositable: 100n, + deposited: 50n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // totalValidators = 50 + 50 + 200 = 300 + // Module 1 target: (10000 * 300) / 10000 = 300, cap = min(300, 150) = 150 + // Module 2 target: (2000 * 300) / 10000 = 60, cap = min(60, 150) = 60 + // MinFirst: [50,50] caps [150,60] + // fill both to 60: cost 20, remaining 180 + // module 2 at cap, module 1 gets min(180, 90) = 90 + // result: [150, 60], total allocated = 110 + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Delta = 100n * DEFAULT_MEB; + const module2Delta = 10n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Delta + module2Delta); + expect(result.newAllocations).to.deep.equal([150n * DEFAULT_MEB, 60n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Delta, module2Delta]); + }); + + it("Returns zero allocated array when deposit amount is zero", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 50n, + }; + + await setupModule(ctx, config); + + const result = await 
stakingRouter.getDepositAllocations(0n, false); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([0n]); + // newAllocations should reflect current allocation state (no deposited = 0) + expect(result.newAllocations).to.deep.equal([0n]); + }); + }); + + context("getDepositAllocations with _isTopUp = true (top-up deposits)", () => { + it("Returns empty arrays when there are no modules registered", async () => { + const result = await stakingRouter.getDepositAllocations(100n, true); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([]); + expect(result.newAllocations).to.deep.equal([]); + }); + + it("Returns all allocations to a single module if there is only one", async () => { + // For top-up 0x02 modules, capacity = activeValidators * maxEBType2 / maxEBType1 + // We need deposited validators with initial balance (32 ETH each) to create top-up room + const deposited = 10n; + const config = { + ...DEFAULT_CONFIG, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, // each validator at initial 32 ETH + }; + + await setupModule(ctx, config); + + // capacity_equiv = 10 * 2048/32 = 640, current_equiv = 10, room = 630 + const ethToDeposit = 631n * DEFAULT_MEB; + const moduleAllocation = 630n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([(deposited + 630n) * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([moduleAllocation]); + }); + + it("Allocates evenly if target shares are equal and capacities allow for that", async () => { + const deposited = 1n; + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 
deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + // capacity_equiv = 1 * 2048/32 = 64, current_equiv = 1, room = 63 + const ethToDeposit = 50n * DEFAULT_MEB; + const moduleAllocation = 25n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(moduleAllocation * 2n); + expect(result.newAllocations).to.deep.equal([(deposited + 25n) * DEFAULT_MEB, (deposited + 25n) * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([moduleAllocation, moduleAllocation]); + }); + + it("Does not allocate to non-Active modules", async () => { + const deposited = 1n; + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, config); + await setupModule(ctx, { ...config, status: StakingModuleStatus.DepositsPaused }); + + // Module 1: capacity_equiv = 1 * 2048/32 = 64, current_equiv = 1, room = 63 + const ethToDeposit = 200n * DEFAULT_MEB; + const moduleAllocation = deposited * 63n * DEFAULT_MEB; // all to module 1 since module 2 is paused + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([(deposited + 63n) * DEFAULT_MEB, deposited * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([moduleAllocation, 0n]); + }); + + it("Allocates according to capacities at equal target shares", async () => { + // Module with more active validators has more top-up capacity + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited: 10n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 10n * 32n * ONE_GWEI, + }; + + 
const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited: 2n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 2n * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // Module 1: capacity_equiv = 10 * 2048/32 = 640, current_equiv = 10, room = 630 + // Module 2: capacity_equiv = 2 * 2048/32 = 128, current_equiv = 2, room = 126 + // + // cap1_raw = 10*64=640, cap2_raw = 2*64=128 + // total = 10+2+1000 = 1012, target = 506 each + // cap1 = min(506, 640)=506, cap2 = min(506, 128)=128 + // MinFirst: [10,2] caps [506,128] + // fill 2→10: +8, remaining 992 + // fill equally to 128: each +118, remaining 756 + // module 2 at cap, module 1 gets min(756, 506-128)=378 + // total = 8+236+378 = 622 + // module1 delta = 496, module2 delta = 126 + const ethToDeposit = 1000n * DEFAULT_MEB; + const module1Allocation = 496n * DEFAULT_MEB; + const module2Allocation = 126n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([506n * DEFAULT_MEB, 128n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Allocates according to target shares", async () => { + // Same deposited count, different share limits → allocation driven by target shares + const deposited = 10n; + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 60_00n, + priorityExitShareThreshold: 60_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 40_00n, + priorityExitShareThreshold: 40_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + 
validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // total = 10+10+80 = 100 + // target1 = 60, target2 = 40, cap_raw = 10*64=640 each + // cap1 = min(60,640)=60, cap2 = min(40,640)=40 + // MinFirst: [10,10] caps [60,40] + // fill equally to 40: each +30, remaining 20 + // module 2 at cap, module 1 gets 20 + // total = 80 + const ethToDeposit = 80n * DEFAULT_MEB; + const module1Allocation = 50n * DEFAULT_MEB; + const module2Allocation = 30n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([60n * DEFAULT_MEB, 40n * DEFAULT_MEB]); + }); + + it("Allocates with unlimited (100%) and 20% limited share modules for top-up", async () => { + const deposited = 10n; + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // Each module: cap_raw = 10 * 2048/32 = 640 equiv validators + // Current: 10 equiv each + // totalValidators = 10 + 10 + 100 = 120 + // Module 1 target: (10000 * 120) / 10000 = 120, cap = min(120, 640) = 120 + // Module 2 target: (2000 * 120) / 10000 = 24, cap = min(24, 640) = 24 + // MinFirst: [10,10] caps [120,24] + // fill both to 24: cost 28, remaining 72 + // module 2 at cap, module 1 gets min(72, 96) = 72 + // result: [96, 24], total allocated = 100 + const ethToDeposit = 
100n * DEFAULT_MEB; + const module1Delta = 86n * DEFAULT_MEB; + const module2Delta = 14n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Delta + module2Delta); + expect(result.newAllocations).to.deep.equal([96n * DEFAULT_MEB, 24n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Delta, module2Delta]); + }); + + it("Unlimited module absorbs excess when 20% module has fewer active validators for top-up", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + deposited: 10n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 10n * 32n * ONE_GWEI, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + deposited: 1n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 1n * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // Module 1: cap_raw = 10 * 64 = 640, current = 10 + // Module 2: cap_raw = 1 * 64 = 64, current = 1 + // totalValidators = 10 + 1 + 600 = 611 + // Module 1 target: (10000 * 611) / 10000 = 611, cap = min(611, 640) = 611 + // Module 2 target: (2000 * 611) / 10000 = 122, cap = min(122, 64) = 64 + const ethToDeposit = 600n * DEFAULT_MEB; + const module1Delta = 537n * DEFAULT_MEB; + const module2Delta = 63n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Delta + module2Delta); + expect(result.newAllocations).to.deep.equal([547n * DEFAULT_MEB, 64n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Delta, module2Delta]); + }); + + it("Returns zero allocated array when deposit amount is zero", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 50n, + 
withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }; + + await setupModule(ctx, config); + + const result = await stakingRouter.getDepositAllocations(0n, true); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([0n]); + }); + }); + + context("multi-module top-up scenarios", () => { + // Module balances from SR accounting (wei) + const MODULE_1_BALANCE_GWEI = 960_006_155_190_000_000_000n / ONE_GWEI; // ~960.006 ETH ~ 31 validators + const MODULE_2_BALANCE_GWEI = 0n; + const MODULE_3_BALANCE_GWEI = 1_600_010_258_650_000_000_000n / ONE_GWEI; // ~1600.01 ETH ~ 51 validators + const MODULE_4_BALANCE_GWEI = 1_988_080_734_502_000_000_000n / ONE_GWEI; // ~1988.08 ETH ~ 63 validators + // in total 145 validators + + const BUFFER = 5_552_649_867_953_000_000_001n; // ~5552.65 ETH + + const sharesDefault = new Map(); + sharesDefault.set(1, { stakeShareLimit: 10000n, priorityExitShareThreshold: 10000n }); + sharesDefault.set(2, { stakeShareLimit: 400n, priorityExitShareThreshold: 10000n }); + sharesDefault.set(3, { stakeShareLimit: 2000n, priorityExitShareThreshold: 2500n }); + sharesDefault.set(4, { stakeShareLimit: 2000n, priorityExitShareThreshold: 2500n }); + + async function setupModules( + shares: Map = sharesDefault, + ) { + // Module 1: Curated (0x01, 100% share limit, 30 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(1)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(1)!.priorityExitShareThreshold, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + deposited: 30n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_1_BALANCE_GWEI, + }); + + // Module 2: SimpleDVT (0x01, 4% share limit, 0 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(2)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(2)!.priorityExitShareThreshold, + moduleFee: 8_00n, + treasuryFee: 2_00n, + 
withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + deposited: 0n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_2_BALANCE_GWEI, + }); + + // Module 3: Community Staking (0x01, 20% share limit, 50 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(3)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(3)!.priorityExitShareThreshold, + moduleFee: 8_00n, + treasuryFee: 2_00n, + maxDepositsPerBlock: 30n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + deposited: 50n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_3_BALANCE_GWEI, + }); + + // Module 4: curated-onchain-v2 (0x02, variable share limit, 25 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(4)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(4)!.priorityExitShareThreshold, + moduleFee: 8_00n, + treasuryFee: 2_00n, + maxDepositsPerBlock: 30n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + deposited: 25n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_4_BALANCE_GWEI, + totalModuleStake: MODULE_4_BALANCE_GWEI * ONE_GWEI, + }); + } + + it("Returns zero new allocation when 0x01 modules have no depositable keys and 0x02 module is at share limit", async () => { + await setupModules(); + + // allocations - is array containing new allocation per module + already allocated amount of Eth + // allocated - is a total new sum of deposits + // this test expect 0 new allocated Eth + const result = await stakingRouter.getDepositAllocations(BUFFER, true); + expect(result.totalAllocated).to.equal(0n, "totalAllocated should be 0 — no capacity in any modules"); + + // newAllocations array returns per-module new total allocations (including existing), + // verify it has an entry per module + expect(result.newAllocations.length).to.equal(4); + + const ETH32 = 32n * 10n ** 18n; + // for type2 modules: 
newAllocations[i] = ceilDiv(totalModuleStake, 32 ETH) * 32 ETH + const toValidatorETH = (balance: bigint) => ((balance + ETH32 - 1n) / ETH32) * ETH32; + + expect(result.newAllocations[0]).to.equal(30n * ETH32); + expect(result.newAllocations[1]).to.equal(0n); + expect(result.newAllocations[2]).to.equal(50n * ETH32); + expect(result.newAllocations[3]).to.equal(toValidatorETH(MODULE_4_BALANCE_GWEI * ONE_GWEI)); + + // all allocated deltas should be 0 + for (const a of result.allocated) { + expect(a).to.equal(0n); + } + }); + + it("Allocates to 0x02 module when buffer is large enough to push target above current allocation", async () => { + await setupModules(); + + // to make some top up in 4 module -> it should have 64 validators + // 64 * 32 = X * 32 * 20/100 -> X = 320 validators in total + // already have 143 validators (30 + 50 + 63) + // need 177 validators = 320 - 143 + // 177*32 = 5664 eth - minimum buffer + + const INCREASED_BUFFER = 5670n * 10n ** 18n; + + // Snapshot current state for comparison + const resultBefore = await stakingRouter.getDepositAllocations(BUFFER, true); + expect(resultBefore.totalAllocated).to.equal(0n, "sanity check: original buffer gives 0"); + + const result = await stakingRouter.getDepositAllocations(INCREASED_BUFFER, true); + + // Module 4 (0x02) now has capacity — new ETH is allocated + expect(result.totalAllocated).to.be.gt(32n, "totalAllocated should be > 0 with larger buffer"); + + // Modules 1-3 didn't change (0x01, no depositable keys — capacity == current) + expect(result.newAllocations[0]).to.equal(resultBefore.newAllocations[0], "module 1 unchanged"); + expect(result.newAllocations[1]).to.equal(resultBefore.newAllocations[1], "module 2 unchanged"); + expect(result.newAllocations[2]).to.equal(resultBefore.newAllocations[2], "module 3 unchanged"); + + // Module 4 grew + expect(result.newAllocations[3]).to.be.gt(resultBefore.newAllocations[3], "module 4 allocation increased"); + + // Delta for module 4 = newAllocation - 
currentAllocation = totalAllocated (since only module 4 grew) + const module4Delta = result.newAllocations[3] - resultBefore.newAllocations[3]; + expect(module4Delta).to.equal(result.totalAllocated, "all new allocation went to module 4"); + + // Verify allocated array reflects the same delta + expect(result.allocated[0]).to.equal(0n, "module 1 delta is 0"); + expect(result.allocated[1]).to.equal(0n, "module 2 delta is 0"); + expect(result.allocated[2]).to.equal(0n, "module 3 delta is 0"); + expect(result.allocated[3]).to.equal(module4Delta, "module 4 delta matches"); + }); + }); + + context("getDepositAllocations allocated (delta) array", () => { + it("Returns per-module deltas that sum to totalAllocated", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + const ethToDeposit = 200n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + + let allocatedSum = 0n; + for (const a of result.allocated) { + allocatedSum += a; + } + expect(allocatedSum).to.equal(result.totalAllocated); + }); + + it("Delta is zero for modules with no capacity", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 0n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + const ethToDeposit = 200n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.allocated.length).to.equal(2); + expect(result.allocated[0]).to.equal(module1Config.depositable * DEFAULT_MEB); + expect(result.allocated[1]).to.equal(0n); + }); + + it("Delta reflects newly allocated amount with 
pre-existing deposits", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 50n, + deposited: 100n, + }; + + await setupModule(ctx, config); + + const ethToDeposit = 50n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + + // allocated[0] is the delta (new allocation) + // newAllocations[0] includes existing validators + new + expect(result.allocated[0]).to.equal(result.totalAllocated); + expect(result.newAllocations[0]).to.be.equal(150n * DEFAULT_MEB); // 100 existing + 50 new = 150 total allocation after deposit + }); + + it("Returns per-module deltas that sum to totalAllocated for top-up", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + const ethToDeposit = 200n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + + let allocatedSum = 0n; + for (const a of result.allocated) { + allocatedSum += a; + } + expect(allocatedSum).to.equal(result.totalAllocated); + }); + }); +}); diff --git a/test/0.8.25/stakingRouter/stakingRouter.misc.test.ts b/test/0.8.25/stakingRouter/stakingRouter.misc.test.ts new file mode 100644 index 0000000000..9d78f6a744 --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.misc.test.ts @@ -0,0 +1,233 @@ +import { expect } from "chai"; +import { ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { AccountingOracle__MockForStakingRouter, LidoLocator, StakingRouter__Harness } from "typechain-types"; + +import { certainAddress, ether, randomAddress, randomBytes32, randomWCType1 } from "lib"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +describe("StakingRouter.sol:misc", () => { + let 
deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let stakingRouterAdmin: HardhatEthersSigner; + let user: HardhatEthersSigner; + let locator: LidoLocator; + let accountingOracle: AccountingOracle__MockForStakingRouter; + let stakingRouter: StakingRouter__Harness; + let impl: StakingRouter__Harness; + + let originalState: string; + + const lido = certainAddress("test:staking-router:lido"); + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + const accounting = certainAddress("test:staking-router:accounting"); + const withdrawalCredentials = randomWCType1(); + + before(async () => { + [deployer, admin, stakingRouterAdmin, user] = await ethers.getSigners(); + + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, + accountingOracle, + }); + + ({ stakingRouter, impl } = await deployStakingRouter( + { deployer, admin, user }, + { + lidoLocator: locator, + }, + )); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("initialize", () => { + it("Reverts if admin is zero address", async () => { + await expect(stakingRouter.initialize(ZeroAddress, withdrawalCredentials)).to.be.revertedWithCustomError( + stakingRouter, + "ZeroAddress", + ); + }); + + it("Initializes the contract version, sets up roles and variables", async () => { + await expect(stakingRouter.initialize(stakingRouterAdmin.address, withdrawalCredentials)) + .to.emit(stakingRouter, "Initialized") + .withArgs(4) + .and.to.emit(stakingRouter, "RoleGranted") + .withArgs(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address, user.address) + .and.to.emit(stakingRouter, "WithdrawalCredentialsSet") + .withArgs(withdrawalCredentials, 
user.address); + + expect(await stakingRouter.getContractVersion()).to.equal(4); + expect(await stakingRouter.LIDO_LOCATOR()).to.equal(locator); + expect(await stakingRouter.getWithdrawalCredentials()).to.equal(withdrawalCredentials); + + // fails with InvalidInitialization error when called after initialize + await expect(stakingRouter.finalizeUpgrade_v4()).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + }); + + context("finalizeUpgrade_v4()", () => { + let DEFAULT_ADMIN_ROLE: string; + let STAKING_MODULE_MANAGE_ROLE: string; + let REPORT_EXITED_VALIDATORS_ROLE: string; + let REPORT_REWARDS_MINTED_ROLE: string; + let MANAGE_WITHDRAWAL_CREDENTIALS_ROLE: string; + let STAKING_MODULE_UNVETTING_ROLE: string; + let REPORT_VALIDATOR_EXITING_STATUS_ROLE: string; + let REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE: string; + let UNSAFE_SET_EXITED_VALIDATORS_ROLE: string; + let roles: string[]; + + beforeEach(async () => { + // Simulate old 0.8.9 StakingRouter state (v3): + // sets WITHDRAWAL_CREDENTIALS_POSITION, LIDO_POSITION, LAST_STAKING_MODULE_ID_POSITION, + // STAKING_MODULES_COUNT_POSITION, CONTRACT_VERSION_POSITION + await stakingRouter.testing_initializeV3(); + + // simulate old OZ v4.4 AccessControl state: admin has DEFAULT_ADMIN_ROLE and STAKING_MODULE_MANAGE_ROLE + DEFAULT_ADMIN_ROLE = await stakingRouter.DEFAULT_ADMIN_ROLE(); + STAKING_MODULE_MANAGE_ROLE = await stakingRouter.STAKING_MODULE_MANAGE_ROLE(); + // AccountingOracle + REPORT_EXITED_VALIDATORS_ROLE = await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE(); + // Accounting + REPORT_REWARDS_MINTED_ROLE = await stakingRouter.REPORT_REWARDS_MINTED_ROLE(); + + MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = await stakingRouter.MANAGE_WITHDRAWAL_CREDENTIALS_ROLE(); + // DSM + STAKING_MODULE_UNVETTING_ROLE = await stakingRouter.STAKING_MODULE_UNVETTING_ROLE(); + // VEBO + REPORT_VALIDATOR_EXITING_STATUS_ROLE = await stakingRouter.REPORT_VALIDATOR_EXITING_STATUS_ROLE(); + // TW + 
REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = await stakingRouter.REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE(); + UNSAFE_SET_EXITED_VALIDATORS_ROLE = await stakingRouter.UNSAFE_SET_EXITED_VALIDATORS_ROLE(); + + roles = [ + // DEFAULT_ADMIN_ROLE, + STAKING_MODULE_MANAGE_ROLE, + REPORT_EXITED_VALIDATORS_ROLE, + REPORT_REWARDS_MINTED_ROLE, + MANAGE_WITHDRAWAL_CREDENTIALS_ROLE, + STAKING_MODULE_UNVETTING_ROLE, + REPORT_VALIDATOR_EXITING_STATUS_ROLE, + REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE, + UNSAFE_SET_EXITED_VALIDATORS_ROLE, + ]; + + await stakingRouter.testing_grantRoleOld(DEFAULT_ADMIN_ROLE, stakingRouterAdmin.address); + await stakingRouter.testing_grantRoleOld(STAKING_MODULE_MANAGE_ROLE, stakingRouterAdmin.address); + await stakingRouter.testing_grantRoleOld(REPORT_EXITED_VALIDATORS_ROLE, accountingOracle); + await stakingRouter.testing_grantRoleOld(REPORT_REWARDS_MINTED_ROLE, accounting); + + // simulate oracle report + await accountingOracle.mock_setProcessingState(1, true, true); + }); + + it("fails with InvalidInitialization error when called on implementation", async () => { + await expect(impl.finalizeUpgrade_v4()).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + + it("sets correct contract version, withdrawal credentials and admin role", async () => { + // OZ Initializable slot is 0 before migration (old Versioned used a different slot) + expect(await stakingRouter.getContractVersion()).to.equal(0); + // but old Versioned slot has v3 + expect(await stakingRouter.testing_getOldContractVersion()).to.equal(3); + + await expect(stakingRouter.finalizeUpgrade_v4()) + .to.emit(stakingRouter, "Initialized") + .withArgs(4) + .and.to.emit(stakingRouter, "RoleGranted") + .withArgs(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address, user.address); + + // new OZ version is set + expect(await stakingRouter.getContractVersion()).to.be.equal(4); + + // data migrated correctly + expect(await stakingRouter.getWithdrawalCredentials()).to.equal(await 
stakingRouter.WC_01_MOCK()); + expect(await stakingRouter.testing_getLastModuleId()).to.equal(await stakingRouter.LAST_STAKING_MODULE_ID_MOCK()); + + // admin role granted + expect(await stakingRouter.hasRole(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address)).to.be + .true; + }); + + it("cleans up old storage slots after migration", async () => { + await stakingRouter.finalizeUpgrade_v4(); + + // all old unstructured storage slots should be zeroed + expect(await stakingRouter.testing_getOldLidoPosition()).to.equal(ZeroAddress); + expect(await stakingRouter.testing_getOldWcPosition()).to.equal(ethers.ZeroHash); + expect(await stakingRouter.testing_getOldContractVersion()).to.equal(0); + expect(await stakingRouter.testing_getOldLastModuleIdPosition()).to.equal(0); + expect(await stakingRouter.testing_getOldModulesCountPosition()).to.equal(0); + }); + + it("migrate all defined AccessControl role and skip undefined", async () => { + const someAccount = randomAddress(); + const someNewRole = randomBytes32(); + + for (const role of roles) { + await stakingRouter.testing_grantRoleOld(role, someAccount); + } + // grant undefined role + await stakingRouter.testing_grantRoleOld(someNewRole, someAccount); + + // old slots are populated + for (const role of roles) { + expect(await stakingRouter.testing_hasRoleOld(role, someAccount)).to.be.true; + } + expect(await stakingRouter.testing_hasRoleOld(someNewRole, someAccount)).to.be.true; + + // but new OZ 5.2 hasRole() reads from a different ERC-7201 slot — roles are invisible + expect(await stakingRouter.hasRole(DEFAULT_ADMIN_ROLE, stakingRouterAdmin.address)).to.be.false; + for (const role of roles) { + expect(await stakingRouter.hasRole(role, someAccount)).to.be.false; + } + expect(await stakingRouter.hasRole(someNewRole, someAccount)).to.be.false; + + // migration writes DEFAULT_ADMIN_ROLE to the NEW slot, but does NOT touch old slots + await stakingRouter.finalizeUpgrade_v4(); + + // after migration: all 
roles should be reassigned + expect(await stakingRouter.hasRole(DEFAULT_ADMIN_ROLE, stakingRouterAdmin.address)).to.be.true; + for (const role of roles) { + expect(await stakingRouter.hasRole(role, someAccount)).to.be.true; + } + // undefined role is not migrated + expect(await stakingRouter.hasRole(someNewRole, someAccount)).to.be.false; + + // old AccessControl slots are NOT cleaned up (orphaned, inaccessible by new code) + for (const role of roles) { + expect(await stakingRouter.testing_hasRoleOld(role, someAccount)).to.be.true; + } + expect(await stakingRouter.testing_hasRoleOld(someNewRole, someAccount)).to.be.true; + }); + + it("cannot be called twice", async () => { + await stakingRouter.finalizeUpgrade_v4(); + await expect(stakingRouter.finalizeUpgrade_v4()).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + }); + + context("receive", () => { + it("Reverts", async () => { + await expect( + user.sendTransaction({ + to: stakingRouter, + value: ether("1.0"), + }), + ).to.be.revertedWithCustomError(stakingRouter, "DirectETHTransfer"); + }); + }); +}); diff --git a/test/0.8.25/stakingRouter/stakingRouter.module-management.test.ts b/test/0.8.25/stakingRouter/stakingRouter.module-management.test.ts new file mode 100644 index 0000000000..ef4cc3ceaf --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.module-management.test.ts @@ -0,0 +1,679 @@ +import { expect } from "chai"; +import { ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { LidoLocator, StakingRouter } from "typechain-types"; + +import { certainAddress, getNextBlock, randomString, randomWCType1, WithdrawalCredentialsType } from "lib"; + +import { deployLidoLocator } from "test/deploy"; + +import { deployStakingRouter } from "../../deploy/stakingRouter"; + +const UINT64_MAX = 2n ** 64n - 1n; + +describe("StakingRouter.sol:module-management", () => { + let deployer: 
HardhatEthersSigner; + let admin: HardhatEthersSigner; + let user: HardhatEthersSigner; + + let locator: LidoLocator; + let stakingRouter: StakingRouter; + + const withdrawalCredentials = randomWCType1(); + const lido = certainAddress("test:staking-router-modules:lido"); // mock lido address + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + + beforeEach(async () => { + [deployer, admin, user] = await ethers.getSigners(); + + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, + }); + + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); + + // initialize staking router + await stakingRouter.initialize(admin, withdrawalCredentials); + + // grant roles + await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); + }); + + context("addStakingModule", () => { + const NAME = "StakingModule"; + const ADDRESS = certainAddress("test:staking-router:staking-module"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + const stakingModuleConfig = { + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%). + stakeShareLimit: STAKE_SHARE_LIMIT, + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%) and + /// greater than or equal to `stakeShareLimit`. + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + /// @notice Part of the fee taken from staking rewards that goes to the staking module, in BP. 
+ /// @dev Together with `treasuryFee`, must not exceed TOTAL_BASIS_POINTS. + stakingModuleFee: MODULE_FEE, + /// @notice Part of the fee taken from staking rewards that goes to the treasury, in BP. + /// @dev Together with `stakingModuleFee`, must not exceed TOTAL_BASIS_POINTS. + treasuryFee: TREASURY_FEE, + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must not exceed type(uint64).max. + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must be > 0 and ≤ type(uint64).max. + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + /// @notice The type of withdrawal credentials for creation of validators. + /// @dev 1 = 0x01 withdrawals, 2 = 0x02 withdrawals. + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + it("Reverts if the caller does not have the role", async () => { + await expect(stakingRouter.connect(user).addStakingModule(NAME, ADDRESS, stakingModuleConfig)) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + }); + + it("Reverts if the target share is greater than 100%", async () => { + const STAKE_SHARE_LIMIT_OVER_100 = 100_01; + + await expect( + stakingRouter.addStakingModule(NAME, ADDRESS, { + ...stakingModuleConfig, + stakeShareLimit: STAKE_SHARE_LIMIT_OVER_100, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); + }); + + it("Reverts if the sum of module and treasury fees is greater than 100%", async () => { + const MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; + + await expect( + stakingRouter.addStakingModule(NAME, ADDRESS, { + ...stakingModuleConfig, + stakingModuleFee: 
MODULE_FEE_INVALID, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + + const TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; + + await expect( + stakingRouter.addStakingModule(NAME, ADDRESS, { + ...stakingModuleConfig, + treasuryFee: TREASURY_FEE_INVALID, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + }); + + it("Reverts if the staking module address is zero address", async () => { + await expect( + stakingRouter.addStakingModule(NAME, ZeroAddress, stakingModuleConfig), + ).to.be.revertedWithCustomError(stakingRouter, "ZeroAddress"); + }); + + it("Reverts if the staking module name is empty string", async () => { + const NAME_EMPTY_STRING = ""; + + await expect( + stakingRouter.addStakingModule(NAME_EMPTY_STRING, ADDRESS, stakingModuleConfig), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); + }); + + it("Reverts if the staking module name is too long", async () => { + const MAX_STAKING_MODULE_NAME_LENGTH = await stakingRouter.MAX_STAKING_MODULE_NAME_LENGTH(); + const NAME_TOO_LONG = randomString(Number(MAX_STAKING_MODULE_NAME_LENGTH + 1n)); + + await expect( + stakingRouter.addStakingModule(NAME_TOO_LONG, ADDRESS, stakingModuleConfig), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); + }); + + it("Reverts if the max number of staking modules is reached", async () => { + const MAX_STAKING_MODULES_COUNT = await stakingRouter.MAX_STAKING_MODULES_COUNT(); + + const moduleConfig = { + stakeShareLimit: 100, + priorityExitShareThreshold: 100, + stakingModuleFee: 100, + treasuryFee: 100, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + for (let i = 0; i < MAX_STAKING_MODULES_COUNT; i++) { + await stakingRouter.addStakingModule( + randomString(8), + certainAddress(`test:staking-router:staking-module-${i}`), + moduleConfig, + ); + } + + 
expect(await stakingRouter.getStakingModulesCount()).to.equal(MAX_STAKING_MODULES_COUNT); + + await expect(stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig)).to.be.revertedWithCustomError( + stakingRouter, + "StakingModulesLimitExceeded", + ); + }); + + it("Reverts if adding a module with the same address", async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + + await expect(stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig)).to.be.revertedWithCustomError( + stakingRouter, + "StakingModuleAddressExists", + ); + }); + + it("Reverts if the module fee sum differs from existing modules", async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + + await expect( + stakingRouter.addStakingModule("StakingModule2", certainAddress("test:staking-router:staking-module-2"), { + ...stakingModuleConfig, + stakingModuleFee: MODULE_FEE + 1n, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InconsistentFeeSum"); + }); + + it("Adds the module to stakingRouter and emits events", async () => { + const stakingModuleId = (await stakingRouter.getStakingModulesCount()) + 1n; + const moduleAddedBlock = await getNextBlock(); + + await expect(stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig)) + .to.be.emit(stakingRouter, "StakingRouterETHDeposited") + .withArgs(stakingModuleId, 0) + .and.to.be.emit(stakingRouter, "StakingModuleAdded") + .withArgs(stakingModuleId, ADDRESS, NAME, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(stakingModuleId, STAKE_SHARE_LIMIT, PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(stakingModuleId, MODULE_FEE, TREASURY_FEE, admin.address); + + expect(await stakingRouter.getStakingModule(stakingModuleId)).to.deep.equal([ + stakingModuleId, + ADDRESS, + MODULE_FEE, + TREASURY_FEE, + STAKE_SHARE_LIMIT, + 0n, // status active + NAME, + 
moduleAddedBlock.timestamp, + moduleAddedBlock.number, + 0n, // exited validators, + PRIORITY_EXIT_SHARE_THRESHOLD, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + WithdrawalCredentialsType.WC0x01, + 0, + ]); + }); + }); + + context("updateStakingModule", () => { + const NAME = "StakingModule"; + const ADDRESS = certainAddress("test:staking-router-modules:staking-module"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + let ID: bigint; + + const NEW_STAKE_SHARE_LIMIT = 2_00n; + const NEW_PRIORITY_EXIT_SHARE_THRESHOLD = NEW_STAKE_SHARE_LIMIT; + + const NEW_MODULE_FEE = 6_00n; + const NEW_TREASURY_FEE = 4_00n; + + const NEW_MAX_DEPOSITS_PER_BLOCK = 100n; + const NEW_MIN_DEPOSIT_BLOCK_DISTANCE = 20n; + + const stakingModuleConfig = { + stakeShareLimit: STAKE_SHARE_LIMIT, + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: MODULE_FEE, + treasuryFee: TREASURY_FEE, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + beforeEach(async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + ID = await stakingRouter.getStakingModulesCount(); + }); + + it("Reverts if the caller does not have the role", async () => { + stakingRouter = stakingRouter.connect(user); + + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + }); + + it("Reverts if the new target share is 
greater than 100%", async () => { + const NEW_STAKE_SHARE_LIMIT_OVER_100 = 100_01; + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT_OVER_100, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); + }); + + it("Reverts if the new priority exit share is greater than 100%", async () => { + const NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100 = 100_01; + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Reverts if the new priority exit share is less than stake share limit", async () => { + const UPGRADED_STAKE_SHARE_LIMIT = 55_00n; + const UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD = 50_00n; + await expect( + stakingRouter.updateStakingModule( + ID, + UPGRADED_STAKE_SHARE_LIMIT, + UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Reverts if the new deposit block distance is zero", async () => { + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + 0n, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); + }); + + it("Reverts if the new deposit block distance is great then uint64 max", async () => { + await stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + 
NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + UINT64_MAX, + ); + + expect((await stakingRouter.getStakingModule(ID)).minDepositBlockDistance).to.be.equal(UINT64_MAX); + + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + UINT64_MAX + 1n, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); + }); + + it("Reverts if the new max deposits per block is great then uint64 max", async () => { + await stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + UINT64_MAX, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ); + + expect((await stakingRouter.getStakingModule(ID)).maxDepositsPerBlock).to.be.equal(UINT64_MAX); + + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + UINT64_MAX + 1n, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidMaxDepositPerBlockValue"); + }); + + it("Reverts if the sum of the new module and treasury fees is greater than 100%", async () => { + const NEW_MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; + + await expect( + stakingRouter.updateStakingModule( + ID, + STAKE_SHARE_LIMIT, + PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE_INVALID, + TREASURY_FEE, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + + const NEW_TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; + await expect( + stakingRouter.updateStakingModule( + ID, + STAKE_SHARE_LIMIT, + PRIORITY_EXIT_SHARE_THRESHOLD, + MODULE_FEE, + NEW_TREASURY_FEE_INVALID, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + }); + + it("Reverts if the new fee 
sum differs from other modules", async () => { + await stakingRouter.addStakingModule( + "StakingModule2", + certainAddress("test:staking-router-modules:staking-module-2"), + { + ...stakingModuleConfig, + }, + ); + + await expect( + stakingRouter.updateStakingModule( + ID, + STAKE_SHARE_LIMIT, + PRIORITY_EXIT_SHARE_THRESHOLD, + MODULE_FEE + 1n, + TREASURY_FEE, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InconsistentFeeSum"); + }); + + it("Update target share, module and treasury fees and emits events", async () => { + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ) + .to.be.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(ID, NEW_MODULE_FEE, NEW_TREASURY_FEE, admin.address); + }); + }); + + context("updateModuleShares", () => { + const NAME = "StakingModule"; + const ADDRESS = certainAddress("test:staking-router-modules:staking-module-shares"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + let ID: bigint; + + const NEW_STAKE_SHARE_LIMIT = 2_00; + const NEW_PRIORITY_EXIT_SHARE_THRESHOLD = 3_00; + + const stakingModuleConfig = { + stakeShareLimit: STAKE_SHARE_LIMIT, + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: MODULE_FEE, + treasuryFee: TREASURY_FEE, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + beforeEach(async () => 
{ + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + ID = await stakingRouter.getStakingModulesCount(); + + // grant the STAKING_MODULE_SHARE_MANAGE_ROLE to admin + await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_SHARE_MANAGE_ROLE(), admin); + }); + + it("Reverts if the caller does not have the role", async () => { + await expect( + stakingRouter.connect(user).updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD), + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_SHARE_MANAGE_ROLE()); + }); + + it("Reverts if the staking module id does not exist", async () => { + const NON_EXISTENT_MODULE_ID = 999; + await expect( + stakingRouter.updateModuleShares( + NON_EXISTENT_MODULE_ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + ), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleUnregistered"); + }); + + it("Reverts if the new stake share limit is greater than 100%", async () => { + const STAKE_SHARE_LIMIT_OVER_100 = 100_01; + await expect( + stakingRouter.updateModuleShares(ID, STAKE_SHARE_LIMIT_OVER_100, STAKE_SHARE_LIMIT_OVER_100), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); + }); + + it("Reverts if the new priority exit share threshold is greater than 100%", async () => { + const PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100 = 100_01; + await expect( + stakingRouter.updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Reverts if the new priority exit share threshold is less than stake share limit", async () => { + const HIGHER_STAKE_SHARE_LIMIT = 55_00; + const LOWER_PRIORITY_EXIT_SHARE_THRESHOLD = 50_00; + await expect( + stakingRouter.updateModuleShares(ID, HIGHER_STAKE_SHARE_LIMIT, 
LOWER_PRIORITY_EXIT_SHARE_THRESHOLD), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Updates share params and emits StakingModuleShareLimitSet event", async () => { + await expect(stakingRouter.updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(NEW_STAKE_SHARE_LIMIT); + expect(moduleAfter.priorityExitShareThreshold).to.equal(NEW_PRIORITY_EXIT_SHARE_THRESHOLD); + }); + + it("Does not modify other module params (fees, deposits config)", async () => { + const moduleBefore = await stakingRouter.getStakingModule(ID); + + await stakingRouter.updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + + // share params should change + expect(moduleAfter.stakeShareLimit).to.equal(NEW_STAKE_SHARE_LIMIT); + expect(moduleAfter.priorityExitShareThreshold).to.equal(NEW_PRIORITY_EXIT_SHARE_THRESHOLD); + + // other params should remain unchanged + expect(moduleAfter.stakingModuleFee).to.equal(moduleBefore.stakingModuleFee); + expect(moduleAfter.treasuryFee).to.equal(moduleBefore.treasuryFee); + expect(moduleAfter.stakingModuleAddress).to.equal(moduleBefore.stakingModuleAddress); + expect(moduleAfter.maxDepositsPerBlock).to.equal(moduleBefore.maxDepositsPerBlock); + expect(moduleAfter.minDepositBlockDistance).to.equal(moduleBefore.minDepositBlockDistance); + }); + + it("Allows setting stake share limit and priority exit share threshold to the same value", async () => { + const SAME_VALUE = 50_00; + await expect(stakingRouter.updateModuleShares(ID, SAME_VALUE, SAME_VALUE)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, SAME_VALUE, 
SAME_VALUE, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(SAME_VALUE); + expect(moduleAfter.priorityExitShareThreshold).to.equal(SAME_VALUE); + }); + + it("Allows setting both values to zero", async () => { + await expect(stakingRouter.updateModuleShares(ID, 0, 0)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, 0, 0, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(0); + expect(moduleAfter.priorityExitShareThreshold).to.equal(0); + }); + + it("Allows setting both values to 100%", async () => { + const MAX_BP = 100_00; + await expect(stakingRouter.updateModuleShares(ID, MAX_BP, MAX_BP)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, MAX_BP, MAX_BP, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(MAX_BP); + expect(moduleAfter.priorityExitShareThreshold).to.equal(MAX_BP); + }); + }); + + context("updateAllStakingModulesFees", () => { + const MODULE_ONE_NAME = "StakingModule1"; + const MODULE_TWO_NAME = "StakingModule2"; + const MODULE_ONE_ADDRESS = certainAddress("test:staking-router-modules:staking-module-batch-1"); + const MODULE_TWO_ADDRESS = certainAddress("test:staking-router-modules:staking-module-batch-2"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + const stakingModuleConfig = { + stakeShareLimit: STAKE_SHARE_LIMIT, + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: MODULE_FEE, + treasuryFee: TREASURY_FEE, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: 
WithdrawalCredentialsType.WC0x01, + }; + + beforeEach(async () => { + await stakingRouter.addStakingModule(MODULE_ONE_NAME, MODULE_ONE_ADDRESS, stakingModuleConfig); + await stakingRouter.addStakingModule(MODULE_TWO_NAME, MODULE_TWO_ADDRESS, stakingModuleConfig); + }); + + it("Reverts if the caller does not have the role", async () => { + await expect(stakingRouter.connect(user).updateAllStakingModulesFees([6_00n, 7_00n], [4_00n, 3_00n])) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + }); + + it("Reverts if batch arrays length differs from modules count", async () => { + await expect(stakingRouter.updateAllStakingModulesFees([6_00n], [4_00n])).to.be.revertedWithCustomError( + stakingRouter, + "ArraysLengthMismatch", + ); + + await expect(stakingRouter.updateAllStakingModulesFees([6_00n, 7_00n], [4_00n])).to.be.revertedWithCustomError( + stakingRouter, + "ArraysLengthMismatch", + ); + }); + + it("Reverts if any fee sum is greater than 100%", async () => { + await expect( + stakingRouter.updateAllStakingModulesFees([100_01n, 7_00n], [0n, 3_00n]), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + }); + + it("Reverts if fee sums differ inside the batch", async () => { + await expect( + stakingRouter.updateAllStakingModulesFees([6_00n, 7_00n], [4_00n, 4_00n]), + ).to.be.revertedWithCustomError(stakingRouter, "InconsistentFeeSum"); + }); + + it("Updates fees for all modules atomically and emits events", async () => { + await expect(stakingRouter.updateAllStakingModulesFees([6_00n, 7_00n], [4_00n, 3_00n])) + .to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(1n, 6_00n, 4_00n, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(2n, 7_00n, 3_00n, admin.address); + + const moduleOne = await stakingRouter.getStakingModule(1n); + expect(moduleOne.stakingModuleFee).to.equal(6_00n); + 
expect(moduleOne.treasuryFee).to.equal(4_00n); + expect(moduleOne.stakeShareLimit).to.equal(STAKE_SHARE_LIMIT); + expect(moduleOne.maxDepositsPerBlock).to.equal(MAX_DEPOSITS_PER_BLOCK); + expect(moduleOne.minDepositBlockDistance).to.equal(MIN_DEPOSIT_BLOCK_DISTANCE); + + const moduleTwo = await stakingRouter.getStakingModule(2n); + expect(moduleTwo.stakingModuleFee).to.equal(7_00n); + expect(moduleTwo.treasuryFee).to.equal(3_00n); + expect(moduleTwo.stakeShareLimit).to.equal(STAKE_SHARE_LIMIT); + expect(moduleTwo.maxDepositsPerBlock).to.equal(MAX_DEPOSITS_PER_BLOCK); + expect(moduleTwo.minDepositBlockDistance).to.equal(MIN_DEPOSIT_BLOCK_DISTANCE); + }); + }); +}); diff --git a/test/0.8.9/stakingRouter/stakingRouter.module-sync.test.ts b/test/0.8.25/stakingRouter/stakingRouter.module-sync.test.ts similarity index 71% rename from test/0.8.9/stakingRouter/stakingRouter.module-sync.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.module-sync.test.ts index 85a4a3015d..590cb486ae 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.module-sync.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.module-sync.test.ts @@ -1,29 +1,46 @@ import { bigintToHex, bufToHex } from "bigint-conversion"; import { expect } from "chai"; -import { hexlify, randomBytes } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { + AccountingOracle__MockForStakingRouter, DepositContract__MockForBeaconChainDepositor, + Lido__MockForStakingRouter, + LidoLocator, StakingModule__MockForStakingRouter, - StakingRouter, + StakingRouter__Harness, } from "typechain-types"; +import { ValidatorsCountsCorrectionStruct } from "typechain-types/contracts/0.8.25/sr/StakingRouter"; -import { ether, getNextBlock, proxify } from "lib"; - +import { + ether, + getNextBlock, + impersonate, + randomString, + randomWCType1, + StakingModuleStatus, + wcTypeMaxEB, + WithdrawalCredentialsType, +} from "lib"; + +import { 
deployLidoLocator, deployStakingRouter } from "test/deploy"; import { Snapshot } from "test/suite"; describe("StakingRouter.sol:module-sync", () => { let deployer: HardhatEthersSigner; let admin: HardhatEthersSigner; let user: HardhatEthersSigner; - let lido: HardhatEthersSigner; + let dsmSigner: HardhatEthersSigner; - let stakingRouter: StakingRouter; + let stakingRouter: StakingRouter__Harness; let stakingModule: StakingModule__MockForStakingRouter; let depositContract: DepositContract__MockForBeaconChainDepositor; + let accountingOracle: AccountingOracle__MockForStakingRouter; + + let locator: LidoLocator; + let lidoMock: Lido__MockForStakingRouter; let moduleId: bigint; let stakingModuleAddress: string; @@ -34,34 +51,45 @@ describe("StakingRouter.sol:module-sync", () => { const name = "myStakingModule"; const stakingModuleFee = 5_00n; const treasuryFee = 5_00n; - const stakeShareLimit = 1_00n; - const priorityExitShareThreshold = 2_00n; + const stakeShareLimit = 100_00n; + const priorityExitShareThreshold = 100_00n; const maxDepositsPerBlock = 150n; const minDepositBlockDistance = 25n; + const withdrawalCredentials = randomWCType1(); + const topUpGateway = "0x0000000000000000000000000000000000000001"; + const depositSecurityModule = "0x0000000000000000000000000000000000000002"; + let originalState: string; before(async () => { - [deployer, admin, user, lido] = await ethers.getSigners(); + [deployer, admin, user] = await ethers.getSigners(); + + // Deploy Lido mock + lidoMock = await ethers.deployContract("Lido__MockForStakingRouter", deployer); - depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + 
accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + + locator = await deployLidoLocator({ + lido: lidoMock, + topUpGateway, + depositSecurityModule, + accountingOracle, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); + ({ stakingRouter, depositContract } = await deployStakingRouter( + { deployer, admin }, + { lidoLocator: locator, lido: lidoMock }, + )); - [stakingRouter] = await proxify({ impl, admin }); + // initialize staking router with Lido mock + await stakingRouter.initialize(admin, withdrawalCredentials); - // initialize staking router - await stakingRouter.initialize( - admin, - lido, - hexlify(randomBytes(32)), // mock withdrawal credentials - ); + // Set staking router address on Lido mock so it can send ETH + await lidoMock.setStakingRouter(await stakingRouter.getAddress()); + + // Get DSM signer for deposit tests + dsmSigner = await impersonate(depositSecurityModule, ether("10.0")); // grant roles @@ -81,16 +109,17 @@ describe("StakingRouter.sol:module-sync", () => { lastDepositAt = timestamp; lastDepositBlock = number; - await stakingRouter.addStakingModule( - name, - stakingModuleAddress, + const stakingModuleConfig = { stakeShareLimit, priorityExitShareThreshold, stakingModuleFee, treasuryFee, maxDepositsPerBlock, minDepositBlockDistance, - ); + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + await stakingRouter.addStakingModule(name, stakingModuleAddress, stakingModuleConfig); moduleId = await stakingRouter.getStakingModulesCount(); }); @@ -114,24 +143,39 @@ describe("StakingRouter.sol:module-sync", () => { bigint, bigint, bigint, + number, + bigint, ]; // module mock state + const exitedValidators = 100n; + const depositedValidators = 1000n; + const depositableValidators = 200n; const stakingModuleSummary: Parameters = [ - 100n, // exitedValidators - 1000, // depositedValidators - 200, // depositableValidators + exitedValidators, // 
exitedValidators + depositedValidators, // depositedValidators + depositableValidators, // depositableValidators + ]; + + const balance = _getBalanceByValidatorsCount( + WithdrawalCredentialsType.WC0x01, + depositedValidators - exitedValidators, + ); + const stakingModuleAccounting: Parameters = [ + 0n, // moduleId + balance, // effectiveBalanceGwei + exitedValidators, // exitedValidators ]; const nodeOperatorSummary: Parameters = [ - 1, // targetLimitMode - 100n, // targetValidatorsCount + 0, // targetLimitMode + 0n, // targetValidatorsCount 0n, // stuckValidatorsCount 0n, // refundedValidatorsCount 0n, // stuckPenaltyEndTimestamp - 50, // totalExitedValidators - 1000n, // totalDepositedValidators - 200n, // depositableValidatorsCount + exitedValidators, // totalExitedValidators + depositedValidators, // totalDepositedValidators + depositableValidators, // depositableValidatorsCount ]; const nodeOperatorsCounts: Parameters = [ @@ -148,18 +192,22 @@ describe("StakingRouter.sol:module-sync", () => { stakingModuleFee, treasuryFee, stakeShareLimit, - Status.Active, + StakingModuleStatus.Active, name, lastDepositAt, lastDepositBlock, - 0n, // exitedValidatorsCount, + exitedValidators, priorityExitShareThreshold, maxDepositsPerBlock, minDepositBlockDistance, + WithdrawalCredentialsType.WC0x01, + balance, ]; // mocking module state await stakingModule.mock__getStakingModuleSummary(...stakingModuleSummary); + stakingModuleAccounting[0] = moduleId; + await stakingRouter.testing_setStakingModuleAccounting(...stakingModuleAccounting); await stakingModule.mock__getNodeOperatorSummary(...nodeOperatorSummary); await stakingModule.mock__nodeOperatorsCount(...nodeOperatorsCounts); await stakingModule.mock__getNodeOperatorIds(nodeOperatorsIds); @@ -289,10 +337,10 @@ describe("StakingRouter.sol:module-sync", () => { context("getStakingModuleActiveValidatorsCount", () => { it("Returns the number of active validators in the module", async () => { - const [exitedValidators, 
depositedValidators] = stakingModuleSummary; + const [exited, deposited] = stakingModuleSummary; expect(await stakingRouter.getStakingModuleActiveValidatorsCount(moduleId)).to.equal( - Number(depositedValidators) - Number(exitedValidators), + Number(deposited) - Number(exited), ); }); }); @@ -300,13 +348,19 @@ describe("StakingRouter.sol:module-sync", () => { context("setWithdrawalCredentials", () => { it("Reverts if the caller does not have the role", async () => { + await expect(stakingRouter.connect(user).setWithdrawalCredentials(randomWCType1())) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.MANAGE_WITHDRAWAL_CREDENTIALS_ROLE()); + }); + + it("Reverts if withdrawal credentials are empty", async () => { await expect( - stakingRouter.connect(user).setWithdrawalCredentials(hexlify(randomBytes(32))), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.MANAGE_WITHDRAWAL_CREDENTIALS_ROLE()); + stakingRouter.connect(admin).setWithdrawalCredentials(bigintToHex(0n, true, 32)), + ).to.be.revertedWithCustomError(stakingRouter, "ZeroAddress"); }); it("Set new withdrawal credentials and informs modules", async () => { - const newWithdrawalCredentials = hexlify(randomBytes(32)); + const newWithdrawalCredentials = randomWCType1(); await expect(stakingRouter.setWithdrawalCredentials(newWithdrawalCredentials)) .to.emit(stakingRouter, "WithdrawalCredentialsSet") @@ -326,7 +380,7 @@ describe("StakingRouter.sol:module-sync", () => { "72657665727420726561736f6e00000000000000000000000000000000000000", ].join(""); - await expect(stakingRouter.setWithdrawalCredentials(hexlify(randomBytes(32)))) + await expect(stakingRouter.setWithdrawalCredentials(randomWCType1())) .to.emit(stakingRouter, "WithdrawalsCredentialsChangeFailed") .withArgs(moduleId, revertReasonEncoded); }); @@ -335,7 +389,7 @@ describe("StakingRouter.sol:module-sync", () => { const shouldRunOutOfGas = true; await 
stakingModule.mock__onWithdrawalCredentialsChanged(false, shouldRunOutOfGas); - await expect(stakingRouter.setWithdrawalCredentials(hexlify(randomBytes(32)))).to.be.revertedWithCustomError( + await expect(stakingRouter.setWithdrawalCredentials(randomWCType1())).to.be.revertedWithCustomError( stakingRouter, "UnrecoverableModuleError", ); @@ -352,7 +406,9 @@ describe("StakingRouter.sol:module-sync", () => { stakingRouter .connect(user) .updateTargetValidatorsLimits(moduleId, NODE_OPERATOR_ID, TARGET_LIMIT_MODE, TARGET_LIMIT), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); }); it("Redirects the call to the staking module", async () => { @@ -366,15 +422,16 @@ describe("StakingRouter.sol:module-sync", () => { context("reportRewardsMinted", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter.connect(user).reportRewardsMinted([moduleId], [0n]), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_REWARDS_MINTED_ROLE()); + await expect(stakingRouter.connect(user).reportRewardsMinted([moduleId], [0n])) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_REWARDS_MINTED_ROLE()); }); it("Reverts if the arrays have different lengths", async () => { - await expect(stakingRouter.reportRewardsMinted([moduleId], [0n, 1n])) - .to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch") - .withArgs(1n, 2n); + await expect(stakingRouter.reportRewardsMinted([moduleId], [0n, 1n])).to.be.revertedWithCustomError( + stakingRouter, + "ArraysLengthMismatch", + ); }); it("Does nothing if the total shares is 0", async () => { @@ -425,17 +482,60 @@ describe("StakingRouter.sol:module-sync", () => { 
}); }); + context("validateReportValidatorBalancesByStakingModule", () => { + it("reverts if the report does not include all registered modules", async () => { + const secondStakingModule = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); + await stakingRouter.addStakingModule(name + "-2", await secondStakingModule.getAddress(), { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + + await expect( + stakingRouter.validateReportValidatorBalancesByStakingModule([moduleId], [1n]), + ).to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch"); + }); + + it("reverts if the report module ids are not in router order", async () => { + const secondStakingModule = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); + await stakingRouter.addStakingModule(name + "-2", await secondStakingModule.getAddress(), { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + const secondModuleId = await stakingRouter.getStakingModulesCount(); + + await expect(stakingRouter.validateReportValidatorBalancesByStakingModule([secondModuleId, moduleId], [1n, 2n])) + .to.be.revertedWithCustomError(stakingRouter, "UnexpectedModuleId") + .withArgs(moduleId, secondModuleId); + }); + + it("reverts if a reported balance exceeds the allowed gwei range", async () => { + await expect( + stakingRouter.validateReportValidatorBalancesByStakingModule([moduleId], [10n ** 27n]), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidAmountGwei"); + }); + }); + context("updateExitedValidatorsCountByStakingModule", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - 
stakingRouter.connect(user).updateExitedValidatorsCountByStakingModule([moduleId], [0n]), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); + await expect(stakingRouter.connect(user).updateExitedValidatorsCountByStakingModule([moduleId], [0n])) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); }); it("Reverts if the array lengths are different", async () => { - await expect(stakingRouter.updateExitedValidatorsCountByStakingModule([moduleId], [0n, 1n])) - .to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch") - .withArgs(1n, 2n); + await expect( + stakingRouter.updateExitedValidatorsCountByStakingModule([moduleId], [0n, 1n]), + ).to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch"); }); it("Reverts if the new number of exited validators is less than the previous one", async () => { @@ -531,7 +631,9 @@ describe("StakingRouter.sol:module-sync", () => { stakingRouter .connect(user) .reportStakingModuleExitedValidatorsCountByNodeOperator(moduleId, NODE_OPERATOR_IDS, VALIDATORS_COUNTS), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); }); it("Reverts if the node operators ids are packed incorrectly", async () => { @@ -619,8 +721,8 @@ describe("StakingRouter.sol:module-sync", () => { }; const operatorSummary = { - targetLimitMode: 1, - targetValidatorsCount: 100n, + targetLimitMode: 0, + targetValidatorsCount: 0n, stuckValidatorsCount: 0n, refundedValidatorsCount: 0n, stuckPenaltyEndTimestamp: 0n, @@ -629,7 +731,7 @@ describe("StakingRouter.sol:module-sync", () => { depositableValidatorsCount: 1n, }; - const correction: 
StakingRouter.ValidatorsCountsCorrectionStruct = { + const correction: ValidatorsCountsCorrectionStruct = { currentModuleExitedValidatorsCount: moduleSummary.totalExitedValidators, currentNodeOperatorExitedValidatorsCount: operatorSummary.totalExitedValidators, newModuleExitedValidatorsCount: moduleSummary.totalExitedValidators, @@ -642,6 +744,11 @@ describe("StakingRouter.sol:module-sync", () => { moduleSummary.totalDepositedValidators, moduleSummary.depositableValidatorsCount, ); + const balance = _getBalanceByValidatorsCount( + WithdrawalCredentialsType.WC0x01, + moduleSummary.totalDepositedValidators - moduleSummary.totalExitedValidators, + ); + await stakingRouter.testing_setStakingModuleAccounting(moduleId, balance, moduleSummary.totalExitedValidators); const nodeOperatorSummary: Parameters = [ operatorSummary.targetLimitMode, @@ -662,7 +769,9 @@ describe("StakingRouter.sol:module-sync", () => { it("Reverts if the caller does not have the role", async () => { await expect( stakingRouter.connect(user).unsafeSetExitedValidatorsCount(moduleId, nodeOperatorId, true, correction), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.UNSAFE_SET_EXITED_VALIDATORS_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.UNSAFE_SET_EXITED_VALIDATORS_ROLE()); }); it("Reverts if the number of exited validators in the module does not match what is stored on the contract", async () => { @@ -731,9 +840,9 @@ describe("StakingRouter.sol:module-sync", () => { context("onValidatorsCountsByNodeOperatorReportingFinished", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter.connect(user).onValidatorsCountsByNodeOperatorReportingFinished(), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); + await 
expect(stakingRouter.connect(user).onValidatorsCountsByNodeOperatorReportingFinished()) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); }); it("Calls the hook on the staking module", async () => { @@ -789,7 +898,9 @@ describe("StakingRouter.sol:module-sync", () => { stakingRouter .connect(user) .decreaseStakingModuleVettedKeysCountByNodeOperator(moduleId, NODE_OPERATOR_IDS, VETTED_KEYS_COUNTS), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_UNVETTING_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_UNVETTING_ROLE()); }); it("Reverts if the node operators ids are packed incorrectly", async () => { @@ -869,69 +980,105 @@ describe("StakingRouter.sol:module-sync", () => { context("deposit", () => { beforeEach(async () => { - stakingRouter = stakingRouter.connect(lido); - }); - - it("Reverts if the caller is not Lido", async () => { - await expect(stakingRouter.connect(user).deposit(100n, moduleId, "0x")).to.be.revertedWithCustomError( - stakingRouter, - "AppAuthLidoFailed", + // Set up Lido mock with depositable ether and fund it + const depositableAmount = ether("320.0"); // Enough for 10 deposits + await lidoMock.setDepositableEther(depositableAmount); + await lidoMock.fund({ value: depositableAmount }); + + // Set up staking module with depositable validators + await stakingModule.mock__getStakingModuleSummary(0n, 100n, 10n); // 10 depositable validators + const balance = _getBalanceByValidatorsCount( + WithdrawalCredentialsType.WC0x01, + 100n, // active validators ); + await stakingRouter.testing_setStakingModuleAccounting(moduleId, balance, 0); }); - it("Reverts if withdrawal credentials are not set", async () => { - await stakingRouter.connect(admin).setWithdrawalCredentials(bigintToHex(0n, true, 
32)); - - await expect(stakingRouter.deposit(100n, moduleId, "0x")).to.be.revertedWithCustomError( + it("Reverts if the caller is not DSM", async () => { + await expect(stakingRouter.connect(user).deposit(moduleId, "0x")).to.be.revertedWithCustomError( stakingRouter, - "EmptyWithdrawalsCredentials", + "NotAuthorized", ); }); it("Reverts if the staking module is not active", async () => { - await stakingRouter.connect(admin).setStakingModuleStatus(moduleId, Status.DepositsPaused); + await stakingRouter.connect(admin).setStakingModuleStatus(moduleId, StakingModuleStatus.DepositsPaused); - await expect(stakingRouter.deposit(100n, moduleId, "0x")).to.be.revertedWithCustomError( + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.be.revertedWithCustomError( stakingRouter, "StakingModuleNotActive", ); }); - it("Reverts if ether does correspond to the number of deposits", async () => { - const deposits = 2n; - const depositValue = ether("32.0"); - const correctAmount = deposits * depositValue; - const etherToSend = correctAmount + 1n; + it("Revert when 0 deposits", async () => { + // Set depositable ether to 0 + await lidoMock.setDepositableEther(0n); + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.be.revertedWithCustomError( + stakingRouter, + "ZeroDeposits", + ); + }); - await expect( - stakingRouter.deposit(deposits, moduleId, "0x", { - value: etherToSend, - }), - ) - .to.be.revertedWithCustomError(stakingRouter, "InvalidDepositsValue") - .withArgs(etherToSend, deposits); + it("Successfully deposits when depositable ether is available", async () => { + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.emit( + depositContract, + "Deposited__MockEvent", + ); }); - it("Does not submit 0 deposits", async () => { - await expect(stakingRouter.deposit(0n, moduleId, "0x")).not.to.emit(depositContract, "Deposited__MockEvent"); + it("Successfully deposits for module type 0x02 (New)", async () => { + 
const stakingRouterAsAdmin = stakingRouter.connect(admin); + + const newStakingModule = await ethers.deployContract("StakingModuleV2__MockForStakingRouter", deployer); + const newStakingModuleAddress = await newStakingModule.getAddress(); + const withdrawalCredentialsType = WithdrawalCredentialsType.WC0x02; + const stakingModuleConfigNew = { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType, + }; + + await stakingRouterAsAdmin.addStakingModule(`${name}-new`, newStakingModuleAddress, stakingModuleConfigNew); + + const newModuleId = await stakingRouter.getStakingModulesCount(); + + // Set up the new module with depositable validators + const exitedValidators = 0n; + const depositedValidators = 0n; + const depositableValidators = 10n; + await newStakingModule.mock__getStakingModuleSummary( + exitedValidators, + depositedValidators, + depositableValidators, + ); // 10 depositable validators + const validatorsBalanceGwei = _getBalanceByValidatorsCount(withdrawalCredentialsType, depositedValidators); + await stakingRouter.testing_setStakingModuleAccounting(newModuleId, validatorsBalanceGwei, exitedValidators); + + await expect(stakingRouter.connect(dsmSigner).deposit(newModuleId, "0x")).to.emit( + depositContract, + "Deposited__MockEvent", + ); }); - it("Reverts if ether does correspond to the number of deposits", async () => { - const deposits = 2n; - const depositValue = ether("32.0"); - const correctAmount = deposits * depositValue; + it("Reverts if module returns pubkeys with invalid length (not divisible by 48)", async () => { + // Mock the module to return pubkeys with invalid length (47 bytes instead of 48) + const invalidPubkeys = randomString(47); // Not divisible by PUBKEY_LENGTH (48) + const signatures = randomString(96); // Valid signature length - await expect( - stakingRouter.deposit(deposits, moduleId, "0x", { - value: correctAmount, - }), - 
).to.emit(depositContract, "Deposited__MockEvent"); + await stakingModule.mock__obtainDepositData(invalidPubkeys, signatures); + + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.be.revertedWithCustomError( + stakingRouter, + "WrongPubkeyLength", + ); }); }); }); -enum Status { - Active, - DepositsPaused, - Stopped, +function _getBalanceByValidatorsCount(wcType: WithdrawalCredentialsType, validatorsCount: bigint): bigint { + return (validatorsCount * wcTypeMaxEB(wcType)) / 1_000_000_000n; // in gwei } diff --git a/test/0.8.9/stakingRouter/stakingRouter.rewards.test.ts b/test/0.8.25/stakingRouter/stakingRouter.rewards.test.ts similarity index 63% rename from test/0.8.9/stakingRouter/stakingRouter.rewards.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.rewards.test.ts index 04a60586c0..45099066c8 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.rewards.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.rewards.test.ts @@ -1,59 +1,62 @@ import { expect } from "chai"; -import { hexlify, randomBytes } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { StakingModule__MockForStakingRouter, StakingRouter } from "typechain-types"; +import { LidoLocator, StakingRouter__Harness } from "typechain-types"; -import { certainAddress, ether, proxify } from "lib"; -import { TOTAL_BASIS_POINTS } from "lib/constants"; +import { certainAddress, ether, randomWCType1 } from "lib"; +import { StakingModuleStatus, WithdrawalCredentialsType } from "lib/constants"; +import { deployLidoLocator } from "test/deploy"; import { Snapshot } from "test/suite"; +import { deployStakingRouter } from "../../deploy/stakingRouter"; + +import { CtxConfig, DEFAULT_CONFIG, setupModule } from "./helpers"; + describe("StakingRouter.sol:rewards", () => { let deployer: HardhatEthersSigner; let admin: HardhatEthersSigner; - let stakingRouter: StakingRouter; + let locator: LidoLocator; + 
let stakingRouter: StakingRouter__Harness; let originalState: string; + let ctx: CtxConfig; + const DEPOSIT_VALUE = ether("32.0"); - const DEFAULT_CONFIG: ModuleConfig = { - stakeShareLimit: TOTAL_BASIS_POINTS, - priorityExitShareThreshold: TOTAL_BASIS_POINTS, - moduleFee: 5_00n, - treasuryFee: 5_00n, - maxDepositsPerBlock: 150n, - minDepositBlockDistance: 25n, - }; + + const withdrawalCredentials = randomWCType1(); + const lido = certainAddress("test:staking-router-modules:lido"); // mock lido address + + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); before(async () => { [deployer, admin] = await ethers.getSigners(); - const depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [stakingRouter] = await proxify({ impl, admin }); + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); // initialize staking router - await stakingRouter.initialize( - admin, - certainAddress("test:staking-router-modules:lido"), // mock lido address - hexlify(randomBytes(32)), // mock withdrawal credentials - ); + await stakingRouter.initialize(admin, withdrawalCredentials); // grant roles await Promise.all([stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin)]); + + ctx = { + deployer, + admin, + stakingRouter, + }; }); beforeEach(async () => (originalState = await 
Snapshot.take())); @@ -76,110 +79,61 @@ describe("StakingRouter.sol:rewards", () => { depositable: 100n, }; - const [, id] = await setupModule(config); + const [, id] = await setupModule(ctx, config); expect(await stakingRouter.getStakingModuleMaxDepositsCount(id, maxDeposits * DEPOSIT_VALUE)).to.equal( config.depositable, ); }); - it("Returns even allocation between modules if target shares are equal and capacities allow for that", async () => { - const maxDeposits = 200n; + it("Returns the maximum allocation to a single module based on the value and module capacity for new module", async () => { + const maxDeposits = 150n; const config = { ...DEFAULT_CONFIG, - stakeShareLimit: 50_00n, - depositable: 50n, + depositable: 100n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, }; - const [, id1] = await setupModule(config); - const [, id2] = await setupModule(config); + const [, id] = await setupModule(ctx, config); - expect(await stakingRouter.getStakingModuleMaxDepositsCount(id1, maxDeposits * DEPOSIT_VALUE)).to.equal( - config.depositable, - ); - expect(await stakingRouter.getStakingModuleMaxDepositsCount(id2, maxDeposits * DEPOSIT_VALUE)).to.equal( + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id, maxDeposits * DEPOSIT_VALUE)).to.equal( config.depositable, ); }); - }); - - context("getDepositsAllocation", () => { - it("Returns 0 allocated and empty allocations when there are no modules registered", async () => { - expect(await stakingRouter.getDepositsAllocation(100n)).to.deep.equal([0, []]); - }); - - it("Returns all allocations to a single module if there is only one", async () => { - const config = { - ...DEFAULT_CONFIG, - depositable: 100n, - }; - - await setupModule(config); - expect(await stakingRouter.getDepositsAllocation(150n)).to.deep.equal([config.depositable, [config.depositable]]); - }); + it("Returns the maximum allocation based on the value and module capacity if one module on pause", async () => { + const 
depositableEther = ether("32") * 100n + 10n; - it("Allocates evenly if target shares are equal and capacities allow for that", async () => { const config = { ...DEFAULT_CONFIG, - stakeShareLimit: 50_00n, - priorityExitShareThreshold: 50_00n, - depositable: 50n, + depositable: 150n, }; - await setupModule(config); - await setupModule(config); + const [, id] = await setupModule(ctx, config); + await setupModule(ctx, { ...config, status: StakingModuleStatus.DepositsPaused }); - expect(await stakingRouter.getDepositsAllocation(200n)).to.deep.equal([ - config.depositable * 2n, - [config.depositable, config.depositable], - ]); + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id, depositableEther)).to.equal(100n); }); - it("Allocates according to capacities at equal target shares", async () => { - const module1Config = { - ...DEFAULT_CONFIG, - stakeShareLimit: 50_00n, - priorityExitShareThreshold: 50_00n, - depositable: 100n, - }; + it("Returns even allocation between modules if target shares are equal and capacities allow for that", async () => { + const maxDeposits = 200n; - const module2Config = { + const config = { ...DEFAULT_CONFIG, stakeShareLimit: 50_00n, - priorityExitShareThreshold: 50_00n, depositable: 50n, }; - await setupModule(module1Config); - await setupModule(module2Config); - - expect(await stakingRouter.getDepositsAllocation(200n)).to.deep.equal([ - module1Config.depositable + module2Config.depositable, - [module1Config.depositable, module2Config.depositable], - ]); - }); - - it("Allocates according to target shares", async () => { - const module1Config = { - ...DEFAULT_CONFIG, - stakeShareLimit: 60_00n, - priorityExitShareThreshold: 60_00n, - depositable: 100n, - }; - - const module2Config = { - ...DEFAULT_CONFIG, - stakeShareLimit: 40_00n, - priorityExitShareThreshold: 40_00n, - depositable: 100n, - }; - - await setupModule(module1Config); - await setupModule(module2Config); + const [, id1] = await setupModule(ctx, config); + const [, id2] 
= await setupModule(ctx, config); - expect(await stakingRouter.getDepositsAllocation(200n)).to.deep.equal([180n, [100n, 80n]]); + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id1, maxDeposits * DEPOSIT_VALUE)).to.equal( + config.depositable, + ); + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id2, maxDeposits * DEPOSIT_VALUE)).to.equal( + config.depositable, + ); }); }); @@ -195,7 +149,7 @@ describe("StakingRouter.sol:rewards", () => { }); it("Returns empty values if there are modules but no active validators", async () => { - await setupModule(DEFAULT_CONFIG); + await setupModule(ctx, DEFAULT_CONFIG); expect(await stakingRouter.getStakingRewardsDistribution()).to.deep.equal([ [], @@ -212,7 +166,7 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - const [module, id] = await setupModule(config); + const [module, id] = await setupModule(ctx, config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -235,8 +189,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - const [module1, id1] = await setupModule(config); - const [module2, id2] = await setupModule(config); + const [module1, id1] = await setupModule(ctx, config); + const [module2, id2] = await setupModule(ctx, config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -270,8 +224,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 0n, }; - const [module1, id1] = await setupModule(module1Config); - await setupModule(module2Config); + const [module1, id1] = await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -293,10 +247,10 @@ describe("StakingRouter.sol:rewards", () => { const config = { ...DEFAULT_CONFIG, deposited: 1000n, - 
status: Status.Stopped, + status: StakingModuleStatus.Stopped, }; - const [module, id] = await setupModule(config); + const [module, id] = await setupModule(ctx, config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -330,8 +284,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - const [module1, id1] = await setupModule(module1Config); - const [module2, id2] = await setupModule(module2Config); + const [module1, id1] = await setupModule(ctx, module1Config); + const [module2, id2] = await setupModule(ctx, module2Config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -381,8 +335,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - await setupModule(module1Config); - await setupModule(module2Config); + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); @@ -418,8 +372,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - await setupModule(module1Config); - await setupModule(module2Config); + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); expect(await stakingRouter.getStakingFeeAggregateDistributionE4Precision()).to.deep.equal([500n, 500n]); }); @@ -440,68 +394,9 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - await setupModule(module1Config); + await setupModule(ctx, module1Config); expect(await stakingRouter.getTotalFeeE4Precision()).to.equal(10_00n); }); }); - - async function setupModule({ - stakeShareLimit, - priorityExitShareThreshold, - moduleFee, - treasuryFee, - maxDepositsPerBlock, - minDepositBlockDistance, - exited = 0n, - deposited = 0n, - depositable = 0n, - status = Status.Active, - }: ModuleConfig): Promise<[StakingModule__MockForStakingRouter, bigint]> { - const modulesCount = await 
stakingRouter.getStakingModulesCount(); - const module = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); - - await stakingRouter - .connect(admin) - .addStakingModule( - randomBytes(8).toString(), - await module.getAddress(), - stakeShareLimit, - priorityExitShareThreshold, - moduleFee, - treasuryFee, - maxDepositsPerBlock, - minDepositBlockDistance, - ); - - const moduleId = modulesCount + 1n; - expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCount + 1n); - - await module.mock__getStakingModuleSummary(exited, deposited, depositable); - - if (status != Status.Active) { - await stakingRouter.setStakingModuleStatus(moduleId, status); - } - - return [module, moduleId]; - } }); - -enum Status { - Active, - DepositsPaused, - Stopped, -} - -interface ModuleConfig { - stakeShareLimit: bigint; - priorityExitShareThreshold: bigint; - moduleFee: bigint; - treasuryFee: bigint; - maxDepositsPerBlock: bigint; - minDepositBlockDistance: bigint; - exited?: bigint; - deposited?: bigint; - depositable?: bigint; - status?: Status; -} diff --git a/test/0.8.9/stakingRouter/stakingRouter.status-control.test.ts b/test/0.8.25/stakingRouter/stakingRouter.status-control.test.ts similarity index 75% rename from test/0.8.9/stakingRouter/stakingRouter.status-control.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.status-control.test.ts index a023e4410a..220b596028 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.status-control.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.status-control.test.ts @@ -1,16 +1,16 @@ import { expect } from "chai"; -import { randomBytes } from "crypto"; -import { hexlify } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { StakingRouter__Harness } from "typechain-types"; +import { LidoLocator, StakingRouter__Harness } from "typechain-types"; -import { certainAddress, proxify } from "lib"; +import { 
certainAddress, randomWCType1, WithdrawalCredentialsType } from "lib"; +import { deployLidoLocator } from "test/deploy"; import { Snapshot } from "test/suite"; +import { deployStakingRouter } from "../../deploy/stakingRouter"; enum Status { Active, DepositsPaused, @@ -22,46 +22,49 @@ context("StakingRouter.sol:status-control", () => { let admin: HardhatEthersSigner; let user: HardhatEthersSigner; + let locator: LidoLocator; let stakingRouter: StakingRouter__Harness; let moduleId: bigint; let originalState: string; + const lido = certainAddress("test:staking-router-status:lido"); + const withdrawalCredentials = randomWCType1(); + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + before(async () => { [deployer, admin, user] = await ethers.getSigners(); - // deploy staking router - const depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); + // deploy staking router + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); - [stakingRouter] = await proxify({ impl, admin }); - - await stakingRouter.initialize( - admin, - certainAddress("test:staking-router-status:lido"), // mock lido address - hexlify(randomBytes(32)), // mock withdrawal credentials - ); + await stakingRouter.initialize(admin, withdrawalCredentials); // give the necessary role to the admin await 
stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); + const stakingModuleConfig = { + stakeShareLimit: 1_00, + priorityExitShareThreshold: 1_00, + stakingModuleFee: 5_00, + treasuryFee: 5_00, + maxDepositsPerBlock: 150, + minDepositBlockDistance: 25, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + // add staking module await stakingRouter.addStakingModule( "myStakingModule", certainAddress("test:staking-router-status:staking-module"), // mock staking module address - 1_00, // target share - 1_00, // target share - 5_00, // module fee - 5_00, // treasury fee - 150, // max deposits per block - 25, // min deposit block distance + stakingModuleConfig, ); moduleId = await stakingRouter.getStakingModulesCount(); @@ -73,9 +76,9 @@ context("StakingRouter.sol:status-control", () => { context("setStakingModuleStatus", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter.connect(user).setStakingModuleStatus(moduleId, Status.DepositsPaused), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + await expect(stakingRouter.connect(user).setStakingModuleStatus(moduleId, Status.DepositsPaused)) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); }); it("Reverts if the new status is the same", async () => { diff --git a/test/0.8.25/stakingRouter/stakingRouter.topUp.test.ts b/test/0.8.25/stakingRouter/stakingRouter.topUp.test.ts new file mode 100644 index 0000000000..6c26c407ee --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.topUp.test.ts @@ -0,0 +1,327 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + AccountingOracle__MockForStakingRouter, + DepositContract__MockForBeaconChainDepositor, + 
Lido__MockForStakingRouter, + LidoLocator, + StakingModuleV2__MockForStakingRouter, + StakingRouter__Harness, +} from "typechain-types"; + +import { findEventsWithInterfaces, randomString, randomWCType1, wcTypeMaxEB } from "lib"; +import { ONE_GWEI, WithdrawalCredentialsType } from "lib/constants"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { CtxConfig, DEFAULT_CONFIG, setupModule } from "./helpers"; + +describe("StakingRouter.sol:topUp", () => { + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let topUpGatewaySigner: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let locator: LidoLocator; + let stakingRouter: StakingRouter__Harness; + let depositContract: DepositContract__MockForBeaconChainDepositor; + let lidoMock: Lido__MockForStakingRouter; + let accountingOracle: AccountingOracle__MockForStakingRouter; + + let originalState: string; + + let ctx: CtxConfig; + + const NEW_MEB = wcTypeMaxEB(WithdrawalCredentialsType.WC0x02); + const WEI_PER_GWEI = 1_000_000_000n; + const withdrawalCredentials = randomWCType1(); + const depositSecurityModule = "0x0000000000000000000000000000000000000002"; + + before(async () => { + [deployer, admin, topUpGatewaySigner, stranger] = await ethers.getSigners(); + // Deploy Lido mock + lidoMock = await ethers.deployContract("Lido__MockForStakingRouter", deployer); + + // deploy oracle + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + + locator = await deployLidoLocator({ + lido: lidoMock, + topUpGateway: await topUpGatewaySigner.getAddress(), + depositSecurityModule, + accountingOracle, + }); + + // deploy staking router + ({ stakingRouter, depositContract } = await deployStakingRouter( + { deployer, admin }, + { lidoLocator: locator, lido: lidoMock }, + )); + + await lidoMock.setStakingRouter(await stakingRouter.getAddress()); + + // initialize staking router with 
the mock lido and topUpGateway as a signer + await stakingRouter.initialize(admin, withdrawalCredentials); + + // grant roles + await Promise.all([stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin)]); + + ctx = { + deployer, + admin, + stakingRouter, + }; + }); + + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + context("topUp", () => { + const KEY_INDEX = 0n; + const OPERATOR_ID = 1n; + const TOP_UP_LIMIT_GWEI = 10n * ONE_GWEI; // 10 ETH in ONE_GWEI + + function makeValidTopUpData() { + const keyIndices = [KEY_INDEX]; + const operatorIds = [OPERATOR_ID]; + // topUpLimits are now in wei (TOP_UP_LIMIT_GWEI is already 10 ETH in gwei, convert to wei) + const topUpLimits = [TOP_UP_LIMIT_GWEI * WEI_PER_GWEI]; + const pubkeys = [randomString(48)]; + + return { keyIndices, operatorIds, topUpLimits, pubkeys }; + } + + it("Reverts if caller is not TopUpGateway", async () => { + const config = { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }; + + const [, id] = await setupModule(ctx, config); + const { keyIndices, operatorIds, topUpLimits, pubkeys } = makeValidTopUpData(); + + await expect( + stakingRouter.connect(stranger).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "NotAuthorized"); + }); + + it("Reverts if the module does not exist", async () => { + const { keyIndices, operatorIds, topUpLimits, pubkeys } = makeValidTopUpData(); + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(1n, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleUnregistered"); + }); + + it("Reverts if the module is Legacy (top-ups only supported for 0x02)", async () => { + const [, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: 
WithdrawalCredentialsType.WC0x01, + }); + + const { keyIndices, operatorIds, topUpLimits, pubkeys } = makeValidTopUpData(); + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "WrongWithdrawalCredentialsType"); + }); + + it("Reverts if keyIndices array is empty", async () => { + const [, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const keyIndices: bigint[] = []; + const operatorIds: bigint[] = []; + const topUpLimits: bigint[] = []; + const pubkeys: string[] = []; + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "EmptyKeysList"); + }); + + it("Reverts if pubkeys array length doesn't match keyIndices count", async () => { + const [, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const keyIndices = [0n, 1n]; + const operatorIds = [0n, 0n]; + const topUpLimits = [10n * ONE_GWEI * WEI_PER_GWEI, 20n * ONE_GWEI * WEI_PER_GWEI]; // in wei + const pubkeys = [randomString(48)]; // Only 1 key, but 2 expected + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch"); + }); + + it("Does not perform deposits when module allocation is 0", async () => { + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + depositable: 0n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + // Set depositable ether to 0 (no ETH available) + await lidoMock.setDepositableEther(0n); + + const pubkeys = [randomString(48)]; + // Mock module returns 0 allocations + await stakingModule.mock__setTopUpDepositData([0n]); + + const 
keyIndices = [0n]; + const operatorIds = [0n]; + const topUpLimits = [10n * ONE_GWEI * WEI_PER_GWEI]; // in wei + + const tx = await stakingRouter + .connect(topUpGatewaySigner) + .topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits); + + const receipt = await tx.wait(); + const depositEvents = findEventsWithInterfaces(receipt!, "Deposited__MockEvent", [depositContract.interface]); + + expect(depositEvents.length).to.equal(0); + }); + + it("Performs top-up for a New module for all keys", async () => { + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + deposited: 100n, + validatorsBalanceGwei: 100n * 32n * 10n ** 9n, //100 x 32 eth / 1 gwei + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + // topUpLimits are now in wei + const topUpWei = [ + 10n * 10n ** 18n, // 10 ETH in wei + 20n * 10n ** 18n, // 20 ETH in wei + 30n * 10n ** 18n, // 30 ETH in wei + ]; + + const pubkeys = [randomString(48), randomString(48), randomString(48)]; + + // Mock module to return these allocations (in wei) + await stakingModule.mock__setTopUpDepositData(topUpWei); + + const totalTopUpWei = topUpWei.reduce((acc, v) => acc + v, 0n); + + // Set depositable ether in lido mock + await lidoMock.setDepositableEther(100n * NEW_MEB); + // Fund lido mock with ETH + await lidoMock.fund({ value: totalTopUpWei }); + + const keyIndices = [0n, 1n, 2n]; + const operatorIds = [0n, 0n, 0n]; + + const tx = await stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpWei); + + const receipt = await tx.wait(); + const depositEvents = findEventsWithInterfaces(receipt!, "Deposited__MockEvent", [depositContract.interface]); + + expect(depositEvents.length).to.equal(topUpWei.length); + }); + + it("Reverts when allocation exceeds module's target", async () => { + const [stakingModule, id] = (await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, // 50% + priorityExitShareThreshold: 50_00n, + depositable: 2n, 
+ withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + })) as [StakingModuleV2__MockForStakingRouter, bigint]; + + // Add second module to split allocation + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 2n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const depositableEth = 2n * NEW_MEB; + + // Mock module returns allocations that exceed target (in wei) + const pubkeys = [randomString(48), randomString(48)]; + // These allocations will exceed 50% of depositableEth + const topUpWei = [1500n * ONE_GWEI * WEI_PER_GWEI, 1500n * ONE_GWEI * WEI_PER_GWEI]; // 3000 ETH total, but module only gets 50% = 2048 ETH + await stakingModule.mock__setTopUpDepositData(topUpWei); + + await lidoMock.setDepositableEther(depositableEth); + + const keyIndices = [0n, 1n]; + const operatorIds = [0n, 0n]; + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpWei), + ).to.be.revertedWithCustomError(stakingRouter, "ModuleReturnExceedTarget"); + }); + + it("Reverts when top up amount for key is below 1 ETH", async () => { + const reducedBalanceGwei = (100n * NEW_MEB - 64n * 10n ** 18n) / 10n ** 9n; + + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + deposited: 100n, + depositable: 100n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: reducedBalanceGwei, + }); + + const pubkeys = [randomString(48)]; + const topUpWei = [500_000_000n * WEI_PER_GWEI]; // 0.5 ETH in wei + await stakingModule.mock__setTopUpDepositData(topUpWei); + + const depositableEth = 100n * NEW_MEB; + await lidoMock.setDepositableEther(depositableEth); + await lidoMock.fund({ value: depositableEth }); + + const keyIndices = [0n]; + const operatorIds = [0n]; + + const beaconChainDepositor = await ethers.getContractFactory("BeaconChainDepositor"); + await expect( + 
stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpWei), + ).to.be.revertedWithCustomError(beaconChainDepositor, "DepositAmountTooLow"); + }); + + it("Zero allocations from module result in no deposits", async () => { + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + depositable: 100n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const pubkeys = [randomString(48)]; + // Mock module returns 0 allocation + await stakingModule.mock__setTopUpDepositData([0n]); + + await lidoMock.setDepositableEther(100n * NEW_MEB); + + const keyIndices = [0n]; + const operatorIds = [0n]; + const topUpLimits = [10n * ONE_GWEI * WEI_PER_GWEI]; // in wei + + const tx = await stakingRouter + .connect(topUpGatewaySigner) + .topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits); + + const receipt = await tx.wait(); + const depositEvents = findEventsWithInterfaces(receipt!, "Deposited__MockEvent", [depositContract.interface]); + + expect(depositEvents.length).to.equal(0); + }); + }); +}); diff --git a/test/0.8.25/topUpGateway/topUpGateway.test.ts b/test/0.8.25/topUpGateway/topUpGateway.test.ts new file mode 100644 index 0000000000..4e1320582f --- /dev/null +++ b/test/0.8.25/topUpGateway/topUpGateway.test.ts @@ -0,0 +1,608 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { time } from "@nomicfoundation/hardhat-network-helpers"; + +import type { TopUpGateway__Harness } from "typechain-types"; +import { Lido__MockForTopUpGateway, LidoLocator, StakingRouter__MockForTopUpGateway } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { deployLidoLocator } from "test/deploy"; +import { Snapshot } from "test/suite"; + +describe("TopUpGateway.sol", () => { + let admin: HardhatEthersSigner; + let topUpOperator: HardhatEthersSigner; + let limitsManager: HardhatEthersSigner; + 
let stranger: HardhatEthersSigner; + let lido: Lido__MockForTopUpGateway; + let locator: LidoLocator; + let stakingRouter: StakingRouter__MockForTopUpGateway; + let topUpGateway: TopUpGateway__Harness; + + let snapshot: string; + let topUpRole: string; + let manageLimitsRole: string; + + const MODULE_ID = 1n; + const FAR_FUTURE_EPOCH = (1n << 64n) - 1n; + const SAMPLE_PUBKEY = `0x${"11".repeat(48)}`; + const DEFAULT_MAX_VALIDATORS = 5n; + const DEFAULT_MIN_BLOCK_DISTANCE = 1n; + const DEFAULT_MAX_ROOT_AGE = 300n; + const G_INDEX = ethers.zeroPadValue("0x01", 32); + const ZERO_BYTES_31 = "00".repeat(31); + const WC_TYPE_02 = `0x02${ZERO_BYTES_31}`; + const WC_TYPE_01 = `0x01${ZERO_BYTES_31}`; + // Mainnet-like values: targetBalance = 2046.75 ETH, minTopUp = 1 ETH + const DEFAULT_TARGET_BALANCE_GWEI = 204675n * 10n ** 7n; // 2046.75 ETH in Gwei + const DEFAULT_MIN_TOP_UP_GWEI = 1n * 10n ** 9n; // 1 ETH in Gwei + const SLOTS_PER_EPOCH = 32n; + + type TopUpData = { + moduleId: bigint; + keyIndices: bigint[]; + operatorIds: bigint[]; + validatorIndices: bigint[]; + beaconRootData: { + childBlockTimestamp: bigint; + slot: bigint; + proposerIndex: bigint; + }; + validatorWitness: Array<{ + proofValidator: string[]; + pubkey: string; + effectiveBalance: bigint; + slashed: boolean; + activationEligibilityEpoch: bigint; + activationEpoch: bigint; + exitEpoch: bigint; + withdrawableEpoch: bigint; + }>; + pendingBalanceGwei: bigint[]; + }; + + beforeEach(async () => { + [admin, topUpOperator, limitsManager, stranger] = await ethers.getSigners(); + snapshot = await Snapshot.take(); + lido = await ethers.deployContract("Lido__MockForTopUpGateway"); + stakingRouter = await ethers.deployContract("StakingRouter__MockForTopUpGateway"); + locator = await deployLidoLocator({ + stakingRouter: await stakingRouter.getAddress(), + lido: await lido.getAddress(), + }); + + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, 
+ 0, + SLOTS_PER_EPOCH, + ]); + + [topUpGateway] = await proxify({ impl, admin }); + + await topUpGateway.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ); + + topUpRole = await topUpGateway.TOP_UP_ROLE(); + manageLimitsRole = await topUpGateway.MANAGE_LIMITS_ROLE(); + await topUpGateway.grantRole(topUpRole, topUpOperator.address); + await topUpGateway.grantRole(manageLimitsRole, limitsManager.address); + await stakingRouter.setWithdrawalCredentials(MODULE_ID, WC_TYPE_02); + }); + + afterEach(async () => { + await Snapshot.restore(snapshot); + }); + + const buildTopUpData = async (): Promise => { + const timestamp = BigInt(await time.latest()); + + return { + moduleId: MODULE_ID, + keyIndices: [1n], + operatorIds: [1n], + validatorIndices: [1n], + beaconRootData: { + childBlockTimestamp: timestamp, + slot: 123n, + proposerIndex: 1n, + }, + validatorWitness: [ + { + proofValidator: [], + pubkey: SAMPLE_PUBKEY, + effectiveBalance: 32n * 10n ** 9n, + slashed: false, + activationEligibilityEpoch: 0n, + activationEpoch: 0n, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }, + ], + pendingBalanceGwei: [0n], + }; + }; + + describe("initialize", () => { + it("initializes config and roles", async () => { + expect(await topUpGateway.getMaxValidatorsPerTopUp()).to.equal(DEFAULT_MAX_VALIDATORS); + expect(await topUpGateway.getMinBlockDistance()).to.equal(DEFAULT_MIN_BLOCK_DISTANCE); + expect(await topUpGateway.getLastTopUpTimestamp()).to.equal(0n); + expect(await topUpGateway.hasRole(await topUpGateway.DEFAULT_ADMIN_ROLE(), admin.address)).to.be.true; + expect(await topUpGateway.hasRole(topUpRole, admin.address)).to.be.false; + expect(await topUpGateway.harness_getLocator()).to.equal(await locator.getAddress()); + }); + + it("reverts on double initialization", async () => { + await expect( + topUpGateway.initialize( + admin.address, 
+ DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(topUpGateway, "InvalidInitialization"); + }); + + it("reverts when maxValidatorsPerTopUp is zero", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + const [gateway] = await proxify({ impl, admin }); + await expect( + gateway.initialize( + admin.address, + 0n, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(gateway, "ZeroValue"); + }); + + it("reverts when minBlockDistance is zero", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + const [gateway] = await proxify({ impl, admin }); + await expect( + gateway.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + 0n, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(gateway, "ZeroValue"); + }); + + it("reverts when admin is zero address", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + const [gateway] = await proxify({ impl, admin }); + await expect( + gateway.initialize( + ethers.ZeroAddress, + DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ) + .to.be.revertedWithCustomError(gateway, "ZeroArgument") + .withArgs("_admin"); + }); + + it("reverts when lidoLocator is zero address (constructor)", async () => { + await expect( + ethers.deployContract("TopUpGateway__Harness", [ethers.ZeroAddress, G_INDEX, G_INDEX, 0, SLOTS_PER_EPOCH]), 
+ ) + .to.be.revertedWithCustomError(await ethers.getContractFactory("TopUpGateway__Harness"), "ZeroArgument") + .withArgs("_lidoLocator"); + }); + + it("reverts when calling initialize on the implementation directly", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + await expect( + impl.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + }); + + describe("limits management", () => { + it("allows manage limits role to set the max validators per top up", async () => { + const newLimit = DEFAULT_MAX_VALIDATORS + 1n; + await expect(topUpGateway.connect(limitsManager).setMaxValidatorsPerTopUp(newLimit)) + .to.emit(topUpGateway, "MaxValidatorsPerTopUpChanged") + .withArgs(newLimit); + expect(await topUpGateway.getMaxValidatorsPerTopUp()).to.equal(newLimit); + }); + + it("reverts when non-manager tries to set the max validators per top up", async () => { + await expect(topUpGateway.connect(stranger).setMaxValidatorsPerTopUp(DEFAULT_MAX_VALIDATORS + 1n)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, manageLimitsRole); + }); + + it("allows manage limits role to set the min block distance", async () => { + const newDistance = DEFAULT_MIN_BLOCK_DISTANCE + 10n; + await expect(topUpGateway.connect(limitsManager).setMinBlockDistance(newDistance)) + .to.emit(topUpGateway, "MinBlockDistanceChanged") + .withArgs(newDistance); + expect(await topUpGateway.getMinBlockDistance()).to.equal(newDistance); + }); + + it("reverts when non-manager tries to set the min block distance", async () => { + await expect(topUpGateway.connect(stranger).setMinBlockDistance(DEFAULT_MIN_BLOCK_DISTANCE + 10n)) + 
.to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, manageLimitsRole); + }); + + it("allows manage limits role to set top-up balance limits", async () => { + const newTarget = DEFAULT_TARGET_BALANCE_GWEI + 10n ** 9n; + const newMinTopUp = DEFAULT_MIN_TOP_UP_GWEI + 10n ** 8n; + await expect(topUpGateway.connect(limitsManager).setTopUpBalanceLimits(newTarget, newMinTopUp)) + .to.emit(topUpGateway, "TopUpBalanceLimitsChanged") + .withArgs(newTarget, newMinTopUp); + expect(await topUpGateway.getTargetBalanceGwei()).to.equal(newTarget); + expect(await topUpGateway.getMinTopUpGwei()).to.equal(newMinTopUp); + }); + + it("reverts when non-manager tries to set top-up balance limits", async () => { + await expect( + topUpGateway.connect(stranger).setTopUpBalanceLimits(DEFAULT_TARGET_BALANCE_GWEI, DEFAULT_MIN_TOP_UP_GWEI), + ) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, manageLimitsRole); + }); + + it("reverts when minTopUp exceeds targetBalance", async () => { + await expect(topUpGateway.connect(limitsManager).setTopUpBalanceLimits(100n, 200n)).to.be.revertedWithCustomError( + topUpGateway, + "MinTopUpExceedsTarget", + ); + }); + }); + + describe("topUp", () => { + it("reverts when caller lacks the role", async () => { + const data = await buildTopUpData(); + await expect(topUpGateway.connect(stranger).topUp(data)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, topUpRole); + }); + + it("reverts when validator list is empty", async () => { + const data = await buildTopUpData(); + data.validatorIndices = []; + data.keyIndices = []; + data.operatorIds = []; + data.validatorWitness = []; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongArrayLength", + ); + }); + + it("reverts when array lengths mismatch", async () => 
{ + const data = await buildTopUpData(); + data.keyIndices = [1n, 2n]; + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongArrayLength", + ); + }); + + it("reverts when validators count exceeds the limit", async () => { + await topUpGateway.connect(limitsManager).setMaxValidatorsPerTopUp(1n); + const data = await buildTopUpData(); + data.validatorIndices = [1n, 2n]; + data.keyIndices = [1n, 2n]; + data.operatorIds = [1n, 2n]; + const secondPubkey = `0x${"22".repeat(48)}`; + data.validatorWitness = [ + data.validatorWitness[0], + { + ...data.validatorWitness[0], + pubkey: secondPubkey, + }, + ]; + data.pendingBalanceGwei = [0n, 0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "MaxValidatorsPerTopUpExceeded", + ); + }); + it("reverts when validatorIndices contain duplicates", async () => { + const data = await buildTopUpData(); + data.validatorIndices = [1n, 1n]; + data.keyIndices = [1n, 1n]; + data.operatorIds = [1n, 1n]; + const secondPubkey = `0x${"22".repeat(48)}`; + data.validatorWitness = [ + data.validatorWitness[0], + { + ...data.validatorWitness[0], + pubkey: secondPubkey, + }, + ]; + data.pendingBalanceGwei = [0n, 0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "InvalidValidatorIndicesSortOrder", + ); + }); + + it("reverts when beacon data is too old", async () => { + await time.increase(400); + const now = BigInt(await time.latest()); + const data = await buildTopUpData(); + data.beaconRootData.childBlockTimestamp = now - 400n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "RootIsTooOld", + ); + }); + + it("reverts when root precedes last top up", async () => { + const timestamp = BigInt(await time.latest()); + await topUpGateway.harness_setLastTopUpTimestamp(timestamp); + const 
data = await buildTopUpData(); + data.beaconRootData.childBlockTimestamp = timestamp; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "RootPrecedesLastTopUp", + ); + }); + + it("reverts when withdrawal credentials type is not 0x02", async () => { + await stakingRouter.setWithdrawalCredentials(MODULE_ID, WC_TYPE_01); + const data = await buildTopUpData(); + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongWithdrawalCredentials", + ); + }); + + it("reverts when block distance is not met", async () => { + // Set a large min block distance so we can test the revert + await topUpGateway.connect(limitsManager).setMinBlockDistance(100n); + + // First successful top-up sets lastTopUpBlock + const data = await buildTopUpData(); + await topUpGateway.connect(topUpOperator).topUp(data); + + // Immediately try again - should fail since we haven't mined enough blocks + const data2 = await buildTopUpData(); + data2.beaconRootData.slot = data.beaconRootData.slot + 1n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data2)).to.be.revertedWithCustomError( + topUpGateway, + "MinBlockDistanceNotMet", + ); + }); + + it("returns zero top-up limit when balance exceeds target", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].effectiveBalance = DEFAULT_TARGET_BALANCE_GWEI - DEFAULT_MIN_TOP_UP_GWEI + 1n; + data.pendingBalanceGwei = [0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("reverts when pubkey length is invalid", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].pubkey = "0x1234"; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongPubkeyLength", + ); + 
}); + + it("calls StakingRouter.topUp and updates last timestamp", async () => { + const data = await buildTopUpData(); + data.pendingBalanceGwei = [0n]; + // topUp = targetBalance - currentTotal + const expectedTopUpGwei = DEFAULT_TARGET_BALANCE_GWEI - data.validatorWitness[0].effectiveBalance; + const expectedTopUpWei = expectedTopUpGwei * 1_000_000_000n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [expectedTopUpWei]) + .and.to.emit(topUpGateway, "LastTopUpChanged"); + + const lastTimestamp = await topUpGateway.getLastTopUpTimestamp(); + expect(lastTimestamp).to.be.gt(0n); + expect(await stakingRouter.topUpCalls()).to.equal(1n); + }); + + it("reduces top-up limit by pending deposit amount", async () => { + const data = await buildTopUpData(); + const pendingAmount = 100n * 10n ** 9n; + data.pendingBalanceGwei = [pendingAmount]; // 100 Gwei + + const expectedTopUpGwei = DEFAULT_TARGET_BALANCE_GWEI - data.validatorWitness[0].effectiveBalance - pendingAmount; + // topUpLimits are now in wei + const expectedTopUpWei = expectedTopUpGwei * 1_000_000_000n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [expectedTopUpWei]); + }); + + it("returns zero when topUp < minTopUp (balance + pending just below target)", async () => { + const data = await buildTopUpData(); + // Set balance so that topUp = targetBalance - currentTotal < minTopUp + // targetBalance = 2046.75 ETH, minTopUp = 1 ETH → threshold = 2045.75 ETH + data.validatorWitness[0].effectiveBalance = DEFAULT_TARGET_BALANCE_GWEI - DEFAULT_MIN_TOP_UP_GWEI + 1n; + data.pendingBalanceGwei = [0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, 
data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns zero when balance + pending exactly equals target", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].effectiveBalance = 2045n * 10n ** 9n; + data.pendingBalanceGwei[0] = DEFAULT_TARGET_BALANCE_GWEI - data.validatorWitness[0].effectiveBalance; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns exactly minTopUp when balance is at threshold", async () => { + const data = await buildTopUpData(); + // Set balance so topUp = exactly minTopUp (= 1 ETH) + data.validatorWitness[0].effectiveBalance = DEFAULT_TARGET_BALANCE_GWEI - DEFAULT_MIN_TOP_UP_GWEI; + data.pendingBalanceGwei = [0n]; + + const expectedTopUpWei = DEFAULT_MIN_TOP_UP_GWEI * 1_000_000_000n; + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [expectedTopUpWei]); + }); + + it("returns zero when validator is slashed", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].slashed = true; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns zero when validator has exitEpoch set", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].exitEpoch = 1000n; // not FAR_FUTURE_EPOCH + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns zero when validator has withdrawableEpoch set", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].withdrawableEpoch = 
2000n; // not FAR_FUTURE_EPOCH + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("revert if validator is not active", async () => { + const data = await buildTopUpData(); + const epoch = data.beaconRootData.slot / SLOTS_PER_EPOCH; + // Validator should be activated earlier than current epoch + data.validatorWitness[0].activationEpoch = epoch + 1n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "ValidatorIsNotActivated", + ); + }); + }); + + describe("role management", () => { + it("DEFAULT_ADMIN_ROLE can grant roles", async () => { + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.false; + await topUpGateway.connect(admin).grantRole(topUpRole, stranger.address); + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.true; + }); + + it("DEFAULT_ADMIN_ROLE can revoke roles", async () => { + await topUpGateway.connect(admin).grantRole(topUpRole, stranger.address); + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.true; + await topUpGateway.connect(admin).revokeRole(topUpRole, stranger.address); + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.false; + }); + + it("non-admin cannot grant roles", async () => { + await expect(topUpGateway.connect(stranger).grantRole(topUpRole, stranger.address)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, await topUpGateway.DEFAULT_ADMIN_ROLE()); + }); + }); + + describe("canTopUp", () => { + it("returns false when module is not registered", async () => { + expect(await topUpGateway.canTopUp(999n)).to.equal(false); + }); + + it("returns false when module is inactive", async () => { + await stakingRouter.setModuleActive(MODULE_ID, false); + expect(await 
topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns false when block distance is not met", async () => { + await topUpGateway.connect(limitsManager).setMinBlockDistance(DEFAULT_MIN_BLOCK_DISTANCE + 1n); + await topUpGateway.harness_setLastTopUpData(); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns false when Lido cannot deposit", async () => { + await lido.setCanDeposit(false); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns false when withdrawal credentials are not 0x02", async () => { + await stakingRouter.setWithdrawalCredentials(MODULE_ID, WC_TYPE_01); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns true when all conditions are satisfied", async () => { + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(true); + }); + }); +}); diff --git a/test/0.8.9/accounting.handleOracleReport.test.ts b/test/0.8.9/accounting.handleOracleReport.test.ts index 4615d2293b..426771bc0f 100644 --- a/test/0.8.9/accounting.handleOracleReport.test.ts +++ b/test/0.8.9/accounting.handleOracleReport.test.ts @@ -56,6 +56,14 @@ describe("Accounting.sol:report", () => { new VaultHub__MockForAccountingReport__factory(deployer).deploy(), ]); + await stakingRouter.mock__getStakingRewardsDistribution( + [], // recipients + [], // stakingModuleIds + [], // stakingModuleFees + 0, // totalFee + 100n * 10n ** 18n, // precisionPoints = 100% + ); + locator = await deployLidoLocator( { lido, @@ -69,7 +77,8 @@ describe("Accounting.sol:report", () => { deployer, ); - const accountingImpl = await ethers.deployContract("Accounting", [locator, lido], deployer); + const accountingImpl = await ethers.deployContract("Accounting", [locator, lido]); + const accountingProxy = await ethers.deployContract( "OssifiableProxy", [accountingImpl, deployer, new Uint8Array()], @@ -83,11 +92,12 @@ describe("Accounting.sol:report", () => { }); function report(overrides?: 
Partial): ReportValuesStruct { + const now = Math.floor(Date.now() / 1000); return { - timestamp: 0n, - timeElapsed: 0n, - clValidators: 0n, - clBalance: 0n, + timestamp: BigInt(now), + timeElapsed: 12n, + clValidatorsBalance: 0n, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, sharesRequestedToBurn: 0n, @@ -124,28 +134,32 @@ describe("Accounting.sol:report", () => { }); context("handleOracleReport", () => { - it("Update CL validators count if reported more", async () => { - let depositedValidators = 100n; - await lido.mock__setDepositedValidators(depositedValidators); + it("Update CL balances when reported", async () => { + await lido.mock__setDepositedValidators(100n); + + // Setup deposits mock in StakingRouter + await stakingRouter.mock__setDepositAmountFromLastSlot(ether("150")); - // first report, 100 validators await accounting.handleOracleReport( report({ - clValidators: depositedValidators, + clValidatorsBalance: ether("100"), + clPendingBalance: ether("50"), }), ); - expect(await lido.reportClValidators()).to.equal(depositedValidators); + expect(await lido.reportClValidatorsBalance()).to.equal(ether("100")); + expect(await lido.reportClPendingBalance()).to.equal(ether("50")); - depositedValidators = 101n; - await lido.mock__setDepositedValidators(depositedValidators); + await lido.mock__setDepositedValidators(101n); + await stakingRouter.mock__setDepositAmountFromLastSlot(ether("20")); - // second report, 101 validators await accounting.handleOracleReport( report({ - clValidators: depositedValidators, + clValidatorsBalance: ether("110"), + clPendingBalance: ether("60"), }), ); - expect(await lido.reportClValidators()).to.equal(depositedValidators); + expect(await lido.reportClValidatorsBalance()).to.equal(ether("110")); + expect(await lido.reportClPendingBalance()).to.equal(ether("60")); }); it("Reverts if the `checkAccountingOracleReport` sanity check fails", async () => { @@ -181,19 +195,6 @@ 
describe("Accounting.sol:report", () => { ).to.be.revertedWithCustomError(accounting, "IncorrectReportTimestamp"); }); - it("Reverts if the reported validators count is less than the current count", async () => { - const depositedValidators = 100n; - await expect( - accounting.handleOracleReport( - report({ - clValidators: depositedValidators, - }), - ), - ) - .to.be.revertedWithCustomError(accounting, "IncorrectReportValidators") - .withArgs(100n, 0n, 0n); - }); - it("Does not revert if the `checkWithdrawalQueueOracleReport` sanity check fails but no withdrawal batches were reported", async () => { await oracleReportSanityChecker.mock__checkWithdrawalQueueOracleReportReverts(true); await withdrawalQueue.mock__isPaused(true); @@ -283,7 +284,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: 1n, // made 1 wei of profit, triggers reward processing + clValidatorsBalance: 1n, // made 1 wei of profit, triggers reward processing }), ), ).to.be.revertedWithPanic(0x01); // assert @@ -312,7 +313,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: 1n, // made 1 wei of profit, triggers reward processing + clValidatorsBalance: 1n, // made 1 wei of profit, triggers reward processing }), ), ).to.be.revertedWithPanic(0x01); // assert @@ -338,7 +339,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: 1n, + clValidatorsBalance: 1n, }), ), ).not.to.emit(stakingRouter, "Mock__MintedRewardsReported"); @@ -363,10 +364,10 @@ describe("Accounting.sol:report", () => { precisionPoints, ); - const clBalance = ether("1.0"); + const clValidatorsBalance = ether("1.0"); const expectedSharesToMint = - (clBalance * totalFee * (await lido.getTotalShares())) / - (((await lido.getTotalPooledEther()) + clBalance) * precisionPoints - clBalance * totalFee); + (clValidatorsBalance * totalFee * (await 
lido.getTotalShares())) / + (((await lido.getTotalPooledEther()) + clValidatorsBalance) * precisionPoints - clValidatorsBalance * totalFee); const expectedModuleRewardInShares = expectedSharesToMint / (totalFee / stakingModule.fee); const expectedTreasuryCutInShares = expectedSharesToMint - expectedModuleRewardInShares; @@ -374,7 +375,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: ether("1.0"), // 1 ether of profit + clValidatorsBalance: ether("1.0"), // 1 ether of profit }), ), ) @@ -406,18 +407,18 @@ describe("Accounting.sol:report", () => { precisionPoints, ); - const clBalance = ether("1.0"); + const clValidatorsBalance = ether("1.0"); const expectedSharesToMint = - (clBalance * totalFee * (await lido.getTotalShares())) / - (((await lido.getTotalPooledEther()) + clBalance) * precisionPoints - clBalance * totalFee); + (clValidatorsBalance * totalFee * (await lido.getTotalShares())) / + (((await lido.getTotalPooledEther()) + clValidatorsBalance) * precisionPoints - clValidatorsBalance * totalFee); const expectedTreasuryCutInShares = expectedSharesToMint; await expect( accounting.handleOracleReport( report({ - clBalance: ether("1.0"), // 1 ether of profit + clValidatorsBalance: ether("1.0"), // 1 ether of profit }), ), ) diff --git a/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol b/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol index fb4dff79c9..4446170ea8 100644 --- a/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol +++ b/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol @@ -36,8 +36,8 @@ contract AccountingOracle__MockForSanityChecker { ReportValues( data.refSlot * SECONDS_PER_SLOT, slotsElapsed * SECONDS_PER_SLOT, - data.numValidators, - data.clBalanceGwei * 1e9, + data.clValidatorsBalanceGwei * 1e9, + data.clPendingBalanceGwei * 1e9, data.withdrawalVaultBalance, data.elRewardsVaultBalance, data.sharesRequestedToBurn, diff --git 
a/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol b/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol index 15ae72c3f5..267ec10460 100644 --- a/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol +++ b/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol @@ -13,8 +13,13 @@ contract Accounting__MockForAccountingOracle is IReportReceiver { } HandleOracleReportCallData public lastCall__handleOracleReport; + uint256 public totalDepositsRecorded; function handleOracleReport(ReportValues memory values) external override { lastCall__handleOracleReport = HandleOracleReportCallData(values, ++lastCall__handleOracleReport.callCount); } + + function recordDeposit(uint256 amount) external { + totalDepositsRecorded += amount; + } } diff --git a/test/0.8.9/contracts/EIP7251ConsolidationRequest__Mock.sol b/test/0.8.9/contracts/EIP7251ConsolidationRequest__Mock.sol new file mode 100644 index 0000000000..62d79fc060 --- /dev/null +++ b/test/0.8.9/contracts/EIP7251ConsolidationRequest__Mock.sol @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/** + * @notice This is a mock of EIP-7251's consolidation request pre-deploy contract. 
+ */ +contract EIP7251ConsolidationRequest__Mock { + uint256[100] __gap; // NB: to avoid storage collision with the predeployed withdrawals contract https://github.com/NomicFoundation/edr/issues/865 + bytes public fee; + bool public mock__failOnAddRequest; + bool public mock__failOnGetFee; + + bool public constant MOCK = true; + + event ConsolidationRequestAdded__Mock(bytes request, uint256 fee); + + function mock__setFailOnAddRequest(bool _failOnAddRequest) external { + mock__failOnAddRequest = _failOnAddRequest; + } + + function mock__setFailOnGetFee(bool _failOnGetFee) external { + mock__failOnGetFee = _failOnGetFee; + } + + function mock__setFee(uint256 _fee) external { + require(_fee > 0, "fee must be greater than 0"); + fee = abi.encode(_fee); + } + + function mock__setFeeRaw(bytes calldata _rawFeeBytes) external { + fee = _rawFeeBytes; + } + + // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7251.md#add-consolidation-request + fallback(bytes calldata input) external payable returns (bytes memory) { + // calculate the fee path + if (input.length == 0) { + require(!mock__failOnGetFee, "Inhibitor still active"); + return fee; + } + + // add consolidation request path + require(input.length == 48 * 2, "Invalid callData length"); // 48 bytes source + 48 bytes target + require(!mock__failOnAddRequest, "fail on add request"); + + uint256 feeValue = abi.decode(fee, (uint256)); + if (msg.value < feeValue) { + revert("Insufficient value for fee"); + } + + emit ConsolidationRequestAdded__Mock(input, msg.value); + } +} diff --git a/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol b/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol index 48d7118523..175e225804 100644 --- a/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol +++ b/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol @@ -23,6 +23,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address oracleDaemonConfig; address validatorExitDelayVerifier; address 
triggerableWithdrawalsGateway; + address consolidationGateway; address accounting; address predepositGuarantee; address wstETH; @@ -30,6 +31,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address vaultFactory; address lazyOracle; address operatorGrid; + address topUpGateway; } address public immutable lido; @@ -47,6 +49,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address public immutable oracleDaemonConfig; address public immutable validatorExitDelayVerifier; address public immutable triggerableWithdrawalsGateway; + address public immutable consolidationGateway; address public immutable accounting; address public immutable predepositGuarantee; address public immutable wstETH; @@ -54,6 +57,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address public immutable vaultFactory; address public immutable lazyOracle; address public immutable operatorGrid; + address public immutable topUpGateway; constructor(ContractAddresses memory addresses) { lido = addresses.lido; @@ -71,6 +75,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { oracleDaemonConfig = addresses.oracleDaemonConfig; validatorExitDelayVerifier = addresses.validatorExitDelayVerifier; triggerableWithdrawalsGateway = addresses.triggerableWithdrawalsGateway; + consolidationGateway = addresses.consolidationGateway; accounting = addresses.accounting; wstETH = addresses.wstETH; predepositGuarantee = addresses.predepositGuarantee; @@ -78,6 +83,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { vaultFactory = addresses.vaultFactory; lazyOracle = addresses.lazyOracle; operatorGrid = addresses.operatorGrid; + topUpGateway = addresses.topUpGateway; } function coreComponents() external view returns (address, address, address, address, address, address) { diff --git a/test/0.8.9/contracts/LidoLocator__MockMutable.sol b/test/0.8.9/contracts/LidoLocator__MockMutable.sol index 99c4aefaa4..8417d2b013 100644 --- 
a/test/0.8.9/contracts/LidoLocator__MockMutable.sol +++ b/test/0.8.9/contracts/LidoLocator__MockMutable.sol @@ -22,6 +22,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address oracleDaemonConfig; address validatorExitDelayVerifier; address triggerableWithdrawalsGateway; + address consolidationGateway; address accounting; address predepositGuarantee; address wstETH; @@ -29,6 +30,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address vaultFactory; address lazyOracle; address operatorGrid; + address topUpGateway; } error ZeroAddress(); @@ -48,6 +50,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address public immutable oracleDaemonConfig; address public immutable validatorExitDelayVerifier; address public immutable triggerableWithdrawalsGateway; + address public immutable consolidationGateway; address public immutable accounting; address public immutable predepositGuarantee; address public immutable wstETH; @@ -55,6 +58,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address public immutable vaultFactory; address public immutable lazyOracle; address public immutable operatorGrid; + address public immutable topUpGateway; constructor(Config memory _config) { accountingOracle = _assertNonZero(_config.accountingOracle); @@ -72,6 +76,7 @@ contract LidoLocator__MockMutable is ILidoLocator { oracleDaemonConfig = _assertNonZero(_config.oracleDaemonConfig); validatorExitDelayVerifier = _assertNonZero(_config.validatorExitDelayVerifier); triggerableWithdrawalsGateway = _assertNonZero(_config.triggerableWithdrawalsGateway); + consolidationGateway = _assertNonZero(_config.consolidationGateway); accounting = _assertNonZero(_config.accounting); wstETH = _assertNonZero(_config.wstETH); predepositGuarantee = _assertNonZero(_config.predepositGuarantee); @@ -79,6 +84,7 @@ contract LidoLocator__MockMutable is ILidoLocator { vaultFactory = _assertNonZero(_config.vaultFactory); lazyOracle = _assertNonZero(_config.lazyOracle); operatorGrid = 
_assertNonZero(_config.operatorGrid); + topUpGateway = _assertNonZero(_config.topUpGateway); } function coreComponents() external view returns (address, address, address, address, address, address) { diff --git a/test/0.8.9/contracts/Lido__MockForAccounting.sol b/test/0.8.9/contracts/Lido__MockForAccounting.sol index 7e8209971d..5fe4a4ade7 100644 --- a/test/0.8.9/contracts/Lido__MockForAccounting.sol +++ b/test/0.8.9/contracts/Lido__MockForAccounting.sol @@ -7,9 +7,13 @@ contract Lido__MockForAccounting { uint256 public depositedValidatorsValue; uint256 public reportClValidators; uint256 public reportClBalance; + uint256 public reportClValidatorsBalance; + uint256 public reportClPendingBalance; + uint256 public depositedLastReport; + uint256 public depositedCurrentReport; - // Emitted when validators number delivered by the oracle - event CLValidatorsUpdated(uint256 indexed reportTimestamp, uint256 preCLValidators, uint256 postCLValidators); + // Emitted when CL balances are updated by the oracle + event CLBalancesUpdated(uint256 indexed reportTimestamp, uint256 clValidatorsBalance, uint256 clPendingBalance); event Mock__CollectRewardsAndProcessWithdrawals( uint256 _reportTimestamp, uint256 _reportClBalance, @@ -31,14 +35,46 @@ contract Lido__MockForAccounting { depositedValidatorsValue = _amount; } + function mock__setClValidatorsBalance(uint256 _amount) external { + reportClValidatorsBalance = _amount; + } + + function mock__setClPendingBalance(uint256 _amount) external { + reportClPendingBalance = _amount; + } + + function mock__setDepositedLastReportBalance(uint256 _amount) external { + depositedLastReport = _amount; + } + + function mock__setDepositedCurrentReportBalance(uint256 _amount) external { + depositedCurrentReport = _amount; + } + function getBeaconStat() external view returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance) { depositedValidators = depositedValidatorsValue; - beaconValidators = reportClValidators; - 
beaconBalance = 0; + beaconValidators = depositedValidators; + beaconBalance = reportClValidatorsBalance + reportClPendingBalance; + } + + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) + { + clValidatorsBalanceAtLastReport = reportClValidatorsBalance; + clPendingBalanceAtLastReport = reportClPendingBalance; + depositedSinceLastReport = depositedLastReport; + depositedForCurrentReport = depositedCurrentReport; } function getTotalPooledEther() external pure returns (uint256) { @@ -91,24 +127,15 @@ contract Lido__MockForAccounting { uint256 _sharesMintedAsFees ) external {} - /** - * @notice Process CL related state changes as a part of the report processing - * @dev All data validation was done by Accounting and OracleReportSanityChecker - * @param _reportTimestamp timestamp of the report - * @param _preClValidators number of validators in the previous CL state (for event compatibility) - * @param _reportClValidators number of validators in the current CL state - * @param _reportClBalance total balance of the current CL state - */ function processClStateUpdate( uint256 _reportTimestamp, - uint256 _preClValidators, - uint256 _reportClValidators, - uint256 _reportClBalance + uint256 _clValidatorsBalance, + uint256 _clPendingBalance ) external { - reportClValidators = _reportClValidators; - reportClBalance = _reportClBalance; + reportClValidatorsBalance = _clValidatorsBalance; + reportClPendingBalance = _clPendingBalance; - emit CLValidatorsUpdated(_reportTimestamp, _preClValidators, _reportClValidators); + emit CLBalancesUpdated(_reportTimestamp, _clValidatorsBalance, _clPendingBalance); } function mintShares(address _recipient, uint256 _sharesAmount) external { diff --git a/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol b/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol index 
f65ceae4fe..76b466ca94 100644 --- a/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol +++ b/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol @@ -18,12 +18,12 @@ contract Lido__MockForDepositSecurityModule { } function deposit( - uint256 maxDepositsCount, + uint256 maxDepositsAmount, uint256 stakingModuleId, bytes calldata depositCalldata ) external returns (uint256 keysCount) { - emit StakingModuleDeposited(maxDepositsCount, uint24(stakingModuleId), depositCalldata); - return maxDepositsCount; + emit StakingModuleDeposited(maxDepositsAmount, uint24(stakingModuleId), depositCalldata); + return maxDepositsAmount; } function canDeposit() external view returns (bool) { diff --git a/test/0.8.9/contracts/Lido__MockForSanityChecker.sol b/test/0.8.9/contracts/Lido__MockForSanityChecker.sol new file mode 100644 index 0000000000..09c0356f61 --- /dev/null +++ b/test/0.8.9/contracts/Lido__MockForSanityChecker.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract Lido__MockForSanityChecker { + uint256 public clValidatorsBalance; + uint256 public clPendingBalance; + uint256 public depositedLastReport; + uint256 public depositedCurrentReport; + uint256 public contractVersion; + + function mock__setBalanceStats( + uint256 _clValidatorsBalance, + uint256 _clPendingBalance, + uint256 _depositedLastReport, + uint256 _depositedCurrentReport + ) external { + clValidatorsBalance = _clValidatorsBalance; + clPendingBalance = _clPendingBalance; + depositedLastReport = _depositedLastReport; + depositedCurrentReport = _depositedCurrentReport; + } + + function mock__setContractVersion(uint256 _version) external { + contractVersion = _version; + } + + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) + { + 
clValidatorsBalanceAtLastReport = clValidatorsBalance; + clPendingBalanceAtLastReport = clPendingBalance; + depositedSinceLastReport = depositedLastReport; + depositedForCurrentReport = depositedCurrentReport; + } + + function getContractVersion() external view returns (uint256) { + return contractVersion; + } +} diff --git a/test/0.8.9/contracts/NodeOperatorsRegistry__Mock.sol b/test/0.8.9/contracts/NodeOperatorsRegistry__Mock.sol new file mode 100644 index 0000000000..28de36e2d4 --- /dev/null +++ b/test/0.8.9/contracts/NodeOperatorsRegistry__Mock.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity 0.8.9; + +/** + * @notice Mock NodeOperatorsRegistry for testing + * @dev This mock is permissive - it accepts any pubkey for any (nodeOpId, keyIndex) combination + * Tests can optionally configure specific keys using setSigningKey() + */ +contract NodeOperatorsRegistry__Mock { + mapping(uint256 => mapping(uint256 => bytes)) public signingKeys; + + // If true, return any non-empty key even if not explicitly set + bool public permissiveMode = true; + + function setSigningKey(uint256 nodeOperatorId, uint256 keyIndex, bytes memory key) external { + signingKeys[nodeOperatorId][keyIndex] = key; + } + + function setPermissiveMode(bool _permissive) external { + permissiveMode = _permissive; + } + + function getSigningKey( + uint256 nodeOperatorId, + uint256 keyIndex + ) external view returns (bytes memory key, bytes memory depositSignature, bool used) { + key = signingKeys[nodeOperatorId][keyIndex]; + + // In permissive mode, return empty key if not explicitly set + // The ValidatorsExitBus contract will skip validation for empty keys + // This allows tests to work without pre-configuring every possible (nodeOpId, keyIndex) combination + // Tests can still explicitly set keys using setSigningKey() if needed + + depositSignature = new bytes(96); + used = false; + } + + function getNodeOperatorsCount() external pure returns 
(uint256) { + return 100; + } +} diff --git a/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol b/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol index 1be8722d58..02c4f940cb 100644 --- a/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol +++ b/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol @@ -7,40 +7,57 @@ pragma solidity 0.8.9; import { OracleReportSanityChecker, LimitsList, - LimitsListPacked, - LimitsListPacker + AccountingCoreLimitsPacked, + OperationalLimitsPacked, + LimitsListPacker, + LimitsListUnpacker } from "contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol"; contract OracleReportSanityCheckerWrapper is OracleReportSanityChecker { using LimitsListPacker for LimitsList; + using LimitsListUnpacker for AccountingCoreLimitsPacked; - LimitsListPacked private _limitsListPacked; + // Test-only storage for codec roundtrip checks; these are not the parent's private slots. + AccountingCoreLimitsPacked private _accountingCoreLimitsPacked; + OperationalLimitsPacked private _operationalLimitsPacked; constructor( address _lidoLocator, - address _accountingOracle, address _accounting, address _admin, - LimitsList memory _limitsList - ) OracleReportSanityChecker(_lidoLocator, _accountingOracle, _accounting, _admin, _limitsList) {} - - function addReportData(uint256 _timestamp, uint256 _exitedValidatorsCount, uint256 _negativeCLRebase) public { - _addReportData(_timestamp, _exitedValidatorsCount, _negativeCLRebase); + LimitsList memory _limitsList, + bool _postMigrationFirstReportDone + ) OracleReportSanityChecker(_lidoLocator, _accounting, _admin, _limitsList) { + if (_postMigrationFirstReportDone) { + _finalizePostReportState(0, 0); + } } - function sumNegativeRebasesNotOlderThan(uint256 _timestamp) public view returns (uint256) { - return _sumNegativeRebasesNotOlderThan(_timestamp); + function addReportData(uint256 _timestamp, uint256 _clBalance, uint256 _deposits, uint256 _clWithdrawals) public { + 
_addReportData(_timestamp, _clBalance, _deposits, _clWithdrawals); } - function exitedValidatorsAtTimestamp(uint256 _timestamp) public view returns (uint256) { - return _exitedValidatorsAtTimestamp(_timestamp); + function exposeAccountingCorePackedLimits() public view returns (AccountingCoreLimitsPacked memory) { + return _accountingCoreLimitsPacked; } - function exposePackedLimits() public view returns (LimitsListPacked memory) { - return _limitsListPacked; + function exposeOperationalPackedLimits() public view returns (OperationalLimitsPacked memory) { + return _operationalLimitsPacked; } function packAndStore() public { - _limitsListPacked = getOracleReportLimits().pack(); + LimitsList memory limits = getOracleReportLimits(); + _accountingCoreLimitsPacked = limits.packAccountingCore(); + _operationalLimitsPacked = limits.packOperational(); + } + + function packRawLimits( + LimitsList memory _limitsList + ) external pure returns (AccountingCoreLimitsPacked memory, OperationalLimitsPacked memory) { + return (_limitsList.packAccountingCore(), _limitsList.packOperational()); + } + + function roundtripRawLimits(LimitsList memory _limitsList) external pure returns (LimitsList memory) { + return _limitsList.packAccountingCore().unpack(_limitsList.packOperational()); } } diff --git a/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol b/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol index 4d0235eb3d..673064e783 100644 --- a/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol +++ b/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol @@ -9,7 +9,7 @@ contract OracleReportSanityChecker__MockForAccounting { bool private checkSimulatedShareRateReverts; uint256 private _withdrawals; uint256 private _elRewards; - uint256 private _simulatedSharesToBurn; + uint256 private _sharesFromWQToBurn; uint256 private _sharesToBurn; error CheckAccountingOracleReportReverts(); @@ -18,13 +18,15 @@ contract 
OracleReportSanityChecker__MockForAccounting { function checkAccountingOracleReport( uint256, //_timeElapsed, - uint256, //_preCLBalance, - uint256, //_postCLBalance, + uint256, //_preCLValidatorsBalance, + uint256, //_preCLPendingBalance, + uint256, //_postCLValidatorsBalance, + uint256, //_postCLPendingBalance, uint256, //_withdrawalVaultBalance, uint256, //_elRewardsVaultBalance, uint256, //_sharesRequestedToBurn, - uint256, //_preCLValidators, - uint256 //_postCLValidators + uint256, //_deposits + uint256 //_withdrawalsVaultTransfer ) external view { if (checkAccountingOracleReportReverts) revert CheckAccountingOracleReportReverts(); } @@ -37,31 +39,27 @@ contract OracleReportSanityChecker__MockForAccounting { } function smoothenTokenRebase( - uint256, // _preTotalPooledEther, - uint256, // _preTotalShares, + uint256, // _preInternalEther, + uint256, // _preInternalShares, uint256, // _preCLBalance, uint256, // _postCLBalance, uint256, // _withdrawalVaultBalance, uint256, // _elRewardsVaultBalance, uint256, // _sharesRequestedToBurn, - uint256, // _etherToLockForWithdrawals, - uint256 // _newSharesToBurnForWithdrawals - ) - external - view - returns (uint256 withdrawals, uint256 elRewards, uint256 simulatedSharesToBurn, uint256 sharesToBurn) - { + uint256, // _etherToFinalizeWithdrawals, + uint256 // _sharesToBurnFromWithdrawalQueue + ) external view returns (uint256 withdrawals, uint256 elRewards, uint256 sharesFromWQToBurn, uint256 sharesToBurn) { withdrawals = _withdrawals; elRewards = _elRewards; - simulatedSharesToBurn = _simulatedSharesToBurn; + sharesFromWQToBurn = _sharesFromWQToBurn; sharesToBurn = _sharesToBurn; } function checkSimulatedShareRate( - uint256, // _postTotalPooledEther, - uint256, // _postTotalShares, - uint256, // _etherLockedOnWithdrawalQueue, - uint256, // _sharesBurntDueToWithdrawals, + uint256, // _postInternalEther, + uint256, // _postInternalShares, + uint256, // _etherToFinalizeWithdrawals, + uint256, // 
_sharesToBurnFromWithdrawalQueue, uint256 // _simulatedShareRate ) external view { if (checkSimulatedShareRateReverts) revert CheckSimulatedShareRateReverts(); @@ -84,12 +82,12 @@ contract OracleReportSanityChecker__MockForAccounting { function mock__smoothenTokenRebaseReturn( uint256 withdrawals, uint256 elRewards, - uint256 simulatedSharesToBurn, + uint256 sharesFromWQToBurn, uint256 sharesToBurn ) external { _withdrawals = withdrawals; _elRewards = elRewards; - _simulatedSharesToBurn = simulatedSharesToBurn; + _sharesFromWQToBurn = sharesFromWQToBurn; _sharesToBurn = sharesToBurn; } } diff --git a/test/0.8.9/contracts/OracleReportSanityChecker__MockForExitBusWeights.sol b/test/0.8.9/contracts/OracleReportSanityChecker__MockForExitBusWeights.sol new file mode 100644 index 0000000000..1613464d15 --- /dev/null +++ b/test/0.8.9/contracts/OracleReportSanityChecker__MockForExitBusWeights.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/// @notice Minimal mock to control MaxEB weights for ValidatorsExitBus tests +contract OracleReportSanityChecker__MockForExitBusWeights { + uint256 private _w1; + uint256 private _w2; + + constructor(uint256 w1, uint256 w2) { + _w1 = w1; + _w2 = w2; + } + + function setWeights(uint256 w1, uint256 w2) external { + _w1 = w1; + _w2 = w2; + } + + function getMaxEffectiveBalanceWeightWCType01() external view returns (uint256) { + return _w1; + } + + function getMaxEffectiveBalanceWeightWCType02() external view returns (uint256) { + return _w2; + } +} diff --git a/test/0.8.9/contracts/StakingModule__MockBadKeys.sol b/test/0.8.9/contracts/StakingModule__MockBadKeys.sol new file mode 100644 index 0000000000..deefcd0014 --- /dev/null +++ b/test/0.8.9/contracts/StakingModule__MockBadKeys.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/// @notice Minimal mock that returns configurable raw bytes for 
signing keys +contract StakingModule__MockBadKeys { + bytes private _returned; + + function setReturned(bytes calldata data) external { + _returned = data; + } + + function getSigningKeys( + uint256 /* nodeOpId */, + uint256 /* startIndex */, + uint256 /* keysCount */ + ) external view returns (bytes memory) { + return _returned; + } +} diff --git a/test/0.8.9/contracts/StakingModule__MockForKeyVerification.sol b/test/0.8.9/contracts/StakingModule__MockForKeyVerification.sol new file mode 100644 index 0000000000..da9c390eaa --- /dev/null +++ b/test/0.8.9/contracts/StakingModule__MockForKeyVerification.sol @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/** + * @notice Universal mock for staking modules that returns any requested signing key + * @dev This mock implements both legacy (NOR, SDVT) and new (CSM, CuratedV2) interfaces + * and can be used for key verification testing in ValidatorsExitBus + */ +contract StakingModule__MockForKeyVerification { + // Storage: nodeOpId => keyIndex => pubkey (48 bytes) + mapping(uint256 => mapping(uint256 => bytes)) private _keys; + + /// @notice Configure a signing key for testing + /// @param nodeOpId Node operator ID + /// @param keyIndex Key index + /// @param pubkey Public key (48 bytes) + function setSigningKey(uint256 nodeOpId, uint256 keyIndex, bytes calldata pubkey) external { + require(pubkey.length == 48, "Invalid pubkey length"); + _keys[nodeOpId][keyIndex] = pubkey; + } + + /// @notice Legacy interface (NOR, SDVT): getSigningKeys returns pubkeys, signatures, and used flags + /// @param _nodeOperatorId Node operator ID + /// @param _offset Key index to start from + /// @param _limit Number of keys to return + /// @return pubkeys Concatenated public keys (48 bytes each) + /// @return signatures Empty (not needed for exit verification) + /// @return used Empty (not needed for exit verification) + function getSigningKeys( + uint256 _nodeOperatorId, + 
uint256 _offset, + uint256 _limit + ) external view returns (bytes memory pubkeys, bytes memory signatures, bool[] memory used) { + require(_limit == 1, "Mock only supports _limit=1"); + + bytes memory key = _keys[_nodeOperatorId][_offset]; + if (key.length == 0) { + // Permissive mode: generate a deterministic 48-byte key + // This allows tests to work without explicitly configuring every key + bytes32 hash1 = keccak256(abi.encode(_nodeOperatorId, _offset)); + bytes32 hash2 = keccak256(abi.encode(_nodeOperatorId, _offset, 1)); + key = new bytes(48); + assembly { + // Copy first 32 bytes from hash1 + mstore(add(key, 32), hash1) + // Copy next 16 bytes from hash2 (total 48 bytes) + mstore(add(key, 64), hash2) + } + } + + pubkeys = key; + signatures = new bytes(0); + used = new bool[](1); + } +} diff --git a/test/0.8.9/contracts/StakingRouter__Harness.sol b/test/0.8.9/contracts/StakingRouter__Harness.sol deleted file mode 100644 index 054a39b452..0000000000 --- a/test/0.8.9/contracts/StakingRouter__Harness.sol +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: UNLICENSED -// for testing purposes only - -pragma solidity 0.8.9; - -import {StakingRouter} from "contracts/0.8.9/StakingRouter.sol"; -import {UnstructuredStorage} from "contracts/0.8.9/lib/UnstructuredStorage.sol"; - -contract StakingRouter__Harness is StakingRouter { - using UnstructuredStorage for bytes32; - - constructor(address _depositContract) StakingRouter(_depositContract) {} - - function getStakingModuleIndexById(uint256 _stakingModuleId) external view returns (uint256) { - return _getStakingModuleIndexById(_stakingModuleId); - } - - function getStakingModuleByIndex(uint256 _stakingModuleIndex) external view returns (StakingModule memory) { - return _getStakingModuleByIndex(_stakingModuleIndex); - } - - function testing_setBaseVersion(uint256 version) external { - CONTRACT_VERSION_POSITION.setStorageUint256(version); - } - - function testing_setStakingModuleStatus(uint256 _stakingModuleId, 
StakingModuleStatus _status) external { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - _setStakingModuleStatus(stakingModule, _status); - } -} diff --git a/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol b/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol index 83111db9a3..7089be6785 100644 --- a/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol +++ b/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol @@ -6,6 +6,8 @@ pragma solidity 0.8.9; import {IStakingRouter} from "contracts/0.8.9/oracle/AccountingOracle.sol"; contract StakingRouter__MockForAccountingOracle is IStakingRouter { + error InvalidValidatorBalancesReport(); + struct UpdateExitedKeysByModuleCallData { uint256[] moduleIds; uint256[] exitedKeysCounts; @@ -19,6 +21,13 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { } mapping(uint256 => uint256) internal _exitedKeysCountsByModuleId; + mapping(uint256 => uint256) internal _moduleBalancesWei; + mapping(uint256 => uint64) internal _validatorBalancesGweiByModuleId; + mapping(uint256 => uint64) internal _pendingBalancesGweiByModuleId; + mapping(uint256 => bool) internal _moduleExistsById; + uint256[] internal _registeredModuleIds; + + uint256 internal _totalStakingModulesBalanceWei; UpdateExitedKeysByModuleCallData internal _lastCall_updateExitedKeysByModule; @@ -38,6 +47,15 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { /// IStakingRouter /// + function mock__registerStakingModule(uint256 moduleId) external { + if (_moduleExistsById[moduleId]) { + return; + } + + _moduleExistsById[moduleId] = true; + _registeredModuleIds.push(moduleId); + } + function updateExitedValidatorsCountByStakingModule( uint256[] calldata moduleIds, uint256[] calldata exitedKeysCounts @@ -52,11 +70,60 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { uint256 moduleId = moduleIds[i]; 
newlyExitedValidatorsCount += exitedKeysCounts[i] - _exitedKeysCountsByModuleId[moduleId]; _exitedKeysCountsByModuleId[moduleId] = exitedKeysCounts[i]; + _moduleExistsById[moduleId] = true; } return newlyExitedValidatorsCount; } + function reportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external { + this.validateReportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei); + + uint256 totalBalance = _totalStakingModulesBalanceWei; + for (uint256 i = 0; i < _stakingModuleIds.length; ++i) { + uint256 moduleId = _stakingModuleIds[i]; + uint256 previousBalance = _moduleBalancesWei[moduleId]; + uint256 currentBalance = (_validatorBalancesGwei[i]) * 1 gwei; + + if (currentBalance >= previousBalance) { + totalBalance += currentBalance - previousBalance; + } else { + totalBalance -= previousBalance - currentBalance; + } + + _moduleBalancesWei[moduleId] = currentBalance; + _validatorBalancesGweiByModuleId[moduleId] = uint64(_validatorBalancesGwei[i]); + _moduleExistsById[moduleId] = true; + } + _totalStakingModulesBalanceWei = totalBalance; + } + + function validateReportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external view { + uint256 modulesCount = _registeredModuleIds.length; + if (_stakingModuleIds.length != modulesCount || _validatorBalancesGwei.length != modulesCount) { + revert InvalidValidatorBalancesReport(); + } + + for (uint256 i = 0; i < modulesCount; ++i) { + if (_stakingModuleIds[i] != _registeredModuleIds[i]) { + revert InvalidValidatorBalancesReport(); + } + if (_validatorBalancesGwei[i] > type(uint64).max) { + revert InvalidValidatorBalancesReport(); + } + } + } + + function getDepositAmountFromLastSlot(uint256) external view returns (uint256) { + return 0; + } + function reportStakingModuleExitedValidatorsCountByNodeOperator( uint256 stakingModuleId, bytes calldata 
nodeOperatorIds, @@ -70,4 +137,22 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { function onValidatorsCountsByNodeOperatorReportingFinished() external { ++totalCalls_onValidatorsCountsByNodeOperatorReportingFinished; } + + function getModuleValidatorsBalance(uint256 moduleId) external view returns (uint256) { + return _moduleBalancesWei[moduleId]; + } + + function hasStakingModule(uint256 moduleId) external view returns (bool) { + return _moduleExistsById[moduleId]; + } + + function getStakingModuleStateAccounting( + uint256 moduleId + ) external view returns (uint64 validatorsBalanceGwei, uint64 exitedValidatorsCount) { + return (_validatorBalancesGweiByModuleId[moduleId], uint64(_exitedKeysCountsByModuleId[moduleId])); + } + + function getTotalModulesValidatorsBalance() external view returns (uint256) { + return _totalStakingModulesBalanceWei; + } } diff --git a/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol b/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol index d489dd29e3..d812dfad9c 100644 --- a/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol +++ b/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol @@ -1,10 +1,25 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; -import {IStakingRouter} from "contracts/0.8.9/DepositSecurityModule.sol"; -import {StakingRouter} from "contracts/0.8.9/StakingRouter.sol"; +import {StakingModuleStatus} from "contracts/0.8.25/sr/SRTypes.sol"; + +interface IStakingRouter { + function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256); + function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256); + function getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool); + function getStakingModuleNonce(uint256 _stakingModuleId) external view returns 
(uint256); + function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) external view returns (uint256); + function hasStakingModule(uint256 _stakingModuleId) external view returns (bool); + function decreaseStakingModuleVettedKeysCountByNodeOperator( + uint256 _stakingModuleId, + bytes calldata _nodeOperatorIds, + bytes calldata _vettedSigningKeysCounts + ) external; + function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external; + function canDeposit(uint256 _stakingModuleId) external view returns (bool); +} contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { error StakingModuleUnregistered(); @@ -14,17 +29,14 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { bytes nodeOperatorIds, bytes vettedSigningKeysCounts ); - event StakingModuleDeposited(uint256 maxDepositsCount, uint24 stakingModuleId, bytes depositCalldata); - event StakingModuleStatusSet( - uint24 indexed stakingModuleId, - StakingRouter.StakingModuleStatus status, - address setBy - ); + event StakingModuleDeposited(uint24 stakingModuleId, bytes depositCalldata); + event StakingModuleStatusSet(uint24 indexed stakingModuleId, StakingModuleStatus status, address setBy); - StakingRouter.StakingModuleStatus private status; + StakingModuleStatus private status; uint256 private stakingModuleNonce; uint256 private stakingModuleLastDepositBlock; uint256 private stakingModuleMaxDepositsPerBlock; + uint256 private stakingModuleMaxDepositsAmountPerBlock; uint256 private stakingModuleMinDepositBlockDistance; uint256 private registeredStakingModuleId; @@ -32,13 +44,15 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { registeredStakingModuleId = stakingModuleId; } + function receiveDepositableEther() external payable { + // Mock function to receive ETH from Lido.withdrawDepositableEther + } + function deposit( - uint256 maxDepositsCount, uint256 stakingModuleId, bytes calldata depositCalldata - ) external payable 
whenModuleIsRegistered(stakingModuleId) returns (uint256 keysCount) { - emit StakingModuleDeposited(maxDepositsCount, uint24(stakingModuleId), depositCalldata); - return maxDepositsCount; + ) external whenModuleIsRegistered(stakingModuleId) { + emit StakingModuleDeposited(uint24(stakingModuleId), depositCalldata); } function decreaseStakingModuleVettedKeysCountByNodeOperator( @@ -55,13 +69,13 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { function getStakingModuleStatus( uint256 stakingModuleId - ) external view whenModuleIsRegistered(stakingModuleId) returns (StakingRouter.StakingModuleStatus) { + ) external view whenModuleIsRegistered(stakingModuleId) returns (StakingModuleStatus) { return status; } function setStakingModuleStatus( uint256 _stakingModuleId, - StakingRouter.StakingModuleStatus _status + StakingModuleStatus _status ) external whenModuleIsRegistered(_stakingModuleId) { emit StakingModuleStatusSet(uint24(_stakingModuleId), _status, msg.sender); status = _status; @@ -70,19 +84,23 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { function getStakingModuleIsStopped( uint256 stakingModuleId ) external view whenModuleIsRegistered(stakingModuleId) returns (bool) { - return status == StakingRouter.StakingModuleStatus.Stopped; + return status == StakingModuleStatus.Stopped; } function getStakingModuleIsDepositsPaused( uint256 stakingModuleId ) external view whenModuleIsRegistered(stakingModuleId) returns (bool) { - return status == StakingRouter.StakingModuleStatus.DepositsPaused; + return status == StakingModuleStatus.DepositsPaused; + } + + function canDeposit(uint256 _stakingModuleId) external view returns (bool) { + return hasStakingModule(_stakingModuleId) && status == StakingModuleStatus.Active; } function getStakingModuleIsActive( uint256 stakingModuleId ) external view whenModuleIsRegistered(stakingModuleId) returns (bool) { - return status == StakingRouter.StakingModuleStatus.Active; + return 
status == StakingModuleStatus.Active; } function getStakingModuleNonce( @@ -111,6 +129,12 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { return stakingModuleMaxDepositsPerBlock; } + function getStakingModuleMaxDepositsAmountPerBlock( + uint256 stakingModuleId + ) external view whenModuleIsRegistered(stakingModuleId) returns (uint256) { + return stakingModuleMaxDepositsAmountPerBlock; + } + function setStakingModuleMaxDepositsPerBlock(uint256 value) external { stakingModuleMaxDepositsPerBlock = value; } diff --git a/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol b/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol index e998d50755..9983c1c931 100644 --- a/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol +++ b/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol @@ -1,19 +1,20 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; -import {StakingRouter} from "contracts/0.8.9/StakingRouter.sol"; +import {StakingModule} from "contracts/0.8.25/sr/SRTypes.sol"; contract StakingRouter__MockForSanityChecker { - mapping(uint256 => StakingRouter.StakingModule) private modules; + mapping(uint256 => StakingModule) private modules; + mapping(uint256 => bool) private moduleExistsById; uint256[] private moduleIds; constructor() {} function mock__addStakingModuleExitedValidators(uint24 moduleId, uint256 exitedValidators) external { - StakingRouter.StakingModule memory module = StakingRouter.StakingModule( + StakingModule memory module = StakingModule( moduleId, address(0), 0, @@ -26,9 +27,12 @@ contract StakingRouter__MockForSanityChecker { exitedValidators, 0, 0, + 0, + 1, // wcType 0 ); modules[moduleId] = module; + moduleExistsById[moduleId] = true; moduleIds.push(moduleId); } @@ -42,13 +46,25 @@ contract StakingRouter__MockForSanityChecker { break; } } + delete modules[moduleId]; + delete moduleExistsById[moduleId]; } function 
getStakingModuleIds() external view returns (uint256[] memory) { return moduleIds; } - function getStakingModule(uint256 stakingModuleId) public view returns (StakingRouter.StakingModule memory module) { + function getStakingModule(uint256 stakingModuleId) public view returns (StakingModule memory module) { return modules[stakingModuleId]; } + + function hasStakingModule(uint256 stakingModuleId) external view returns (bool) { + return moduleExistsById[stakingModuleId]; + } + + function getStakingModuleStateAccounting( + uint256 stakingModuleId + ) external view returns (uint64 validatorsBalanceGwei, uint64 pendingBalanceGwei, uint64 exitedValidatorsCount) { + return (0, 0, uint64(modules[stakingModuleId].exitedValidatorsCount)); + } } diff --git a/test/0.8.9/contracts/StakingRouter__MockForValidatorsExitBus.sol b/test/0.8.9/contracts/StakingRouter__MockForValidatorsExitBus.sol new file mode 100644 index 0000000000..91190e9258 --- /dev/null +++ b/test/0.8.9/contracts/StakingRouter__MockForValidatorsExitBus.sol @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +import {IStakingRouter} from "contracts/0.8.9/oracle/ValidatorsExitBus.sol"; + +contract StakingRouter__MockForValidatorsExitBus is IStakingRouter { + error StakingModuleUnregistered(); + + struct StakingModuleData { + uint24 id; + address stakingModuleAddress; + uint16 stakingModuleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint8 status; + string name; + uint64 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint16 priorityExitShareThreshold; + uint64 maxDepositsPerBlock; + uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; + } + + mapping(uint256 => StakingModuleData) internal _modules; + mapping(uint256 => bool) internal _moduleConfigured; + + /// @notice Mock function to set up module configuration for tests + /// @param moduleId The module ID + /// @param withdrawalCredentialsType The 
withdrawal credentials type (0x01 or 0x02) + function setStakingModuleWithdrawalCredentialsType(uint256 moduleId, uint8 withdrawalCredentialsType) external { + _modules[moduleId].id = uint24(moduleId); + _modules[moduleId].withdrawalCredentialsType = withdrawalCredentialsType; + _moduleConfigured[moduleId] = true; + // Set a placeholder address - tests can override with setStakingModuleAddress if needed + if (_modules[moduleId].stakingModuleAddress == address(0)) { + _modules[moduleId].stakingModuleAddress = address(uint160(moduleId + 0x1000)); + } + } + + /// @notice Mock function to set staking module address + /// @param moduleId The module ID + /// @param moduleAddress The module address + function setStakingModuleAddress(uint256 moduleId, address moduleAddress) external { + _modules[moduleId].stakingModuleAddress = moduleAddress; + _moduleConfigured[moduleId] = true; + } + + function getStakingModuleStateConfig( + uint256 _stakingModuleId + ) external view returns (ModuleStateConfig memory stateConfig) { + _validateModuleId(_stakingModuleId); + StakingModuleData memory data = _modules[_stakingModuleId]; + return + ModuleStateConfig({ + moduleAddress: data.stakingModuleAddress, + moduleFee: data.stakingModuleFee, + treasuryFee: data.treasuryFee, + stakeShareLimit: data.stakeShareLimit, + priorityExitShareThreshold: data.priorityExitShareThreshold, + status: data.status, + withdrawalCredentialsType: data.withdrawalCredentialsType + }); + } + + function _validateModuleId(uint256 _moduleId) internal view { + /// @dev require module configured and non-zero id + if (_moduleId == 0 || !_moduleConfigured[_moduleId]) { + revert StakingModuleUnregistered(); + } + } + + // Stub implementations for other IStakingRouter methods (not used in ValidatorsExitBus) + function updateExitedValidatorsCountByStakingModule( + uint256[] calldata, + uint256[] calldata + ) external pure returns (uint256) { + revert("Not implemented"); + } + + function 
getDepositAmountFromLastSlot(uint256) external pure returns (uint256) { + revert("Not implemented"); + } + + function reportStakingModuleExitedValidatorsCountByNodeOperator( + uint256, + bytes calldata, + bytes calldata + ) external pure { + revert("Not implemented"); + } + + function onValidatorsCountsByNodeOperatorReportingFinished() external pure { + revert("Not implemented"); + } +} diff --git a/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol b/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol index c7382f9ad8..cd12f64932 100644 --- a/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol +++ b/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol @@ -58,4 +58,28 @@ contract ValidatorsExitBus__Harness is ValidatorsExitBusOracle, ITimeProvider { function getRequestStatus(bytes32 exitRequestHash) external view returns (RequestStatus memory requestStatus) { requestStatus = _storageRequestStatus()[exitRequestHash]; } + + // Expose internal function for unit testing + function calculateTotalExitBalanceEth(bytes calldata data, uint256 dataFormat) external view returns (uint256) { + return _calculateTotalExitBalanceEth(data, dataFormat); + } + + /// @notice Expose base timestamp calculation (without consensus override) for coverage + function callBaseTimestamp() external view returns (uint32) { + return super._getTimestamp(); + } + + /// @notice Expose internal validator data decoder for coverage of unsupported formats + function callGetValidatorData( + bytes calldata data, + uint256 dataFormat, + uint256 index + ) external pure returns (ValidatorData memory) { + return _getValidatorData(data, dataFormat, index); + } + + /// @notice Expose internal dispatcher for direct branch coverage + function callProcessExitRequestsList(bytes calldata data, uint256 dataFormat) external { + _processExitRequestsList(data, dataFormat); + } } diff --git a/test/0.8.9/contracts/VaultHub__MockForAccReport.sol b/test/0.8.9/contracts/VaultHub__MockForAccReport.sol new file mode 100644 
index 0000000000..e4eb7262ee --- /dev/null +++ b/test/0.8.9/contracts/VaultHub__MockForAccReport.sol @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.9; + +import {IVaultHub} from "contracts/common/interfaces/IVaultHub.sol"; + +contract VaultHub__MockForAccountingReport is IVaultHub { + uint256 private badDebtToInternalize_; + + function mock__badDebtToInternalize() external view returns (uint256) { + return badDebtToInternalize_; + } + + function setBadDebtToInternalize(uint256 _badDebt) external { + badDebtToInternalize_ = _badDebt; + } + + function decreaseInternalizedBadDebt(uint256 _badDebt) external { + badDebtToInternalize_ -= _badDebt; + } + + function badDebtToInternalize() external view override returns (uint256) { + return badDebtToInternalize_; + } + + function badDebtToInternalizeForLastRefSlot() external view override returns (uint256) { + return badDebtToInternalize_; + } +} diff --git a/test/0.8.9/contracts/WithdrawalVault__Harness.sol b/test/0.8.9/contracts/WithdrawalVault__Harness.sol index 8bbefb2f82..59dfb326c2 100644 --- a/test/0.8.9/contracts/WithdrawalVault__Harness.sol +++ b/test/0.8.9/contracts/WithdrawalVault__Harness.sol @@ -9,8 +9,20 @@ contract WithdrawalVault__Harness is WithdrawalVault { constructor( address _lido, address _treasury, - address _triggerableWithdrawalsGateway - ) WithdrawalVault(_lido, _treasury, _triggerableWithdrawalsGateway) {} + address _triggerableWithdrawalsGateway, + address _consolidationGateway, + address _withdrawalRequest, + address _consolidationRequest + ) + WithdrawalVault( + _lido, + _treasury, + _triggerableWithdrawalsGateway, + _consolidationGateway, + _withdrawalRequest, + _consolidationRequest + ) + {} function harness__initializeContractVersionTo(uint256 _version) external { _initializeContractVersionTo(_version); diff --git a/test/0.8.9/depositSecurityModule.test.ts b/test/0.8.9/depositSecurityModule.test.ts index 28dec79a02..749fd9defb 
100644 --- a/test/0.8.9/depositSecurityModule.test.ts +++ b/test/0.8.9/depositSecurityModule.test.ts @@ -30,6 +30,7 @@ import { Snapshot } from "test/suite"; const UNREGISTERED_STAKING_MODULE_ID = 1; const STAKING_MODULE_ID = 100; const MAX_DEPOSITS_PER_BLOCK = 100; +// const MAX_DEPOSITS_AMOUNT_PER_BLOCK_WEI = BigInt(MAX_DEPOSITS_PER_BLOCK) * parseEther("32"); const MIN_DEPOSIT_BLOCK_DISTANCE = 14; const PAUSE_INTENT_VALIDITY_PERIOD_BLOCKS = 10; const MAX_OPERATORS_PER_UNVETTING = 20; @@ -169,8 +170,11 @@ describe("DepositSecurityModule.sol", () => { expect(minDepositBlockDistance).to.equal(MIN_DEPOSIT_BLOCK_DISTANCE); await stakingRouter.setStakingModuleMaxDepositsPerBlock(MAX_DEPOSITS_PER_BLOCK); + // await stakingRouter.setStakingModuleMaxDepositsAmountPerBlock(MAX_DEPOSITS_AMOUNT_PER_BLOCK_WEI); const maxDepositsPerBlock = await stakingRouter.getStakingModuleMaxDepositsPerBlock(STAKING_MODULE_ID); expect(maxDepositsPerBlock).to.equal(MAX_DEPOSITS_PER_BLOCK); + // const maxDepositsAmountPerBlock = await stakingRouter.getStakingModuleMaxDepositsAmountPerBlock(STAKING_MODULE_ID); + // expect(maxDepositsAmountPerBlock).to.equal(MAX_DEPOSITS); await depositContract.set_deposit_root(DEPOSIT_ROOT); expect(await depositContract.get_deposit_root()).to.equal(DEPOSIT_ROOT); @@ -1173,9 +1177,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); }); @@ -1240,9 +1242,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1, guardian2, guardian3], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - 
.withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); it("Allow deposit if deposit with guardian's sigs (0,1)", async () => { @@ -1254,9 +1254,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1, guardian2], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); it("Allow deposit if deposit with guardian's sigs (0,2)", async () => { @@ -1268,9 +1266,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1, guardian3], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); it("Allow deposit if deposit with guardian's sigs (1,2)", async () => { @@ -1282,9 +1278,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian2, guardian3], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); }); }); diff --git a/test/0.8.9/lidoLocator.test.ts b/test/0.8.9/lidoLocator.test.ts index 00a375baf9..928cf84839 100644 --- a/test/0.8.9/lidoLocator.test.ts +++ b/test/0.8.9/lidoLocator.test.ts @@ -21,6 +21,7 @@ const services = [ "oracleDaemonConfig", 
"validatorExitDelayVerifier", "triggerableWithdrawalsGateway", + "consolidationGateway", "accounting", "predepositGuarantee", "wstETH", @@ -28,8 +29,7 @@ const services = [ "vaultFactory", "lazyOracle", "operatorGrid", - "vaultFactory", - "lazyOracle", + "topUpGateway", ] as const; type ArrayToUnion = A[number]; diff --git a/test/0.8.9/oracle/accountingOracle.accessControl.test.ts b/test/0.8.9/oracle/accountingOracle.accessControl.test.ts index 897cfd9d6e..550ddb0de2 100644 --- a/test/0.8.9/oracle/accountingOracle.accessControl.test.ts +++ b/test/0.8.9/oracle/accountingOracle.accessControl.test.ts @@ -66,10 +66,12 @@ describe("AccountingOracle.sol:accessControl", () => { reportFields = { consensusVersion: AO_CONSENSUS_VERSION, refSlot: refSlot, - numValidators: 10, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), diff --git a/test/0.8.9/oracle/accountingOracle.happyPath.test.ts b/test/0.8.9/oracle/accountingOracle.happyPath.test.ts index c13ac0028c..2d5725b2c4 100644 --- a/test/0.8.9/oracle/accountingOracle.happyPath.test.ts +++ b/test/0.8.9/oracle/accountingOracle.happyPath.test.ts @@ -132,10 +132,12 @@ describe("AccountingOracle.sol:happyPath", () => { reportFields = { consensusVersion: AO_CONSENSUS_VERSION, refSlot: refSlot, - numValidators: 10, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), 
elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), @@ -222,14 +224,12 @@ describe("AccountingOracle.sol:happyPath", () => { it("Accounting got the oracle report", async () => { const lastOracleReportCall = await mockAccounting.lastCall__handleOracleReport(); expect(lastOracleReportCall.callCount).to.equal(1); - expect(lastOracleReportCall.arg.timeElapsed).to.equal( - (reportFields.refSlot - ORACLE_LAST_REPORT_SLOT) * SECONDS_PER_SLOT, - ); - expect(lastOracleReportCall.arg.clValidators).to.equal(reportFields.numValidators); - expect(lastOracleReportCall.arg.clBalance).to.equal(BigInt(reportFields.clBalanceGwei) * ONE_GWEI); - expect(lastOracleReportCall.arg.withdrawalVaultBalance).to.equal(reportFields.withdrawalVaultBalance); - expect(lastOracleReportCall.arg.elRewardsVaultBalance).to.equal(reportFields.elRewardsVaultBalance); - expect(lastOracleReportCall.arg.withdrawalFinalizationBatches.map(Number)).to.have.ordered.members( + expect(lastOracleReportCall.arg[1]).to.equal((reportFields.refSlot - ORACLE_LAST_REPORT_SLOT) * SECONDS_PER_SLOT); + expect(lastOracleReportCall.arg[2]).to.equal(BigInt(reportFields.clValidatorsBalanceGwei) * 1000000000n); + expect(lastOracleReportCall.arg[3]).to.equal(BigInt(reportFields.clPendingBalanceGwei) * 1000000000n); + expect(lastOracleReportCall.arg[4]).to.equal(reportFields.withdrawalVaultBalance); + expect(lastOracleReportCall.arg[5]).to.equal(reportFields.elRewardsVaultBalance); + expect(lastOracleReportCall.arg[7].map(Number)).to.have.ordered.members( reportFields.withdrawalFinalizationBatches.map(Number), ); }); diff --git a/test/0.8.9/oracle/accountingOracle.submitReport.test.ts b/test/0.8.9/oracle/accountingOracle.submitReport.test.ts index 0fdf42e515..f3881b8737 100644 --- a/test/0.8.9/oracle/accountingOracle.submitReport.test.ts +++ b/test/0.8.9/oracle/accountingOracle.submitReport.test.ts @@ -10,6 +10,7 @@ import { Accounting__MockForAccountingOracle, AccountingOracle__Harness, HashConsensus__Harness, 
+ Lido__MockForAccounting, OracleReportSanityChecker, StakingRouter__MockForAccountingOracle, WithdrawalQueue__MockForAccountingOracle, @@ -47,6 +48,7 @@ describe("AccountingOracle.sol:submitReport", () => { let extraDataItems: string[]; let oracleVersion: bigint; let deadline: BigNumberish; + let mockLido: Lido__MockForAccounting; let mockStakingRouter: StakingRouter__MockForAccountingOracle; let extraData: ExtraDataType; let mockAccounting: Accounting__MockForAccountingOracle; @@ -61,10 +63,12 @@ describe("AccountingOracle.sol:submitReport", () => { const getReportFields = (override = {}) => ({ consensusVersion: AO_CONSENSUS_VERSION, refSlot: 0n, - numValidators: 10n, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), @@ -105,6 +109,7 @@ describe("AccountingOracle.sol:submitReport", () => { oracle = deployed.oracle; consensus = deployed.consensus; + mockLido = deployed.lido; mockStakingRouter = deployed.stakingRouter; mockAccounting = deployed.accounting; sanityChecker = deployed.oracleReportSanityChecker; @@ -351,7 +356,7 @@ describe("AccountingOracle.sol:submitReport", () => { it("reverts with UnexpectedDataHash", async () => { const incorrectReportFields = { ...reportFields, - numValidators: Number(reportFields.numValidators) - 1, + clValidatorsBalanceGwei: getBigInt(reportFields.clValidatorsBalanceGwei) - ONE_GWEI, }; const incorrectReportItems = getReportDataItems(incorrectReportFields); @@ -424,24 +429,32 @@ describe("AccountingOracle.sol:submitReport", () => { ).to.be.revertedWithCustomError(oracle, "InvalidExitedValidatorsData"); }); - it("reverts with ExitedValidatorsLimitExceeded if exited 
validators rate limit will be reached", async () => { - // Really simple test here for now - // TODO: Come up with more tests for better coverage of edge-case scenarios that can be accrued - // during calculation `exitedValidatorsPerDay` rate in AccountingOracle:612 - const totalExitedValidators = reportFields.numExitedValidatorsByStakingModule.reduce( - (sum: BigNumberish, curr: BigNumberish) => getBigInt(sum) + getBigInt(curr), - 0, + it("reverts with ExitedEthAmountPerDayLimitExceeded if exited ETH amount per day limit is reached", async () => { + const totalExitedValidators: bigint = reportFields.numExitedValidatorsByStakingModule.reduce( + (sum, curr) => sum + getBigInt(curr), + 0n, ); - const exitingRateLimit = getBigInt(totalExitedValidators) - 1n; + const exitingRateLimit = 0n; await sanityChecker.grantRole( - await sanityChecker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), + await sanityChecker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), admin.address, ); - await sanityChecker.setExitedValidatorsPerDayLimit(exitingRateLimit); - expect((await sanityChecker.getOracleReportLimits()).exitedValidatorsPerDayLimit).to.equal(exitingRateLimit); + await sanityChecker.setExitedEthAmountPerDayLimit(exitingRateLimit); + + const limits = await sanityChecker.getOracleReportLimits(); + expect(limits.exitedEthAmountPerDayLimit).to.equal(exitingRateLimit); + + const refSlotDelta = reportFields.refSlot - (await oracle.getLastProcessingRefSlot()); + const timeElapsed = refSlotDelta * SECONDS_PER_SLOT; + const exitedEthAmount = totalExitedValidators * limits.exitedValidatorEthAmountLimit * 10n ** 18n; + const exitedEthAmountPerDay = + timeElapsed === 0n ? 
exitedEthAmount * 86_400n : (exitedEthAmount * 86_400n) / timeElapsed; + const exitedEthAmountPerDayLimitWithConsolidation = + (limits.exitedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * 10n ** 18n; + await expect(oracle.connect(member1).submitReportData(reportFields, oracleVersion)) - .to.be.revertedWithCustomError(sanityChecker, "ExitedValidatorsLimitExceeded") - .withArgs(exitingRateLimit, totalExitedValidators); + .to.be.revertedWithCustomError(sanityChecker, "ExitedEthAmountPerDayLimitExceeded") + .withArgs(exitedEthAmountPerDayLimitWithConsolidation, exitedEthAmountPerDay); }); }); @@ -463,7 +476,12 @@ describe("AccountingOracle.sol:submitReport", () => { GENESIS_TIME + reportFields.refSlot * SECONDS_PER_SLOT, ); - expect(lastOracleReportToAccounting.arg.clBalance).to.equal(reportFields.clBalanceGwei + "000000000"); + expect(lastOracleReportToAccounting.arg.clValidatorsBalance).to.equal( + reportFields.clValidatorsBalanceGwei + "000000000", + ); + expect(lastOracleReportToAccounting.arg.clPendingBalance).to.equal( + reportFields.clPendingBalanceGwei + "000000000", + ); expect(lastOracleReportToAccounting.arg.withdrawalVaultBalance).to.equal(reportFields.withdrawalVaultBalance); expect(lastOracleReportToAccounting.arg.elRewardsVaultBalance).to.equal(reportFields.elRewardsVaultBalance); expect(lastOracleReportToAccounting.arg.withdrawalFinalizationBatches.map(Number)).to.have.ordered.members( @@ -640,5 +658,212 @@ describe("AccountingOracle.sol:submitReport", () => { expect(data.dataHash).to.equal(reportFields.extraDataHash); }); }); + + context("Balance-based accounting", () => { + it("should revert with InvalidClBalancesData if a staking module id does not exist", async () => { + const { newReportFields } = await prepareNextReportInNextFrame( + getReportFields({ + stakingModuleIdsWithUpdatedBalance: [999], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], + }), + ); + + await expect( + 
oracle.connect(member1).submitReportData(newReportFields, oracleVersion), + ).to.be.revertedWithCustomError(mockStakingRouter, "InvalidValidatorBalancesReport"); + }); + + it("should accept different balance values", async () => { + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(reportFields, oracleVersion)).not.to.be.reverted; + }); + + it("should process balance data correctly", async () => { + expect((await mockAccounting.lastCall__handleOracleReport()).callCount).to.equal(0); + + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const lastCall = await mockAccounting.lastCall__handleOracleReport(); + expect(lastCall.callCount).to.equal(1); + expect(lastCall.arg.clValidatorsBalance).to.equal(BigInt(reportFields.clValidatorsBalanceGwei) * 1000000000n); + expect(lastCall.arg.clPendingBalance).to.equal(BigInt(reportFields.clPendingBalanceGwei) * 1000000000n); + }); + + it("should accept zero active balance", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + // Router mock stores validators balance only; pending is seeded on the Lido mock. 
+ await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [300n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(300n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(64n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 0n, + clPendingBalanceGwei: 64n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [0n], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should accept zero pending balance", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + // Seed the previous router balances to the target values; this case checks zero pending itself, not one-frame growth. + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [1000n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(1000n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(0n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 1000n * ONE_GWEI, + clPendingBalanceGwei: 0n, + validatorBalancesGweiByStakingModule: [1000n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should accept large balance values", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [60000n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(60000n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(5000n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 60000n * 
ONE_GWEI, + clPendingBalanceGwei: 5000n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [60000n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should handle pending larger than active", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [300n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(300n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(500n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 100n * ONE_GWEI, + clPendingBalanceGwei: 500n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [100n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should convert gwei to wei correctly", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [300n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(300n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(456n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 123n * ONE_GWEI, + clPendingBalanceGwei: 456n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [123n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion); + + const lastCall = await mockAccounting.lastCall__handleOracleReport(); + expect(lastCall.arg.clValidatorsBalance).to.equal(123n * ONE_GWEI * 
1000000000n); + expect(lastCall.arg.clPendingBalance).to.equal(456n * ONE_GWEI * 1000000000n); + }); + + it("should accept both balances zero", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 0n, + clPendingBalanceGwei: 0n, + validatorBalancesGweiByStakingModule: [0n], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should accept minimal gwei values", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 1n, + clPendingBalanceGwei: 1n, + validatorBalancesGweiByStakingModule: [1n], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should handle realistic scenarios", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [30000n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(30000n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(1000n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 30000n * ONE_GWEI, + clPendingBalanceGwei: 1000n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [30000n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should 
verify ReportValues structure", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const lastCall = await mockAccounting.lastCall__handleOracleReport(); + + expect(lastCall.arg).to.be.an("array"); + expect(lastCall.arg).to.have.length(9); + expect(lastCall.arg[0]).to.be.a("bigint"); + expect(lastCall.arg[1]).to.be.a("bigint"); + expect(lastCall.arg[2]).to.be.a("bigint"); + expect(lastCall.arg[3]).to.be.a("bigint"); + expect(lastCall.arg[2]).to.equal(BigInt(reportFields.clValidatorsBalanceGwei) * 1000000000n); + expect(lastCall.arg[3]).to.equal(BigInt(reportFields.clPendingBalanceGwei) * 1000000000n); + }); + }); }); }); diff --git a/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts b/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts index bb1276ec89..329794f6ab 100644 --- a/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts +++ b/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts @@ -50,10 +50,12 @@ const getDefaultExtraData = (): ExtraDataType => ({ const getDefaultReportFields = (override = {}) => ({ consensusVersion: AO_CONSENSUS_VERSION, refSlot: 0, - numValidators: 10, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), @@ -836,62 +838,65 @@ describe("AccountingOracle.sol:submitReportExtraData", () => { }); }); - context("checks data type for UnsupportedExtraDataType reverts (only supported types are `1` and `2`)", () => { - // contextual helper to prepeare wrong typed data - const getExtraWithCustomType = (typeCustom: bigint) => { - const extraData = { - 
exitedKeys: [{ moduleId: 1, nodeOpIds: [1], keysCounts: [2] }], - }; - const item = extraData.exitedKeys[0]; - const extraDataItems = []; - extraDataItems.push(encodeExtraDataItem(0, typeCustom, item.moduleId, item.nodeOpIds, item.keysCounts)); - return { - extraData, - extraDataItems, - wrongTypedIndex: 0, - typeCustom, + context( + "checks data type for UnsupportedExtraDataType reverts (only supported type is `2` and `1` is deprecated)", + () => { + // contextual helper to prepeare wrong typed data + const getExtraWithCustomType = (typeCustom: bigint) => { + const extraData = { + exitedKeys: [{ moduleId: 1, nodeOpIds: [1], keysCounts: [2] }], + }; + const item = extraData.exitedKeys[0]; + const extraDataItems = []; + extraDataItems.push(encodeExtraDataItem(0, typeCustom, item.moduleId, item.nodeOpIds, item.keysCounts)); + return { + extraData, + extraDataItems, + wrongTypedIndex: 0, + typeCustom, + }; }; - }; - it("if type `0` was passed", async () => { - const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(0n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) - .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") - .withArgs(wrongTypedIndex, typeCustom); - }); + it("if type `0` was passed", async () => { + const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(0n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) + .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") + 
.withArgs(wrongTypedIndex, typeCustom); + }); - it("if type `3` was passed", async () => { - const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(3n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) - .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") - .withArgs(wrongTypedIndex, typeCustom); - }); + it("if type `4` was passed", async () => { + const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(4n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) + .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") + .withArgs(wrongTypedIndex, typeCustom); + }); - it("if type `1` was passed", async () => { - const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(1n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) - .to.be.revertedWithCustomError(oracle, "DeprecatedExtraDataType") - .withArgs(wrongTypedIndex, typeCustom); - }); + it("if type `1` was passed", async () => { + const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(1n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await 
oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) + .to.be.revertedWithCustomError(oracle, "DeprecatedExtraDataType") + .withArgs(wrongTypedIndex, typeCustom); + }); - it("succeeds if `2` was passed", async () => { - const { extraDataItems } = getExtraWithCustomType(2n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - const tx = await oracle.connect(member1).submitReportExtraDataList(extraDataList); - await expect(tx).to.emit(oracle, "ExtraDataSubmitted").withArgs(reportFields.refSlot, anyValue, anyValue); - }); - }); + it("succeeds if `2` was passed", async () => { + const { extraDataItems } = getExtraWithCustomType(2n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + const tx = await oracle.connect(member1).submitReportExtraDataList(extraDataList); + await expect(tx).to.emit(oracle, "ExtraDataSubmitted").withArgs(reportFields.refSlot, anyValue, anyValue); + }); + }, + ); context("should check node operators processing limits with OracleReportSanityChecker", () => { it("by reverting TooManyNodeOpsPerExtraDataItem if there was too much node operators", async () => { diff --git a/test/0.8.9/oracle/accountingOracle.upgrade.test.ts b/test/0.8.9/oracle/accountingOracle.upgrade.test.ts index 2d0dd92ec2..85d67f7830 100644 --- a/test/0.8.9/oracle/accountingOracle.upgrade.test.ts +++ b/test/0.8.9/oracle/accountingOracle.upgrade.test.ts @@ -8,7 +8,7 @@ import { AccountingOracle__Harness } from "typechain-types"; import { deployAndConfigureAccountingOracle } from "test/deploy"; describe("AccountingOracle.sol:upgrade", () => 
{ - context("finalizeUpgrade_v3", () => { + context("finalizeUpgrade_v5", () => { let admin: HardhatEthersSigner; let oracle: AccountingOracle__Harness; const NEW_CONSENSUS_VERSION = 42n; // Just a test value @@ -17,19 +17,19 @@ describe("AccountingOracle.sol:upgrade", () => { [admin] = await ethers.getSigners(); const deployed = await deployAndConfigureAccountingOracle(admin.address); oracle = deployed.oracle; - await oracle.setContractVersion(3); // Set initial contract version to 3 + await oracle.setContractVersion(4); // Set initial contract version to 4 }); - // TODO: test version increment because finalizeUpgrade_v4 should be called on a v2 contract + // TODO: test version increment because finalizeUpgrade_v5 should be called on a v4 contract it("successfully updates contract and consensus versions", async () => { // Get initial versions const initialContractVersion = await oracle.getContractVersion(); const initialConsensusVersion = await oracle.getConsensusVersion(); - await oracle.connect(admin).finalizeUpgrade_v4(NEW_CONSENSUS_VERSION); + await oracle.connect(admin).finalizeUpgrade_v5(NEW_CONSENSUS_VERSION); const newContractVersion = await oracle.getContractVersion(); - expect(newContractVersion).to.equal(4); + expect(newContractVersion).to.equal(5); expect(newContractVersion).to.not.equal(initialContractVersion); // Verify consensus version updated to the provided value diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts index 80dead68bb..778e9d6259 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts @@ -4,11 +4,15 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + 
StakingModule__MockForKeyVerification, + ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; -import { DATA_FORMAT_LIST, deployVEBO, initVEBO } from "test/deploy"; +import { DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO, seedMockModuleSigningKeys } from "test/deploy"; import { Snapshot } from "test/suite"; const PUBKEYS = [ @@ -22,6 +26,14 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; let originalState: string; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let initTx: ContractTransactionResponse; let oracleVersion: bigint; @@ -39,6 +51,7 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -58,10 +71,16 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, keyIndex, valPubkey }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -72,6 +91,7 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { const deployed = await deployVEBO(admin.address); oracle = 
deployed.oracle; consensus = deployed.consensus; + mockModules = deployed.mockModules; initTx = await initVEBO({ admin: admin.address, oracle, consensus, resumeAfterDeploy: true }); @@ -83,15 +103,16 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { const { refSlot } = await consensus.getCurrentFrame(); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, ]; + await seedMockModuleSigningKeys(mockModules, exitRequests); reportFields = { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, requestsCount: exitRequests.length, data: encodeExitRequestsDataList(exitRequests), }; @@ -118,14 +139,14 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { const { refSlot } = await consensus.getCurrentFrame(); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, ]; reportFields = { consensusVersion: VEBO_CONSENSUS_VERSION, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, // consensusVersion: CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.length, @@ -136,6 +157,7 @@ 
describe("ValidatorsExitBusOracle.sol:accessControl", () => { await consensus.connect(member1).submitReport(refSlot, reportHash, VEBO_CONSENSUS_VERSION); await consensus.connect(member3).submitReport(refSlot, reportHash, VEBO_CONSENSUS_VERSION); + await seedMockModuleSigningKeys(deployed.mockModules, exitRequests); await deploy(); }); @@ -164,7 +186,16 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { }); it("should revert without admin address", async () => { await expect( - oracle.initialize(ZeroAddress, await consensus.getAddress(), VEBO_CONSENSUS_VERSION, 0, 600, 13000, 1, 48), + oracle.initialize( + ZeroAddress, + await consensus.getAddress(), + VEBO_CONSENSUS_VERSION, + 0, + 600, + 13_000n, // 13,000 ETH + 32n, // 32 ETH + 48, + ), ).to.be.revertedWithCustomError(oracle, "AdminCannotBeZero"); }); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.balanceCalculation.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceCalculation.test.ts new file mode 100644 index 0000000000..1704cf8e98 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceCalculation.test.ts @@ -0,0 +1,325 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + OracleReportSanityChecker, + StakingRouter__MockForValidatorsExitBus, + ValidatorsExitBus__Harness, +} from "typechain-types"; + +import { de0x, numberToHex } from "lib"; + +import { DATA_FORMAT_LIST, DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO } from "test/deploy"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + 
"0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; + +// Module IDs used in tests +const LEGACY_MODULE_ID = 1; // Module with 0x01 withdrawal credentials (32 ETH) +const MAXEB_MODULE_ID_1 = 3; // Module with 0x02 withdrawal credentials (2048 ETH) +const MAXEB_MODULE_ID_2 = 5; // Another module with 0x02 withdrawal credentials +const MAXEB_MODULE_ID_3 = 7; // Another module with 0x02 withdrawal credentials + +// Balance constants from WithdrawalCredentials.sol +const LEGACY_MODULE_MAX_BALANCE_ETH = 32n; // 32 ETH +const MAXEB_MODULE_MAX_BALANCE_ETH = 2048n; // 2048 ETH + +describe("ValidatorsExitBusOracle.sol:balanceCalculation", () => { + let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; + let sanityChecker: OracleReportSanityChecker; + let stakingRouter: StakingRouter__MockForValidatorsExitBus; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: number; + valPubkey: string; + keyIndex?: number; // Optional for format 2 + } + + const encodeExitRequestHexV1 = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestHexV2 = ({ moduleId, nodeOpId, valIndex, keyIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex || 0, 8) + + pubkeyHex + ); + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[], dataFormat: number) => { + const encoder = dataFormat === DATA_FORMAT_LIST ? 
encodeExitRequestHexV1 : encodeExitRequestHexV2; + return "0x" + requests.map(encoder).join(""); + }; + + before(async () => { + const signers = await ethers.getSigners(); + admin = signers[0]; + + const deployed = await deployVEBO(await admin.getAddress()); + oracle = deployed.oracle; + sanityChecker = deployed.oracleReportSanityChecker; + stakingRouter = deployed.stakingRouter as StakingRouter__MockForValidatorsExitBus; + }); + + describe("_calculateTotalExitBalanceEth", () => { + describe("Format 1 (DATA_FORMAT_LIST)", () => { + it("should calculate balance for single legacy validator (32 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for single MaxEB validator (2048 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for multiple legacy validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + 
expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for multiple MaxEB validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 2, valIndex: 20, valPubkey: PUBKEYS[1] }, + { moduleId: MAXEB_MODULE_ID_3, nodeOpId: 3, valIndex: 30, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for mixed module types", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 2, valIndex: 20, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, valPubkey: PUBKEYS[2] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 3, valIndex: 30, valPubkey: PUBKEYS[3] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + const expected = + LEGACY_MODULE_MAX_BALANCE_ETH * 2n + // 2 legacy validators + MAXEB_MODULE_MAX_BALANCE_ETH * 2n; // 2 MaxEB validators + expect(totalBalance).to.equal(expected); + }); + + it("should return zero for empty data", async () => { + const data = "0x"; + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(0n); + }); + }); + + describe("Format 2 (DATA_FORMAT_LIST_WITH_KEY_INDEX)", () => { + it("should calculate balance for single legacy validator (32 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 5, valPubkey: PUBKEYS[0] }, + 
]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for single MaxEB validator (2048 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, keyIndex: 7, valPubkey: PUBKEYS[0] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for multiple legacy validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, keyIndex: 3, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for multiple MaxEB validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, keyIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 2, valIndex: 20, keyIndex: 20, valPubkey: PUBKEYS[1] }, + { moduleId: MAXEB_MODULE_ID_3, nodeOpId: 3, valIndex: 30, keyIndex: 30, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await 
oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for mixed module types", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 2, valIndex: 20, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 3, valPubkey: PUBKEYS[2] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 3, valIndex: 30, keyIndex: 4, valPubkey: PUBKEYS[3] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const expected = + LEGACY_MODULE_MAX_BALANCE_ETH * 2n + // 2 legacy validators + MAXEB_MODULE_MAX_BALANCE_ETH * 2n; // 2 MaxEB validators + expect(totalBalance).to.equal(expected); + }); + + it("should return zero for empty data", async () => { + const data = "0x"; + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(0n); + }); + + it("should ignore keyIndex when calculating balance", async () => { + // Same module, different keyIndexes should result in same total balance + const requests1: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + ]; + const requests2: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 100, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 200, valPubkey: PUBKEYS[1] }, + ]; + + const data1 = encodeExitRequestsDataList(requests1, DATA_FORMAT_LIST_WITH_KEY_INDEX); + const data2 = 
encodeExitRequestsDataList(requests2, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance1 = await oracle.calculateTotalExitBalanceEth(data1, DATA_FORMAT_LIST_WITH_KEY_INDEX); + const totalBalance2 = await oracle.calculateTotalExitBalanceEth(data2, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance1).to.equal(totalBalance2); + expect(totalBalance1).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 2n); + }); + }); + + describe("Edge cases", () => { + it("should handle large number of validators", async () => { + const requests: ExitRequest[] = []; + for (let i = 0; i < 100; i++) { + requests.push({ + moduleId: LEGACY_MODULE_ID, + nodeOpId: 1, + valIndex: i, + valPubkey: PUBKEYS[i % PUBKEYS.length], + }); + } + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 100n); + }); + + it("should handle module with 0x02 withdrawal credentials (MaxEB)", async () => { + const { oracle: newOracle, stakingRouter: localRouter } = await deployVEBO(await admin.getAddress()); + + // Configure module 999 as MaxEB (0x02) + await localRouter.setStakingModuleWithdrawalCredentialsType(999, 0x02); + + const requests: ExitRequest[] = [{ moduleId: 999, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await newOracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH); + }); + + it("should handle module with 0x01 withdrawal credentials (Legacy)", async () => { + const { oracle: newOracle, stakingRouter: localRouter } = await deployVEBO(await admin.getAddress()); + + // Configure module 888 as Legacy (0x01) + await localRouter.setStakingModuleWithdrawalCredentialsType(888, 0x01); + + const requests: ExitRequest[] = [{ moduleId: 888, nodeOpId: 1, valIndex: 
10, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await newOracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH); + }); + + it("uses MaxEB weights from sanity checker and applies governance updates", async () => { + await sanityChecker.grantRole(await sanityChecker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), admin.address); + await sanityChecker.setMaxEffectiveBalanceWeightWCType01(40n); + await sanityChecker.setMaxEffectiveBalanceWeightWCType02(4_096n); + + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 2, valIndex: 20, valPubkey: PUBKEYS[1] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + expect(await oracle.MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01()).to.equal(40n); + expect(await oracle.MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02()).to.equal(4_096n); + expect(await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST)).to.equal(4_136n); + }); + + it("reverts for unconfigured modules", async () => { + // Module 777 is not configured in the router + const requests: ExitRequest[] = [{ moduleId: 777, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + await expect(oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST)).to.be.revertedWithCustomError( + stakingRouter, + "StakingModuleUnregistered", + ); + }); + }); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.balanceIntegration.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceIntegration.test.ts new file mode 100644 index 0000000000..f771af0f76 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceIntegration.test.ts @@ -0,0 +1,240 @@ +import { expect } from "chai"; +import { ethers } from 
"hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + HashConsensus__Harness, + OracleReportSanityChecker, + StakingModule__MockForKeyVerification, + ValidatorsExitBus__Harness, +} from "typechain-types"; + +import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; + +import { DATA_FORMAT_LIST, DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO } from "test/deploy"; +import { Snapshot } from "test/suite"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; + +// Constants from WithdrawalCredentials.sol +const LEGACY_MODULE_ID = 1; // Module with 0x01 withdrawal credentials (32 ETH) +const LEGACY_MODULE_MAX_BALANCE_ETH = 32n; // 32 ETH +const MAXEB_MODULE_MAX_BALANCE_ETH = 2048n; // 2048 ETH + +describe("ValidatorsExitBusOracle.sol:balanceIntegration", () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + let oracleReportSanityChecker: OracleReportSanityChecker; + let admin: HardhatEthersSigner; + let member1: HardhatEthersSigner; + let member2: HardhatEthersSigner; + let member3: HardhatEthersSigner; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; + + let oracleVersion: bigint; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: 
number; + valPubkey: string; + keyIndex?: number; // Optional for format 2 + } + + const encodeExitRequestHexV1 = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestHexV2 = ({ moduleId, nodeOpId, valIndex, keyIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex || 0, 8) + + pubkeyHex + ); + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[], dataFormat: number) => { + const encoder = dataFormat === DATA_FORMAT_LIST ? encodeExitRequestHexV1 : encodeExitRequestHexV2; + return "0x" + requests.map(encoder).join(""); + }; + + const calcValidatorsExitBusReportDataHash = (items: { + consensusVersion: bigint; + refSlot: bigint; + requestsCount: number; + dataFormat: number; + data: string; + }) => { + const reportData = [items.consensusVersion, items.refSlot, items.requestsCount, items.dataFormat, items.data]; + const reportDataHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["(uint256,uint256,uint256,uint256,bytes)"], [reportData]), + ); + return reportDataHash; + }; + + const triggerConsensusOnHash = async (hash: string) => { + const { refSlot } = await consensus.getCurrentFrame(); + await consensus.connect(member1).submitReport(refSlot, hash, VEBO_CONSENSUS_VERSION); + await consensus.connect(member3).submitReport(refSlot, hash, VEBO_CONSENSUS_VERSION); + expect((await consensus.getConsensusState()).consensusReport).to.equal(hash); + }; + + const prepareReportAndSubmitHash = async ( + requests: ExitRequest[], + dataFormat: number = DATA_FORMAT_LIST, + options = { reportFields: {} }, + ) => { + const { refSlot } = await consensus.getCurrentFrame(); + const reportData = { + consensusVersion: VEBO_CONSENSUS_VERSION, + 
dataFormat, + refSlot, + requestsCount: requests.length, + data: encodeExitRequestsDataList(requests, dataFormat), + ...options.reportFields, + }; + + const reportHash = calcValidatorsExitBusReportDataHash(reportData); + + await triggerConsensusOnHash(reportHash); + + return { reportData, reportHash }; + }; + + before(async () => { + const signers = await ethers.getSigners(); + admin = signers[0]; + member1 = signers[1]; + member2 = signers[2]; + member3 = signers[3]; + + const deployed = await deployVEBO(await admin.getAddress()); + consensus = deployed.consensus; + oracle = deployed.oracle; + oracleReportSanityChecker = deployed.oracleReportSanityChecker; + mockModules = deployed.mockModules; + + // Configure signing keys for Format 2 testing (key verification) + // Set up keys for all combinations used in tests + for (let i = 0; i < PUBKEYS.length; i++) { + // Module 1 (legacy): keys for nodeOpId 1, keyIndex 1-5 + await mockModules.module1.setSigningKey(1, i + 1, PUBKEYS[i]); + + // Module 3 (MaxEB): keys for nodeOpId 1, keyIndex 10-14 + await mockModules.module3.setSigningKey(1, 10 + i, PUBKEYS[i]); + + // Module 3 (MaxEB): keys for nodeOpId 2, keyIndex 20 + // Multiple PUBKEYS can map to the same keyIndex for different tests + await mockModules.module3.setSigningKey(2, 20, PUBKEYS[0]); // Used in mixed validator test + + // Module 5 (MaxEB): keys for nodeOpId 2, keyIndex 20 + await mockModules.module5.setSigningKey(2, 20, PUBKEYS[1]); // Used in MaxEB validator test + } + + // Additional keys for "same balance" comparison test + await mockModules.module1.setSigningKey(1, 100, PUBKEYS[0]); // Format 1 vs Format 2 test + await mockModules.module3.setSigningKey(2, 200, PUBKEYS[1]); // Format 1 vs Format 2 test + + await initVEBO({ + admin: admin.address, + oracle, + consensus, + resumeAfterDeploy: true, + }); + + await consensus.addMember(member1, 1); + await consensus.addMember(member2, 2); + await consensus.addMember(member3, 2); + + oracleVersion = await 
oracle.getContractVersion(); + }); + + describe("Balance calculation integration with sanity checker", () => { + let originalState: string; + + before(async () => { + // Grant the role to admin for setting limits + await oracleReportSanityChecker + .connect(admin) + .grantRole(await oracleReportSanityChecker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), admin.address); + }); + + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + it("should pass sanity check for legacy validators (Format 2)", async () => { + // Set limit to allow 10 legacy validators (320 ETH) + const limit = LEGACY_MODULE_MAX_BALANCE_ETH * 10n; + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(limit); + + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, keyIndex: 3, valPubkey: PUBKEYS[2] }, + ]; + + const { reportData } = await prepareReportAndSubmitHash(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + // Should not revert - balance is within limit + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).not.to.be.reverted; + }); + + it("should pass sanity check for MaxEB validators (Format 2)", async () => { + // Set limit to allow 2 MaxEB validators (4096 ETH) + const limit = MAXEB_MODULE_MAX_BALANCE_ETH * 2n; + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(limit); + + const requests: ExitRequest[] = [ + { moduleId: 3, nodeOpId: 1, valIndex: 10, keyIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 2, valIndex: 20, keyIndex: 20, valPubkey: PUBKEYS[1] }, + ]; + + const { reportData } = await prepareReportAndSubmitHash(requests, 
DATA_FORMAT_LIST_WITH_KEY_INDEX); + + // Should not revert - balance is within limit + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).not.to.be.reverted; + }); + + it("should pass sanity check for mixed validators (Format 2)", async () => { + // Set limit to allow: 5 legacy (160 ETH) + 1 MaxEB (2048 ETH) = 2208 ETH + const limit = LEGACY_MODULE_MAX_BALANCE_ETH * 5n + MAXEB_MODULE_MAX_BALANCE_ETH * 1n; + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(limit); + + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, keyIndex: 3, valPubkey: PUBKEYS[2] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 13, keyIndex: 4, valPubkey: PUBKEYS[3] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 14, keyIndex: 5, valPubkey: PUBKEYS[4] }, + { moduleId: 3, nodeOpId: 2, valIndex: 20, keyIndex: 20, valPubkey: PUBKEYS[0] }, + ]; + + const { reportData } = await prepareReportAndSubmitHash(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + // Should not revert - balance is within limit + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).not.to.be.reverted; + }); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.edgecases.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.edgecases.test.ts new file mode 100644 index 0000000000..fbefeca202 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.edgecases.test.ts @@ -0,0 +1,159 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + HashConsensus__Harness, + OracleReportSanityChecker__MockForExitBusWeights, + StakingModule__MockBadKeys, + 
StakingRouter__MockForValidatorsExitBus, + ValidatorsExitBus__Harness, +} from "typechain-types"; + +import { numberToHex } from "lib"; + +import { + DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, + deployVEBO, + initVEBO, + makeMockPubkey, + updateLidoLocatorImplementation, +} from "test/deploy"; + +const PUBKEY_AA = "0x" + "aa".repeat(48); +const PUBKEY_BB = "0x" + "bb".repeat(48); + +const encodeV1 = (moduleId: number, nodeOpId: number, valIndex: number, pubkey: string) => + ("0x" + + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + pubkey.slice(2)) as `0x${string}`; + +const encodeV2 = (moduleId: number, nodeOpId: number, valIndex: number, keyIndex: number, pubkey: string) => + ("0x" + + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkey.slice(2)) as `0x${string}`; + +describe("ValidatorsExitBusOracle.sol:edge coverage", () => { + let oracle: ValidatorsExitBus__Harness; + let stakingRouter: StakingRouter__MockForValidatorsExitBus; + let consensus: HashConsensus__Harness; + let admin: HardhatEthersSigner; + let locatorAddr: string; + + beforeEach(async () => { + [admin] = await ethers.getSigners(); + const deployed = await deployVEBO(admin.address); + oracle = deployed.oracle as ValidatorsExitBus__Harness; + stakingRouter = deployed.stakingRouter as StakingRouter__MockForValidatorsExitBus; + consensus = deployed.consensus as HashConsensus__Harness; + locatorAddr = deployed.locatorAddr; + + await initVEBO({ + admin: admin.address, + oracle, + consensus, + resumeAfterDeploy: true, + lastProcessingRefSlot: 0, + }); + }); + + it("unpackExitRequest happy path + bounds", async () => { + const request = encodeV1(1, 2, 3, PUBKEY_AA); + + const [pubkey, nodeOpId, moduleId, valIndex] = await oracle.unpackExitRequest(request, DATA_FORMAT_LIST, 0); + expect(pubkey).to.equal(PUBKEY_AA); + expect(nodeOpId).to.equal(2n); + expect(moduleId).to.equal(1n); + 
expect(valIndex).to.equal(3n); + + await expect(oracle.unpackExitRequest(request, DATA_FORMAT_LIST, 1)).to.be.revertedWithCustomError( + oracle, + "ExitDataIndexOutOfRange", + ); + }); + + it("base _getTimestamp is reachable", async () => { + const ts = await oracle.callBaseTimestamp(); + expect(ts).to.be.greaterThan(0); + }); + + it("unsupported formats revert in decoder, dispatcher, and balance calc", async () => { + const request = encodeV1(1, 1, 1, PUBKEY_AA); + + await expect(oracle.callGetValidatorData(request, 3, 0)).to.be.revertedWithCustomError( + oracle, + "UnsupportedRequestsDataFormat", + ); + await expect(oracle.callProcessExitRequestsList(request, 3)).to.be.revertedWithCustomError( + oracle, + "UnsupportedRequestsDataFormat", + ); + await expect(oracle.calculateTotalExitBalanceEth(request, 3)).to.be.revertedWithCustomError( + oracle, + "UnsupportedRequestsDataFormat", + ); + }); + + it("processExitRequestsList supports format 2 and reverts on unsorted data", async () => { + const req1 = encodeV2(1, 1, 2, 0, makeMockPubkey(1, 0)); // valIndex 2 + const req2 = encodeV2(1, 1, 1, 1, makeMockPubkey(1, 1)); // valIndex 1 (unordered) + const data = (req1 + req2.slice(2)) as `0x${string}`; + + await expect( + oracle.callProcessExitRequestsList(data, DATA_FORMAT_LIST_WITH_KEY_INDEX), + ).to.be.revertedWithCustomError(oracle, "InvalidRequestsDataSortOrder"); + }); + + it("calculateTotalExitBalanceEth reverts on unexpected WC type", async () => { + await stakingRouter.setStakingModuleWithdrawalCredentialsType(30, 0x03); // unsupported + const req = encodeV1(30, 1, 1, PUBKEY_AA); + + await expect(oracle.calculateTotalExitBalanceEth(req, DATA_FORMAT_LIST)).to.be.revertedWithCustomError( + oracle, + "UnexpectedWCType", + ); + }); + + it("InvalidMaxEBWeight when sanity checker returns zero", async () => { + const mockSanity = (await ethers.deployContract("OracleReportSanityChecker__MockForExitBusWeights", [ + 0n, + 32n, + ])) as 
OracleReportSanityChecker__MockForExitBusWeights; + + await updateLidoLocatorImplementation(locatorAddr, { + oracleReportSanityChecker: await mockSanity.getAddress(), + }); + + const req = encodeV1(1, 1, 1, PUBKEY_AA); + await expect(oracle.calculateTotalExitBalanceEth(req, DATA_FORMAT_LIST)).to.be.revertedWithCustomError( + oracle, + "InvalidMaxEBWeight", + ); + }); + + it("verifyKey detects invalid lengths and mismatched pubkeys", async () => { + const badModule = (await ethers.deployContract("StakingModule__MockBadKeys")) as StakingModule__MockBadKeys; + await stakingRouter.setStakingModuleWithdrawalCredentialsType(40, 0x01); + await stakingRouter.setStakingModuleAddress(40, await badModule.getAddress()); + + // invalid length (empty) + await badModule.setReturned("0x"); + const req = encodeV2(40, 1, 1, 0, PUBKEY_AA); + await expect( + oracle.callProcessExitRequestsList(req, DATA_FORMAT_LIST_WITH_KEY_INDEX), + ).to.be.revertedWithCustomError(oracle, "InvalidRetrievedKeyLength"); + + // mismatched pubkey (returns PUBKEY_BB but request has PUBKEY_AA) + await badModule.setReturned(PUBKEY_BB); + await expect( + oracle.callProcessExitRequestsList(req, DATA_FORMAT_LIST_WITH_KEY_INDEX), + ).to.be.revertedWithCustomError(oracle, "InvalidPublicKey"); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts index 87dd6c83e7..779a90e149 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts @@ -7,18 +7,27 @@ import { LidoLocator, ValidatorsExitBus__Harness } from "typechain-types"; import { EPOCHS_PER_FRAME, INITIAL_FAST_LANE_LENGTH_SLOTS, SLOTS_PER_EPOCH, VEBO_CONSENSUS_VERSION } from "lib"; -import { deployLidoLocator } from "test/deploy"; +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; import { Snapshot } from "test/suite"; 
-describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v2", () => { +describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v3", () => { let originalState: string; let locator: LidoLocator; let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; + const NEW_CONSENSUS_VERSION = 42n; before(async () => { locator = await deployLidoLocator(); [admin] = await ethers.getSigners(); + + const stakingRouter = await ethers.deployContract("StakingRouter__MockForValidatorsExitBus"); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(1, 0x01); + + await updateLidoLocatorImplementation(await locator.getAddress(), { + stakingRouter: await stakingRouter.getAddress(), + }); + oracle = await ethers.deployContract("ValidatorsExitBus__Harness", [12n, 100n, await locator.getAddress()]); const consensus = await ethers.deployContract("HashConsensus__Harness", [ @@ -31,7 +40,16 @@ describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v2", () => { await oracle.getAddress(), ]); - await oracle.initialize(admin, await consensus.getAddress(), VEBO_CONSENSUS_VERSION, 0, 10, 100, 1, 48); + await oracle.initialize( + admin, + await consensus.getAddress(), + VEBO_CONSENSUS_VERSION, + 0, + 10, + 100n, // 100 ETH + 32n, // 32 ETH + 48, + ); }); beforeEach(async () => (originalState = await Snapshot.take())); @@ -40,28 +58,42 @@ describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v2", () => { // contract version it("should revert if set wrong version", async () => { - await expect(oracle.finalizeUpgrade_v2(10, 100, 1, 48)).to.be.revertedWithCustomError( + await expect(oracle.finalizeUpgrade_v3(10, 100n, 32n, 48, NEW_CONSENSUS_VERSION)).to.be.revertedWithCustomError( oracle, "InvalidContractVersionIncrement", ); }); it("should successfully finalize upgrade", async () => { - await oracle.setContractVersion(1); - - await oracle.finalizeUpgrade_v2(15, 150, 1, 48); + // Simulate pre-upgrade state (contract at version 2) + await oracle.setContractVersion(2); + + // Set 
balance limits in ETH (not Gwei, not validator counts) + const maxExitBalanceEth = 150n; // 150 ETH + const balancePerFrameEth = 32n; // 32 ETH (1 legacy validator) + const maxValidatorsPerReport = 15; + const frameDuration = 48; + + await oracle.finalizeUpgrade_v3( + maxValidatorsPerReport, + maxExitBalanceEth, + balancePerFrameEth, + frameDuration, + NEW_CONSENSUS_VERSION, + ); - expect(await oracle.getContractVersion()).to.equal(2); + expect(await oracle.getContractVersion()).to.equal(3); + expect(await oracle.getConsensusVersion()).to.equal(NEW_CONSENSUS_VERSION); const exitRequestLimitData = await oracle.getExitRequestLimitFullInfo(); - expect(exitRequestLimitData.maxExitRequestsLimit).to.equal(150); - expect(exitRequestLimitData.exitsPerFrame).to.equal(1); - expect(exitRequestLimitData.frameDurationInSec).to.equal(48); + expect(exitRequestLimitData.maxExitBalanceEth).to.equal(maxExitBalanceEth); + expect(exitRequestLimitData.balancePerFrameEth).to.equal(balancePerFrameEth); + expect(exitRequestLimitData.frameDurationInSec).to.equal(frameDuration); - expect(await oracle.getMaxValidatorsPerReport()).to.equal(15); + expect(await oracle.getMaxValidatorsPerReport()).to.equal(maxValidatorsPerReport); - // should not allow to run finalizeUpgrade_v2 again - await expect(oracle.finalizeUpgrade_v2(10, 100, 1, 48)).to.be.revertedWithCustomError( + // should not allow finalizeUpgrade_v3 to run again + await expect(oracle.finalizeUpgrade_v3(10, 100, 1, 48, NEW_CONSENSUS_VERSION + 1n)).to.be.revertedWithCustomError( oracle, "InvalidContractVersionIncrement", ); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts index 830bc1692e..21d0d379e3 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts @@ -4,16 +4,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; 
-import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + StakingModule__MockForKeyVerification, + StakingRouter__MockForValidatorsExitBus, + ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; import { computeTimestampAtSlot, - DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO, SECONDS_PER_FRAME, + seedMockModuleSigningKeys, SLOTS_PER_FRAME, } from "test/deploy"; import { Snapshot } from "test/suite"; @@ -29,7 +35,16 @@ const PUBKEYS = [ describe("ValidatorsExitBusOracle.sol:gas", () => { let consensus: HashConsensus__Harness; let oracle: ValidatorsExitBus__Harness; + let stakingRouter: StakingRouter__MockForValidatorsExitBus; let admin: HardhatEthersSigner; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; @@ -41,11 +56,13 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const NODE_OPS_PER_MODULE = 100; let nextValIndex = 1; + let nextKeyIndex = 1; interface ExitRequest { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -65,10 +82,16 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + 
numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -92,8 +115,9 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const moduleId = Math.floor(i / requestsPerModule); const nodeOpId = Math.floor((i - moduleId * requestsPerModule) / requestsPerNodeOp); const valIndex = nextValIndex++; + const keyIndex = nextKeyIndex++; const valPubkey = PUBKEYS[valIndex % PUBKEYS.length]; - requests.push({ moduleId: moduleId + 1, nodeOpId, valIndex, valPubkey }); + requests.push({ moduleId: moduleId + 1, nodeOpId, valIndex, keyIndex, valPubkey }); } return { requests, requestsPerModule, requestsPerNodeOp }; @@ -108,6 +132,13 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const deployed = await deployVEBO(admin.address); oracle = deployed.oracle; consensus = deployed.consensus; + stakingRouter = deployed.stakingRouter as StakingRouter__MockForValidatorsExitBus; + mockModules = deployed.mockModules; + + // Use legacy withdrawal credentials (32 ETH per validator) for all modules exercised in this suite + for (let moduleId = 1; moduleId <= 5; moduleId++) { + await stakingRouter.setStakingModuleWithdrawalCredentialsType(moduleId, 0x01); + } await initVEBO({ admin: admin.address, @@ -133,7 +164,7 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { ); }); - for (const totalRequests of [10, 50, 100, 1000, 2000]) { + for (const totalRequests of [10, 50, 100, 600]) { context(`Total requests: ${totalRequests}`, () => { let exitRequests: { requests: ExitRequest[]; requestsPerModule: number; requestsPerNodeOp: number }; let reportFields: ReportFields; @@ -164,9 +195,10 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.requests.length, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests.requests), }; + await 
seedMockModuleSigningKeys(mockModules, exitRequests.requests); reportHash = calcValidatorsExitBusReportDataHash(reportFields); @@ -225,7 +257,7 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const procState = await oracle.getProcessingState(); expect(procState.dataHash).to.equal(reportHash); expect(procState.dataSubmitted).to.equal(true); - expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST_WITH_KEY_INDEX); expect(procState.requestsCount).to.equal(exitRequests.requests.length); expect(procState.requestsSubmitted).to.equal(exitRequests.requests.length); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts index ebc282a4a2..0b4c4b38b6 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts @@ -4,16 +4,21 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + StakingModule__MockForKeyVerification, + ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; import { computeTimestampAtSlot, - DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO, SECONDS_PER_FRAME, + seedMockModuleSigningKeys, SLOTS_PER_FRAME, } from "test/deploy"; @@ -29,6 +34,14 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { let consensus: HashConsensus__Harness; let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: 
StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; let exitRequests: ExitRequest[]; @@ -46,6 +59,7 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -65,10 +79,16 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -81,6 +101,7 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { const deployed = await deployVEBO(admin.address); oracle = deployed.oracle; consensus = deployed.consensus; + mockModules = deployed.mockModules; await initVEBO({ admin: admin.address, @@ -132,18 +153,19 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { const { refSlot } = await consensus.getCurrentFrame(); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, ]; reportFields = { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.length, - 
dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests), }; + await seedMockModuleSigningKeys(mockModules, exitRequests); reportHash = calcValidatorsExitBusReportDataHash(reportFields); @@ -229,7 +251,7 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { expect(procState.dataHash).to.equal(reportHash); expect(procState.processingDeadlineTime).to.equal(computeTimestampAtSlot(frame.reportProcessingDeadlineSlot)); expect(procState.dataSubmitted).to.equal(true); - expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST_WITH_KEY_INDEX); expect(procState.requestsCount).to.equal(exitRequests.length); expect(procState.requestsSubmitted).to.equal(exitRequests.length); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts index 7955a51c41..6504d2f600 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts @@ -52,9 +52,33 @@ const hashExitRequest = (request: { dataFormat: number; data: string }) => { ); }; +// Helper to extract timestamp from ValidatorExitRequest event +// More memory-efficient than keeping full receipt in scope +const getTimestampFromTx = async ( + tx: Awaited>, + oracleInterface: ValidatorsExitBus__Harness["interface"], +): Promise => { + const receipt = await tx.wait(); + if (!receipt) { + throw new Error("Transaction receipt is null"); + } + for (const log of receipt.logs) { + try { + const parsed = oracleInterface.parseLog({ topics: [...log.topics], data: log.data }); + if (parsed?.name === "ValidatorExitRequest") { + return parsed.args[4]; // Return timestamp immediately + } + } catch { + // Skip logs from other contracts + } + } + throw new Error("ValidatorExitRequest event not found"); +}; + 
describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { let consensus: HashConsensus__Harness; let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; let exitRequests = [ @@ -172,7 +196,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { it("Should revert if wrong DATA_FORMAT", async () => { const exitRequestWrongDataFormat: ExitRequestData = { - dataFormat: 2, + dataFormat: 3, data: encodeExitRequestsDataList(exitRequests), }; const hash = hashExitRequest(exitRequestWrongDataFormat); @@ -182,7 +206,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await expect(oracle.submitExitRequestsData(exitRequestWrongDataFormat)) .to.be.revertedWithCustomError(oracle, "UnsupportedRequestsDataFormat") - .withArgs(2); + .withArgs(3); }); it("Should revert if contains duplicates", async () => { @@ -298,20 +322,21 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await oracle.grantRole(submitRole, authorizedEntity); }); - // ----------------------------------------------------------------------------- - // Shared test data - // ----------------------------------------------------------------------------- - const MAX_EXIT_REQUESTS_LIMIT = 5; - const EXITS_PER_FRAME = 1; + // Limit configuration (in ETH, as used by the contract) + // The limit allows up to 5 validators worth of balance: + // 2 legacy (64 ETH) + 3 MaxEB slots (6144 ETH) = 6208 ETH total + const MAX_EXIT_BALANCE_ETH = 6_208n; // Total balance of all 5 validators in ETH + const BALANCE_PER_FRAME_ETH = 2_048n; // 1 MaxEB validator per frame (2048 ETH) const FRAME_DURATION = 48; // Data for case when limit is not enough to process entire request + // Total: 2×32 ETH (module 1) + 2×2048 ETH (module 2) + 1×2048 ETH (module 3) = 6208 ETH const VALIDATORS: ExitRequest[] = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { 
moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[3] }, - { moduleId: 3, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[4] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, // 32 ETH + { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, // 32 ETH + { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, // 2048 ETH + { moduleId: 2, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[3] }, // 2048 ETH + { moduleId: 3, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[4] }, // 2048 ETH ]; const REQUEST = { @@ -325,28 +350,29 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { const reportLimitRole = await oracle.EXIT_REQUEST_LIMIT_MANAGER_ROLE(); await expect( - oracle.connect(stranger).setExitRequestLimit(MAX_EXIT_REQUESTS_LIMIT, EXITS_PER_FRAME, FRAME_DURATION), + oracle.connect(stranger).setExitRequestLimit(MAX_EXIT_BALANCE_ETH, BALANCE_PER_FRAME_ETH, FRAME_DURATION), ).to.be.revertedWithOZAccessControlError(await stranger.getAddress(), reportLimitRole); }); it("Should not allow to set exits per frame bigger than max limit", async () => { await expect( - oracle.connect(authorizedEntity).setExitRequestLimit(10, 12, FRAME_DURATION), - ).to.be.revertedWithCustomError(oracle, "TooLargeExitsPerFrame"); + oracle.connect(authorizedEntity).setExitRequestLimit(10n * 2048n, 12n * 2048n, FRAME_DURATION), + ).to.be.revertedWithCustomError(oracle, "TooLargeItemsPerFrame"); }); it("Should deliver request as it is below limit", async () => { const exitLimitTx = await oracle .connect(authorizedEntity) - .setExitRequestLimit(MAX_EXIT_REQUESTS_LIMIT, EXITS_PER_FRAME, FRAME_DURATION); + .setExitRequestLimit(MAX_EXIT_BALANCE_ETH, BALANCE_PER_FRAME_ETH, FRAME_DURATION); await expect(exitLimitTx) - .to.emit(oracle, "ExitRequestsLimitSet") - .withArgs(MAX_EXIT_REQUESTS_LIMIT, EXITS_PER_FRAME, FRAME_DURATION); + .to.emit(oracle, "ExitBalanceLimitSet") + 
.withArgs(MAX_EXIT_BALANCE_ETH, BALANCE_PER_FRAME_ETH, FRAME_DURATION); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, // 32 ETH + { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, // 32 ETH ]; + // Total: 64 ETH = 64,000,000,000 Gwei (well below limit) exitRequest = { dataFormat: DATA_FORMAT_LIST, @@ -369,36 +395,51 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { }); it("Should not allow to deliver if limit doesnt cover full request", async () => { + // Previous test consumed 64 ETH (2 legacy validators × 32 ETH) + // The limit starts at MAX_EXIT_BALANCE_ETH (6208 ETH), not BALANCE_PER_FRAME_ETH + // Remaining: 6208 - 64 = 6144 ETH + const consumedEth = 64n; // 2 legacy validators × 32 ETH + const remainingEth = MAX_EXIT_BALANCE_ETH - consumedEth; // 6144 ETH + const requestTotalEth = 6208n; // 2×32 + 3×2048 ETH + await oracle.connect(authorizedEntity).submitExitRequestsHash(HASH_REQUEST); await expect(oracle.submitExitRequestsData(REQUEST)) .to.be.revertedWithCustomError(oracle, "ExitRequestsLimitExceeded") - .withArgs(5, 3); + .withArgs(requestTotalEth, remainingEth); }); - it("Current limit should be equal to 0", async () => { + it("Current limit should reflect consumed balance", async () => { const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(MAX_EXIT_REQUESTS_LIMIT); - expect(data.exitsPerFrame).to.equal(EXITS_PER_FRAME); + const consumedEth = 64n; // 64 ETH from previous test + const remainingEth = MAX_EXIT_BALANCE_ETH - consumedEth; // 6144 ETH + + expect(data.maxExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); + expect(data.balancePerFrameEth).to.equal(BALANCE_PER_FRAME_ETH); expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(3); - 
expect(data.currentExitRequestsLimit).to.equal(3); + expect(data.prevExitBalanceEth).to.equal(remainingEth); + expect(data.currentExitBalanceEth).to.equal(remainingEth); }); - it("Should current limit should be increased on 2 if 2*48 seconds passed", async () => { + it("Should current limit should be increased if 2*48 seconds passed", async () => { await consensus.advanceTimeBy(2 * 4 * 12); const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(MAX_EXIT_REQUESTS_LIMIT); - expect(data.exitsPerFrame).to.equal(EXITS_PER_FRAME); + const consumedEth = 64n; // 64 ETH from first test + const remainingEth = MAX_EXIT_BALANCE_ETH - consumedEth; // 6144 ETH + + expect(data.maxExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); + expect(data.balancePerFrameEth).to.equal(BALANCE_PER_FRAME_ETH); expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(3); - expect(data.currentExitRequestsLimit).to.equal(5); + expect(data.prevExitBalanceEth).to.equal(remainingEth); + // After 2 frames (2×48 seconds), we get 2 more frames worth of balance: 6144 + 4096 = 10240 ETH + // But capped at MAX_EXIT_BALANCE_ETH (6208 ETH) + expect(data.currentExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); }); it("Should process requests after 2 frames passes", async () => { const emitTx = await oracle.submitExitRequestsData(REQUEST); - const timestamp = await oracle.getTime(); + const timestamp = await getTimestampFromTx(emitTx, oracle.interface); for (let i = 0; i < 5; i++) { const request = VALIDATORS[i]; @@ -438,9 +479,10 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { }); it("Should not allow to process request larger than MAX_VALIDATORS_PER_REPORT", async () => { - await consensus.advanceTimeBy(MAX_EXIT_REQUESTS_LIMIT * 4 * 12); + // Advance time to ensure we have enough balance limit + await consensus.advanceTimeBy(MAX_EXIT_BALANCE_ETH * 4n * 12n); const data = await 
oracle.getExitRequestLimitFullInfo(); - expect(data.currentExitRequestsLimit).to.equal(MAX_EXIT_REQUESTS_LIMIT); + expect(data.currentExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); const maxRequestsPerReport = 4; @@ -448,6 +490,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await expect(tx).to.emit(oracle, "SetMaxValidatorsPerReport").withArgs(maxRequestsPerReport); expect(await oracle.connect(authorizedEntity).getMaxValidatorsPerReport()).to.equal(maxRequestsPerReport); + // Create a request with 5 validators (exceeds maxRequestsPerReport of 4) const exitRequestsRandom = [ { moduleId: 100, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, { moduleId: 101, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, @@ -465,26 +508,27 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await oracle.connect(authorizedEntity).submitExitRequestsHash(exitRequestHashRandom); + // Should fail because 5 validators > maxRequestsPerReport (4) await expect(oracle.submitExitRequestsData(exitRequestRandom)) .to.be.revertedWithCustomError(oracle, "TooManyExitRequestsInReport") .withArgs(5, 4); }); - it("Should set maxExitRequestsLimit equal to 0 and return as currentExitRequestsLimit type(uint256).max", async () => { - // can't set just maxExitRequestsLimit to 0, as it will be less than exitsPerFrame + it("Should set maxExitBalanceEth equal to 0 and return as currentExitBalanceEth type(uint256).max", async () => { + // can't set just maxExitBalanceEth to 0, as it will be less than balancePerFrameEth const exitLimitTx = await oracle.connect(authorizedEntity).setExitRequestLimit(0, 0, FRAME_DURATION); - await expect(exitLimitTx).to.emit(oracle, "ExitRequestsLimitSet").withArgs(0, 0, FRAME_DURATION); + await expect(exitLimitTx).to.emit(oracle, "ExitBalanceLimitSet").withArgs(0, 0, FRAME_DURATION); const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(0); - 
expect(data.exitsPerFrame).to.equal(0); + expect(data.maxExitBalanceEth).to.equal(0); + expect(data.balancePerFrameEth).to.equal(0); expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(0); - expect(data.currentExitRequestsLimit).to.equal(2n ** 256n - 1n); + expect(data.prevExitBalanceEth).to.equal(0); + expect(data.currentExitBalanceEth).to.equal(2n ** 256n - 1n); }); - it("Should not check limit, if maxLimitRequests equal to 0 (means limit was not set)", async () => { + it("Should not check limit, if maxExitBalanceEth equal to 0 (means limit was not set)", async () => { const exitRequestsRandom = [ { moduleId: 100, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, { moduleId: 101, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, @@ -500,25 +544,23 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await oracle.connect(authorizedEntity).submitExitRequestsHash(exitRequestRandomHash); const emitTx = await oracle.submitExitRequestsData(exitRequestRandom); - const timestamp = await oracle.getTime(); + const timestamp = await getTimestampFromTx(emitTx, oracle.interface); - for (let i = 0; i < 2; i++) { - const request = exitRequestsRandom[i]; - await expect(emitTx) - .to.emit(oracle, "ValidatorExitRequest") - .withArgs(request.moduleId, request.nodeOpId, request.valIndex, request.valPubkey, timestamp); - } + // Check each event individually + await expect(emitTx).to.emit(oracle, "ValidatorExitRequest").withArgs(100, 0, 0, PUBKEYS[0], timestamp); + + await expect(emitTx).to.emit(oracle, "ValidatorExitRequest").withArgs(101, 0, 2, PUBKEYS[1], timestamp); await expect(emitTx).to.emit(oracle, "ExitDataProcessing").withArgs(exitRequestRandomHash); const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(0); - expect(data.exitsPerFrame).to.equal(0); + expect(data.maxExitBalanceEth).to.equal(0); + expect(data.balancePerFrameEth).to.equal(0); 
expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(0); + expect(data.prevExitBalanceEth).to.equal(0); - // as time is mocked and we didnt change it since last consume, currentExitRequestsLimit was not increased + // as time is mocked and we didn't change it since the last consume, currentExitBalanceEth was not increased - expect(data.currentExitRequestsLimit).to.equal(2n ** 256n - 1n); + expect(data.currentExitBalanceEth).to.equal(2n ** 256n - 1n); }); }); @@ -546,7 +588,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { it("Check version", async () => { // set in initialize in deployVEBO - expect(await oracle.getContractVersion()).to.equal(2); + expect(await oracle.getContractVersion()).to.equal(3); }); it("Store exit hash", async () => { @@ -554,14 +596,14 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { }); it("set new version", async () => { - await oracle.setContractVersion(3); - expect(await oracle.getContractVersion()).to.equal(3); + await oracle.setContractVersion(4); + expect(await oracle.getContractVersion()).to.equal(4); }); it("Should revert if request has old contract version", async () => { await expect(oracle.submitExitRequestsData(REQUEST)) .to.be.revertedWithCustomError(oracle, "UnexpectedContractVersion") - .withArgs(3, 2); + .withArgs(4, 3); }); }); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts index 016d36e19c..07d8cd7fdd 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts @@ -4,11 +4,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { HashConsensus__Harness, OracleReportSanityChecker, ValidatorsExitBus__Harness } from "typechain-types"; +import {
ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; -import { computeTimestampAtSlot, DATA_FORMAT_LIST, deployVEBO, initVEBO } from "test/deploy"; +import { + computeTimestampAtSlot, + DATA_FORMAT_LIST_WITH_KEY_INDEX, + deployVEBO, + initVEBO, + seedMockModuleSigningKeys, +} from "test/deploy"; import { Snapshot } from "test/suite"; const PUBKEYS = [ @@ -25,6 +36,14 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; let oracleReportSanityChecker: OracleReportSanityChecker; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; @@ -40,6 +59,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -59,10 +79,16 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -77,13 +103,15 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { }; const prepareReportAndSubmitHash = async 
( - requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }], + requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2], keyIndex: 1 }], options = { reportFields: {} }, ) => { + await seedMockModuleSigningKeys(mockModules, requests); + const { refSlot } = await consensus.getCurrentFrame(); const reportData = { consensusVersion: VEBO_CONSENSUS_VERSION, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, refSlot, requestsCount: requests.length, data: encodeExitRequestsDataList(requests), @@ -102,6 +130,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { oracle = deployed.oracle; consensus = deployed.consensus; oracleReportSanityChecker = deployed.oracleReportSanityChecker; + mockModules = deployed.mockModules; await initVEBO({ admin: admin.address, @@ -175,7 +204,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("dataFormat = 0 reverts", async () => { const dataFormatUnsupported = 0; const { reportData } = await prepareReportAndSubmitHash( - [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }], { reportFields: { dataFormat: dataFormatUnsupported } }, ); @@ -184,10 +213,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { .withArgs(dataFormatUnsupported); }); - it("dataFormat = 2 reverts", async () => { - const dataFormatUnsupported = 2; + it("dataFormat = 3 reverts", async () => { + const dataFormatUnsupported = 3; const { reportData } = await prepareReportAndSubmitHash( - [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }], { reportFields: { dataFormat: dataFormatUnsupported } }, ); @@ -198,7 +227,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("dataFormat = 1 pass", async () => { const { reportData } = await 
prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); }); @@ -207,7 +236,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { context("enforces data length", () => { it("reverts if there is more data than expected", async () => { const { refSlot } = await consensus.getCurrentFrame(); - const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }]; + const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }]; const { reportData } = await prepareReportAndSubmitHash(exitRequests, { reportFields: { refSlot, data: encodeExitRequestsDataList(exitRequests) + "aaaaaaaaaaaaaaaaaa" }, }); @@ -220,7 +249,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("reverts if there is less data than expected", async () => { const { refSlot } = await consensus.getCurrentFrame(); - const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }]; + const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }]; const data = encodeExitRequestsDataList(exitRequests); const { reportData } = await prepareReportAndSubmitHash(exitRequests, { @@ -238,7 +267,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("pass if there is exact amount of data", async () => { const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); }); @@ -247,27 +276,33 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { context("invokes sanity check", () => { before(async () => { await 
oracleReportSanityChecker.grantRole( - await oracleReportSanityChecker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), + await oracleReportSanityChecker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), admin.address, ); }); it("reverts if request limit is reached", async () => { - const exitRequestsLimit = 1; - await oracleReportSanityChecker.connect(admin).setMaxExitRequestsPerOracleReport(exitRequestsLimit); + // Module 5 (not curated) = 2048 ETH per validator + // Set limit to 1 validator worth + const exitRequestsLimit = 2_048n; // 2048 ETH + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(exitRequestsLimit); const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]); + // 2 validators = 4096 ETH (actual balance that exceeds the limit) + const actualBalance = 2_048n * 2n; await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) - .to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectNumberOfExitRequestsPerReport") - .withArgs(exitRequestsLimit); + .to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectSumOfExitBalancePerReport") + .withArgs(actualBalance); }); it("pass if requests amount equals to limit", async () => { - const exitRequestsLimit = 1; - await oracleReportSanityChecker.connect(admin).setMaxExitRequestsPerOracleReport(exitRequestsLimit); + // Module 5 (not curated) = 2048 ETH per validator + // Set limit to exactly 1 validator worth + const exitRequestsLimit = 2_048n; // 2048 ETH + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(exitRequestsLimit); const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, 
nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); }); @@ -276,7 +311,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { context("validates data.requestsCount field with given data", () => { it("reverts if requestsCount does not match with encoded data size", async () => { const { reportData } = await prepareReportAndSubmitHash( - [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }], { reportFields: { requestsCount: 2 } }, ); @@ -289,7 +324,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("reverts if moduleId equals zero", async () => { const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 0, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 0, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( @@ -300,8 +335,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("emits ValidatorExitRequest events", async () => { const requests = [ - { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]; const { reportData } = await prepareReportAndSubmitHash(requests); const tx = await oracle.connect(member1).submitReportData(reportData, oracleVersion); @@ -333,8 +368,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { const { refSlot } = await consensus.getCurrentFrame(); const requests = [ - { moduleId: 4, nodeOpId: 3, valIndex: 2, valPubkey: 
PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 4, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]; const { reportData } = await prepareReportAndSubmitHash(requests); await oracle.connect(member1).submitReportData(reportData, oracleVersion); @@ -343,7 +378,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { expect(storageAfter.refSlot).to.equal(refSlot); expect(storageAfter.requestsCount).to.equal(requests.length); expect(storageAfter.requestsProcessed).to.equal(requests.length); - expect(storageAfter.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(storageAfter.dataFormat).to.equal(DATA_FORMAT_LIST_WITH_KEY_INDEX); }); it("updates total requests processed count", async () => { @@ -352,7 +387,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { expect(countStep0).to.equal(currentCount); // Step 1 — process 1 item - const requestsStep1 = [{ moduleId: 3, nodeOpId: 1, valIndex: 2, valPubkey: PUBKEYS[1] }]; + const requestsStep1 = [{ moduleId: 3, nodeOpId: 1, valIndex: 2, valPubkey: PUBKEYS[1], keyIndex: 1 }]; const { reportData: reportStep1 } = await prepareReportAndSubmitHash(requestsStep1); await oracle.connect(member1).submitReportData(reportStep1, oracleVersion); const countStep1 = await oracle.getTotalRequestsProcessed(); @@ -362,8 +397,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { // Step 2 — process 2 items await consensus.advanceTimeToNextFrameStart(); const requestsStep2 = [ - { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]; const { reportData: reportStep2 } = await prepareReportAndSubmitHash(requestsStep2); 
await oracle.connect(member1).submitReportData(reportStep2, oracleVersion); @@ -446,9 +481,9 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { }); it("reverts on hash mismatch", async () => { - const requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }]; + const requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2], keyIndex: 1 }]; const { reportHash: actualReportHash } = await prepareReportAndSubmitHash(requests); - const newRequests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[1] }]; + const newRequests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[1], keyIndex: 1 }]; const { refSlot } = await consensus.getCurrentFrame(); // change pubkey @@ -456,7 +491,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot, requestsCount: requests.length, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(newRequests), }; @@ -497,7 +532,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("should increase after report", async () => { const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion, { from: member1 }); requestCount += 1; @@ -507,8 +542,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("should double increase for two exits", async () => { await consensus.advanceTimeToNextFrameStart(); const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, - { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 
3, valIndex: 1, valPubkey: PUBKEYS[0], keyIndex: 2 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); requestCount += 2; @@ -549,8 +584,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("consensus report submitted", async () => { ({ reportData: report, reportHash: hash } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[3] }, + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[3], keyIndex: 2 }, ])); const state = await oracle.getProcessingState(); @@ -573,7 +608,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { computeTimestampAtSlot((await consensus.getCurrentFrame()).reportProcessingDeadlineSlot), hash, true, - DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, 2, 2, ]); @@ -609,20 +644,23 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("Set exit limit", async () => { const role = await oracle.EXIT_REQUEST_LIMIT_MANAGER_ROLE(); await oracle.grantRole(role, admin); - const exitLimitTx = await oracle.connect(admin).setExitRequestLimit(7, 1, 48); - await expect(exitLimitTx).to.emit(oracle, "ExitRequestsLimitSet").withArgs(7, 1, 48); + // Set limit to allow 4160 ETH (2 legacy + 2 MaxEB validators) + // Max: 7000 ETH, Per frame: 5000 ETH (enough to cover 4160 ETH) + const exitLimitTx = await oracle.connect(admin).setExitRequestLimit(7_000n, 5_000n, 48); + await expect(exitLimitTx).to.emit(oracle, "ExitBalanceLimitSet").withArgs(7_000n, 5_000n, 48); }); it("deliver report by actor different from oracle", async () => { const requests = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 
3, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3], keyIndex: 4 }, ]; + await seedMockModuleSigningKeys(mockModules, requests); const data = await encodeExitRequestsDataList(requests); const exitRequestHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST_WITH_KEY_INDEX]), ); const role = await oracle.SUBMIT_REPORT_HASH_ROLE(); @@ -632,7 +670,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { await expect(submitTx).to.emit(oracle, "RequestsHashSubmitted").withArgs(exitRequestHash); const exitRequest = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data, }; @@ -656,10 +694,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("oracle does not consume common veb limits", async () => { const requests = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 3, valIndex: 4, valPubkey: PUBKEYS[4] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 4, valPubkey: PUBKEYS[4], keyIndex: 4 }, ]; const { reportData } = await prepareReportAndSubmitHash(requests); @@ -693,10 +731,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { after(async 
() => await Snapshot.restore(originalState)); const validators = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3], keyIndex: 4 }, ]; let exitRequestHash: string; @@ -704,7 +742,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("create hash", async () => { const data = await encodeExitRequestsDataList(validators); exitRequestHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST_WITH_KEY_INDEX]), ); }); @@ -784,24 +822,30 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { const role = await oracle.EXIT_REQUEST_LIMIT_MANAGER_ROLE(); await oracle.grantRole(role, admin); - await oracle.connect(admin).setExitRequestLimit(100, 1, 48); + // Set limit to allow 4160 ETH (2 legacy + 2 MaxEB validators) + // Max: 100000 ETH, Per frame: 5000 ETH + await oracle.connect(admin).setExitRequestLimit(100_000n, 5_000n, 48); }); after(async () => await Snapshot.restore(originalState)); const validators = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, 
nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3], keyIndex: 4 }, ]; let exitRequestHash: string; let exitRequests: string; it("create hash", async () => { + await seedMockModuleSigningKeys(mockModules, validators); exitRequests = await encodeExitRequestsDataList(validators); exitRequestHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [exitRequests, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode( + ["bytes", "uint256"], + [exitRequests, DATA_FORMAT_LIST_WITH_KEY_INDEX], + ), ); }); @@ -818,7 +862,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("submit report by actor different from oracle", async () => { const exitRequest = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: exitRequests, }; @@ -928,7 +972,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("submit report pass", async () => { const encodedEmptyRequestList = encodeExitRequestsDataList([]); const exitHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [encodedEmptyRequestList, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode( + ["bytes", "uint256"], + [encodedEmptyRequestList, DATA_FORMAT_LIST_WITH_KEY_INDEX], + ), ); await expect(oracle.connect(member1).getDeliveryTimestamp(exitHash)).to.be.revertedWithCustomError( diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts index fef2fe9540..7256c960d0 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts @@ -5,13 +5,20 @@ import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { 
HashConsensus__Harness, + StakingModule__MockForKeyVerification, TriggerableWithdrawalsGateway__MockForVEB, ValidatorsExitBus__Harness, } from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; -import { DATA_FORMAT_LIST, deployVEBO, initVEBO, SECONDS_PER_FRAME } from "test/deploy"; +import { + DATA_FORMAT_LIST_WITH_KEY_INDEX, + deployVEBO, + initVEBO, + SECONDS_PER_FRAME, + seedMockModuleSigningKeys, +} from "test/deploy"; // ----------------------------------------------------------------------------- // Constants & helpers @@ -37,6 +44,7 @@ interface ExitRequest { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -56,10 +64,16 @@ const calcValidatorsExitBusReportDataHash = (items: ReportFields) => { return reportDataHash; }; -const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { +const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -85,6 +99,14 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; let triggerableWithdrawalsGateway: TriggerableWithdrawalsGateway__MockForVEB; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; @@ 
-99,6 +121,7 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { oracle = deployed.oracle; consensus = deployed.consensus; triggerableWithdrawalsGateway = deployed.triggerableWithdrawalsGateway; + mockModules = deployed.mockModules; await initVEBO({ admin: admin.address, @@ -124,10 +147,10 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { describe("Submit via oracle flow ", async () => { const exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 2, nodeOpId: 0, valIndex: 3, keyIndex: 3, valPubkey: PUBKEYS[3] }, ]; let reportFields: ReportFields; @@ -135,8 +158,8 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { before(async () => { [admin, member1, member2, member3, authorizedEntity, stranger] = await ethers.getSigners(); - await deploy(); + await seedMockModuleSigningKeys(mockModules, exitRequests); }); it("some time passes", async () => { @@ -150,7 +173,7 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.length, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests), }; @@ -286,13 +309,13 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { describe("Submit via trustfull method", () => { const exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, 
nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 2, keyIndex: 2, valPubkey: PUBKEYS[1] }, ]; const exitRequest = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests), }; @@ -300,8 +323,8 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { before(async () => { [admin, member1, member2, member3, authorizedEntity] = await ethers.getSigners(); - await deploy(); + await seedMockModuleSigningKeys(mockModules, exitRequests); }); it("should revert if request was not submitted", async () => { @@ -380,13 +403,14 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { it("should revert with error if module id is equal to 0", async () => { const requests = [ - { moduleId: 0, nodeOpId: 1, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 0, nodeOpId: 1, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 2, keyIndex: 2, valPubkey: PUBKEYS[1] }, ]; + await seedMockModuleSigningKeys(mockModules, requests); const request = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(requests), }; diff --git a/test/0.8.9/sanityChecker/oracleReportSanityChecker.checkModuleAndCLBalancesChangeRates.test.ts b/test/0.8.9/sanityChecker/oracleReportSanityChecker.checkModuleAndCLBalancesChangeRates.test.ts new file mode 100644 index 0000000000..e58c212e2b --- /dev/null +++ b/test/0.8.9/sanityChecker/oracleReportSanityChecker.checkModuleAndCLBalancesChangeRates.test.ts @@ -0,0 +1,977 @@ 
+import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + Accounting__MockForSanityChecker, + AccountingOracle__MockForSanityChecker, + Burner__MockForSanityChecker, + LidoLocator__MockForSanityChecker, + OracleReportSanityCheckerWrapper, + StakingModule__MockForStakingRouter, + StakingRouter__Harness, + StakingRouter__MockForAccountingOracle, + WithdrawalQueue__MockForSanityChecker, +} from "typechain-types"; + +import { ether, impersonate, ONE_GWEI, randomWCType1, WithdrawalCredentialsType } from "lib"; + +import { deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +const ONE_DAY = 24n * 60n * 60n; + +describe("OracleReportSanityChecker.sol:checkModuleAndCLBalancesChangeRates", () => { + type ModuleBalance = { + id: bigint; + validatorsBalanceWei: bigint; + pendingWei?: bigint; + }; + + const limits = { + exitedEthAmountPerDayLimit: 100n, + appearedEthAmountPerDayLimit: 100n, + annualBalanceIncreaseBPLimit: 1_000n, + simulatedShareRateDeviationBPLimit: 250n, + maxBalanceExitRequestedPerReportInEth: 65_000n, + maxEffectiveBalanceWeightWCType01: 32n, + maxEffectiveBalanceWeightWCType02: 2_048n, + maxItemsPerExtraDataTransaction: 15n, + maxNodeOperatorsPerExtraDataItem: 16n, + requestTimestampMargin: 128n, + maxPositiveTokenRebase: 5_000_000n, + maxCLBalanceDecreaseBP: 360n, + clBalanceOraclesErrorUpperBPLimit: 50n, + consolidationEthAmountPerDayLimit: 10n, + exitedValidatorEthAmountLimit: 1n, + externalPendingBalanceCapEth: 0n, + }; + + let checker: OracleReportSanityCheckerWrapper; + let locator: LidoLocator__MockForSanityChecker; + let burner: Burner__MockForSanityChecker; + let accounting: Accounting__MockForSanityChecker; + let withdrawalQueue: WithdrawalQueue__MockForSanityChecker; + let stakingRouter: StakingRouter__MockForAccountingOracle; + let accountingOracle: AccountingOracle__MockForSanityChecker; + + let deployer: 
HardhatEthersSigner; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let elRewardsVault: HardhatEthersSigner; + + let originalState: string; + + const toGwei = (weiAmount: bigint) => weiAmount / ONE_GWEI; + + const toModuleInput = (modules: ModuleBalance[]) => { + const ids = modules.map((m) => m.id); + const validatorBalancesGweiByStakingModule = modules.map((m) => toGwei(m.validatorsBalanceWei)); + + return { + ids, + validatorBalancesGweiByStakingModule, + }; + }; + + const seedPreviousBalances = async (modules: ModuleBalance[]) => { + const input = toModuleInput(modules); + for (const id of input.ids) { + await stakingRouter.mock__registerStakingModule(id); + } + // Router state seeds validators balance only; pending budget is passed to the checker explicitly. + await stakingRouter.reportValidatorBalancesByStakingModule(input.ids, input.validatorBalancesGweiByStakingModule); + }; + + const check = async ( + modules: ModuleBalance[], + { + preCLPendingBalanceWei = 0n, + // Module fixtures carry only post-report pending; router state itself no longer stores module pending. + postCLPendingBalanceWei = modules.reduce((sum, m) => sum + (m.pendingWei ?? 
0n), 0n), + depositsWei = 0n, + timeElapsed = ONE_DAY, + }: { + preCLPendingBalanceWei?: bigint; + postCLPendingBalanceWei?: bigint; + depositsWei?: bigint; + timeElapsed?: bigint; + } = {}, + ) => { + const ids = modules.map((m) => m.id); + const validatorBalancesWeiByStakingModule = modules.map((m) => m.validatorsBalanceWei); + const postCLValidatorsBalanceWei = validatorBalancesWeiByStakingModule.reduce((sum, val) => sum + val, 0n); + const previousModuleStates = await Promise.all(ids.map((id) => stakingRouter.getStakingModuleStateAccounting(id))); + const preCLValidatorsBalanceWei = previousModuleStates.reduce( + (sum, [validatorsBalanceGwei]) => sum + validatorsBalanceGwei * ONE_GWEI, + 0n, + ); + return checker.checkModuleAndCLBalancesChangeRates( + ids, + validatorBalancesWeiByStakingModule, + preCLValidatorsBalanceWei, + preCLPendingBalanceWei, + postCLValidatorsBalanceWei, + postCLPendingBalanceWei, + depositsWei, + timeElapsed, + ); + }; + + const deployCheckerWithRouterModules = async (modulesCount = 1, postMigrationFirstReportDone = true) => { + const routerHarness = (await deployStakingRouter({ deployer, admin }, {})) as { + stakingRouter: StakingRouter__Harness; + }; + const moduleIds: bigint[] = []; + + await routerHarness.stakingRouter.connect(admin).initialize(admin.address, randomWCType1()); + await routerHarness.stakingRouter + .connect(admin) + .grantRole(await routerHarness.stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin.address); + await routerHarness.stakingRouter + .connect(admin) + .grantRole(await routerHarness.stakingRouter.REPORT_EXITED_VALIDATORS_ROLE(), admin.address); + for (let i = 0; i < modulesCount; i++) { + const module = (await ethers.deployContract( + "StakingModule__MockForStakingRouter", + deployer, + )) as StakingModule__MockForStakingRouter; + + await routerHarness.stakingRouter + .connect(admin) + .addStakingModule(`new module ${i + 1}`, await module.getAddress(), { + stakeShareLimit: 10_000n, + 
priorityExitShareThreshold: 10_000n, + stakingModuleFee: 500n, + treasuryFee: 500n, + maxDepositsPerBlock: 150n, + minDepositBlockDistance: 25n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + + moduleIds.push(BigInt(i + 1)); + } + + const locatorWithRouter = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ + { + lido: deployer.address, + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await routerHarness.stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: deployer.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, + }, + ]); + + const checkerWithRouter = await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locatorWithRouter.getAddress(), + await accounting.getAddress(), + admin.address, + limits, + postMigrationFirstReportDone, + ]); + + return { + checkerWithRouter, + stakingRouterHarness: routerHarness.stakingRouter, + moduleIds, + }; + }; + + const checkGlobalReport = ( + sanityChecker: OracleReportSanityCheckerWrapper, + accountingSigner: HardhatEthersSigner, + { + timeElapsed = ONE_DAY, + preValidatorsWei = 0n, + prePendingWei = 0n, + postValidatorsWei = 0n, + postPendingWei = 0n, + withdrawalVaultBalanceWei = 0n, + 
elRewardsVaultBalanceWei = 0n, + sharesRequestedToBurn = 0n, + depositsWei = 0n, + withdrawalsVaultTransferWei = 0n, + }: { + timeElapsed?: bigint; + preValidatorsWei?: bigint; + prePendingWei?: bigint; + postValidatorsWei?: bigint; + postPendingWei?: bigint; + withdrawalVaultBalanceWei?: bigint; + elRewardsVaultBalanceWei?: bigint; + sharesRequestedToBurn?: bigint; + depositsWei?: bigint; + withdrawalsVaultTransferWei?: bigint; + }, + ) => + sanityChecker + .connect(accountingSigner) + .checkAccountingOracleReport( + timeElapsed, + preValidatorsWei, + prePendingWei, + postValidatorsWei, + postPendingWei, + withdrawalVaultBalanceWei, + elRewardsVaultBalanceWei, + sharesRequestedToBurn, + depositsWei, + withdrawalsVaultTransferWei, + ); + + before(async () => { + [deployer, admin, manager, elRewardsVault] = await ethers.getSigners(); + + withdrawalQueue = await ethers.deployContract("WithdrawalQueue__MockForSanityChecker"); + burner = await ethers.deployContract("Burner__MockForSanityChecker"); + accounting = await ethers.deployContract("Accounting__MockForSanityChecker"); + stakingRouter = await ethers.deployContract("StakingRouter__MockForAccountingOracle"); + + accountingOracle = await ethers.deployContract("AccountingOracle__MockForSanityChecker", [ + deployer.address, + 12, + 1_606_824_023, + ]); + + locator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ + { + lido: deployer.address, + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: deployer.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: 
deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, + }, + ]); + + checker = await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + limits, + true, + ]); + }); + + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + it("passes for empty module arrays and zero totals", async () => { + await expect(checker.checkModuleAndCLBalancesChangeRates([], [], 0n, 0n, 0n, 0n, 0n, ONE_DAY)).not.to.be.reverted; + }); + + it("skips module-specific checks for the first report of a newly added module", async () => { + const { checkerWithRouter, moduleIds } = await deployCheckerWithRouterModules(); + const [moduleId] = moduleIds; + const firstReportTotalBalanceWei = ether("120"); + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [firstReportTotalBalanceWei], + firstReportTotalBalanceWei, + 0n, + firstReportTotalBalanceWei, + 0n, + 0n, + ONE_DAY, + ), + ).not.to.be.reverted; + }); + + it("skips the module validators balance increase check on the first post-migration report and applies it on the second", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(1, false); + const [moduleId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const previousValidatorsBalanceWei = ether("40150"); + const prePendingBalanceWei = ether("120"); + const excessiveValidatorsGrowthWei = ether("112"); + const 
postValidatorsBalanceWei = previousValidatorsBalanceWei + excessiveValidatorsGrowthWei; + const postPendingBalanceWei = ether("20"); + const activatedBalanceWei = prePendingBalanceWei - postPendingBalanceWei; + const expectedValidatorsGrowthLimitWei = + activatedBalanceWei + + ((previousValidatorsBalanceWei + activatedBalanceWei) * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + + const problematicModuleReport = () => + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [postValidatorsBalanceWei], + previousValidatorsBalanceWei, + prePendingBalanceWei, + postValidatorsBalanceWei, + postPendingBalanceWei, + 0n, + ONE_DAY, + ); + + await stakingRouterHarness + .connect(admin) + .reportValidatorBalancesByStakingModule([moduleId], [previousValidatorsBalanceWei / ONE_GWEI]); + + await expect(problematicModuleReport()).not.to.be.reverted; + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + preValidatorsWei: previousValidatorsBalanceWei, + prePendingWei: prePendingBalanceWei, + postValidatorsWei: previousValidatorsBalanceWei, + postPendingWei: prePendingBalanceWei, + }), + ).not.to.be.reverted; + + await expect(problematicModuleReport()) + .to.be.revertedWithCustomError(checkerWithRouter, "IncorrectTotalCLBalanceIncrease") + .withArgs(expectedValidatorsGrowthLimitWei, excessiveValidatorsGrowthWei); + }); + + it("supports cold-start onboarding across the global path and module bootstrap flow", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(); + const [moduleId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const depositedWei = ether("200"); + const activatedValidatorsWei = ether("100"); + const remainingPendingWei = depositedWei - activatedValidatorsWei; + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + postPendingWei: depositedWei, + depositsWei: depositedWei, + 
}), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [0n], + 0n, + 0n, + 0n, + depositedWei, + depositedWei, + ONE_DAY, + ), + ).not.to.be.reverted; + + await stakingRouterHarness.connect(admin).reportValidatorBalancesByStakingModule([moduleId], [0n]); + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + prePendingWei: depositedWei, + postValidatorsWei: activatedValidatorsWei, + postPendingWei: remainingPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [activatedValidatorsWei], + 0n, + depositedWei, + activatedValidatorsWei, + remainingPendingWei, + 0n, + ONE_DAY, + ), + ).not.to.be.reverted; + }); + + it("supports cold-start onboarding across multiple new modules", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(2); + const [moduleOneId, moduleTwoId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const moduleOneInitialPendingWei = ether("120"); + const moduleTwoInitialPendingWei = ether("80"); + const totalInitialPendingWei = moduleOneInitialPendingWei + moduleTwoInitialPendingWei; + const moduleOneActivatedValidatorsWei = ether("60"); + const moduleTwoActivatedValidatorsWei = ether("40"); + const moduleOneRemainingPendingWei = moduleOneInitialPendingWei - moduleOneActivatedValidatorsWei; + const moduleTwoRemainingPendingWei = moduleTwoInitialPendingWei - moduleTwoActivatedValidatorsWei; + const totalActivatedValidatorsWei = moduleOneActivatedValidatorsWei + moduleTwoActivatedValidatorsWei; + const totalRemainingPendingWei = moduleOneRemainingPendingWei + moduleTwoRemainingPendingWei; + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + postPendingWei: totalInitialPendingWei, + depositsWei: totalInitialPendingWei, + }), + ).not.to.be.reverted; 
+ + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleOneId, moduleTwoId], + [0n, 0n], + 0n, + 0n, + 0n, + totalInitialPendingWei, + totalInitialPendingWei, + ONE_DAY, + ), + ).not.to.be.reverted; + + await stakingRouterHarness + .connect(admin) + .reportValidatorBalancesByStakingModule([moduleOneId, moduleTwoId], [0n, 0n]); + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + prePendingWei: totalInitialPendingWei, + postValidatorsWei: totalActivatedValidatorsWei, + postPendingWei: totalRemainingPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleOneId, moduleTwoId], + [moduleOneActivatedValidatorsWei, moduleTwoActivatedValidatorsWei], + 0n, + totalInitialPendingWei, + totalActivatedValidatorsWei, + totalRemainingPendingWei, + 0n, + ONE_DAY, + ), + ).not.to.be.reverted; + }); + + it("supports cold-start onboarding with timeElapsed = 0 under allowance and rate-normalization fallbacks", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(); + const [moduleId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const zeroTimeElapsed = 0n; + const initialPendingWei = ether("10"); + const expectedModulePerDayLimitWei = + (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + const maxModuleActivationGwei = expectedModulePerDayLimitWei / ONE_DAY / ONE_GWEI; + const maxModuleActivationWei = maxModuleActivationGwei * ONE_GWEI; + const remainingPendingWei = initialPendingWei - maxModuleActivationWei; + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + timeElapsed: zeroTimeElapsed, + postPendingWei: initialPendingWei, + depositsWei: initialPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + 
[moduleId], + [0n], + 0n, + 0n, + 0n, + initialPendingWei, + initialPendingWei, + zeroTimeElapsed, + ), + ).not.to.be.reverted; + + await stakingRouterHarness.connect(admin).reportValidatorBalancesByStakingModule([moduleId], [0n]); + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + timeElapsed: zeroTimeElapsed, + prePendingWei: initialPendingWei, + postValidatorsWei: maxModuleActivationWei, + postPendingWei: remainingPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [maxModuleActivationWei], + 0n, + initialPendingWei, + maxModuleActivationWei, + remainingPendingWei, + 0n, + zeroTimeElapsed, + ), + ).not.to.be.reverted; + }); + + it("reverts with InvalidClBalancesData on array length mismatch", async () => { + await expect( + checker.checkModuleAndCLBalancesChangeRates([1n], [], 0n, 0n, 1n, 0n, 0n, ONE_DAY), + ).to.be.revertedWithCustomError(checker, "InvalidClBalancesData"); + }); + + it("reverts with InconsistentValidatorsBalanceByModule when validators balance sum mismatches", async () => { + await expect(checker.checkModuleAndCLBalancesChangeRates([1n, 2n], [10n, 20n], 0n, 0n, 40n, 3n, 0n, ONE_DAY)) + .to.be.revertedWithCustomError(checker, "InconsistentValidatorsBalanceByModule") + .withArgs(40n, 30n); + }); + + it("reverts with IncorrectTotalPendingBalance when reported pending exceeds funded protocol pending", async () => { + await expect(checker.checkModuleAndCLBalancesChangeRates([1n, 2n], [10n, 20n], 0n, 0n, 30n, 4n, 0n, ONE_DAY)) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, 4n); + }); + + it("allows redistribution between modules when total CL balance is unchanged", async () => { + const redistributionWei = limits.consolidationEthAmountPerDayLimit * ether("1"); + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: redistributionWei }, + { id: 2n, validatorsBalanceWei: redistributionWei }, + 
]); + + await expect( + check([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: redistributionWei * 2n }, + ]), + ).not.to.be.reverted; + }); + + it("reverts with IncorrectTotalPendingBalance when a module reports more pending than the protocol funded", async () => { + const previousPendingWei = ether("10"); + const reportedPendingWei = previousPendingWei + ether("1"); + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: 0n, pendingWei: reportedPendingWei }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(previousPendingWei, reportedPendingWei); + }); + + it("allows module-reported pending to exceed funded protocol pending within external pending balance cap", async () => { + const externalPendingBalanceCapEth = 2n; + const previousPendingWei = ether("10"); + const maxAllowedPendingWei = previousPendingWei + externalPendingBalanceCapEth * ether("1"); + const reportedPendingWei = maxAllowedPendingWei - ether("1"); + + await checker.connect(admin).grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: 0n, pendingWei: reportedPendingWei }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ).not.to.be.reverted; + }); + + it("allows module-reported pending exactly at external pending balance cap", async () => { + const externalPendingBalanceCapEth = 2n; + const previousPendingWei = ether("10"); + const maxAllowedPendingWei = previousPendingWei + externalPendingBalanceCapEth * ether("1"); + + await checker.connect(admin).grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await 
checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: 0n, pendingWei: maxAllowedPendingWei }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ).not.to.be.reverted; + }); + + it("reverts when module-reported pending exceeds external pending balance cap", async () => { + const externalPendingBalanceCapEth = 2n; + const previousPendingWei = ether("10"); + const maxAllowedPendingWei = previousPendingWei + externalPendingBalanceCapEth * ether("1"); + const reportedPendingWei = previousPendingWei + (externalPendingBalanceCapEth + 1n) * ether("1"); + + await checker.connect(admin).grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: 0n, pendingWei: reportedPendingWei }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(maxAllowedPendingWei, reportedPendingWei); + }); + + it("does not treat external pending balance cap as activated balance budget on the module path", async () => { + const externalPendingBalanceCapEth = 2n; + const reportedPendingWei = (externalPendingBalanceCapEth - 1n) * ether("1"); + const validatorsIncreaseWei = ether("1"); + + await checker.connect(admin).grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect(check([{ id: 1n, validatorsBalanceWei: validatorsIncreaseWei, pendingWei: reportedPendingWei }])) + 
.to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(0n, validatorsIncreaseWei); + }); + + it("does not treat external pending balance cap as aggregate module activation budget", async () => { + const externalPendingBalanceCapEth = 2n; + const previousModuleValidatorsWei = ether("109500"); + const previousPendingWei = 0n; + const reportedPendingWei = ether("1"); + const totalPreviousValidatorsWei = previousModuleValidatorsWei * 2n; + const validatorsAprSafetyCapWei = + (totalPreviousValidatorsWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + const expectedModuleIncreaseLimitWei = + validatorsAprSafetyCapWei + limits.consolidationEthAmountPerDayLimit * ether("1"); + + await checker.connect(admin).grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: previousModuleValidatorsWei }, + { id: 2n, validatorsBalanceWei: previousModuleValidatorsWei }, + ]); + + await expect( + check( + [ + { id: 1n, validatorsBalanceWei: previousModuleValidatorsWei + ether("71"), pendingWei: reportedPendingWei }, + { id: 2n, validatorsBalanceWei: previousModuleValidatorsWei - ether("11"), pendingWei: 0n }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + postCLPendingBalanceWei: reportedPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalModuleValidatorsBalanceIncrease") + .withArgs(expectedModuleIncreaseLimitWei, ether("71")); + }); + + it("allows pending-to-validators activation within a module when module total is unchanged", async () => { + const previousPendingWei = ether("100"); + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: ether("100"), pendingWei: 0n }], { + preCLPendingBalanceWei: previousPendingWei, + }), + 
).not.to.be.reverted; + }); + + it("reverts with IncorrectTotalCLBalanceIncrease when module increase exceeds the global activation budget", async () => { + const previousValidatorsWei = ether("219000"); + const currentIncreasePerDay = ether("121"); + const previousPendingWei = ether("60"); + const expectedValidatorsGrowthLimitWei = + previousPendingWei + + ((previousValidatorsWei + previousPendingWei) * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: previousValidatorsWei + currentIncreasePerDay, pendingWei: 0n }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(expectedValidatorsGrowthLimitWei, currentIncreasePerDay); + }); + + it("sums module increases across modules before checking appeared limit", async () => { + const previousModuleValidatorsWei = ether("109500"); + const previousPendingWei = ether("60"); + const totalPreviousValidatorsWei = previousModuleValidatorsWei * 2n; + const totalPositiveModuleIncreaseWei = ether("131"); + const expectedModuleIncreaseLimitWei = + previousPendingWei + + ((totalPreviousValidatorsWei + previousPendingWei) * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n) + + limits.consolidationEthAmountPerDayLimit * ether("1"); + + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: previousModuleValidatorsWei }, + { id: 2n, validatorsBalanceWei: previousModuleValidatorsWei }, + ]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousModuleValidatorsWei + totalPositiveModuleIncreaseWei, + pendingWei: 0n, + }, + { id: 2n, validatorsBalanceWei: previousModuleValidatorsWei - ether("71"), pendingWei: 0n }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, 
"IncorrectTotalModuleValidatorsBalanceIncrease") + .withArgs(expectedModuleIncreaseLimitWei, totalPositiveModuleIncreaseWei); + }); + + it("reverts with IncorrectTotalActivatedBalance when consumed pending exceeds the global appeared limit", async () => { + const appearedLimitPerPeriodWei = limits.appearedEthAmountPerDayLimit * ether("1"); + const totalConsumedPendingWei = ether("120"); + + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: 0n }, + ]); + + await expect( + check( + [ + { id: 1n, validatorsBalanceWei: 0n, pendingWei: 0n }, + { id: 2n, validatorsBalanceWei: 0n, pendingWei: 0n }, + ], + { + preCLPendingBalanceWei: totalConsumedPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalActivatedBalance") + .withArgs(appearedLimitPerPeriodWei, totalConsumedPendingWei); + }); + + it("reverts with IncorrectTotalCLBalanceIncrease when reported validators balance growth exceeds consumed pending", async () => { + const consumedPendingWei = ether("20"); + const reportedValidatorsGrowthWei = ether("60"); + const expectedValidatorsGrowthLimitWei = + consumedPendingWei + (consumedPendingWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: 0n }, + ]); + + await expect( + check( + [ + { id: 1n, validatorsBalanceWei: ether("30"), pendingWei: ether("20") }, + { id: 2n, validatorsBalanceWei: ether("30"), pendingWei: ether("20") }, + ], + { + preCLPendingBalanceWei: ether("60"), + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(expectedValidatorsGrowthLimitWei, reportedValidatorsGrowthWei); + }); + + it("allows reported validators balance growth above consumed pending within safetyCap", async () => { + const previousValidatorsWei = ether("3650"); + const previousPendingWei = ether("10"); + const consumedPendingWei = ether("9"); 
+ const safetyCapWei = + ((previousValidatorsWei + consumedPendingWei) * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + const maxAllowedValidatorsGrowthWei = consumedPendingWei + safetyCapWei; + const currentPendingWei = previousPendingWei - consumedPendingWei; + const requiredValidatorsIncreaseWei = maxAllowedValidatorsGrowthWei; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + requiredValidatorsIncreaseWei, + pendingWei: currentPendingWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ).not.to.be.reverted; + }); + + it("reverts when reported validators balance growth exceeds consumed pending plus safetyCap by an explicit overflow", async () => { + const previousValidatorsWei = ether("3650"); + const previousPendingWei = ether("10"); + const consumedPendingWei = ether("9"); + const safetyCapWei = + ((previousValidatorsWei + consumedPendingWei) * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + const safetyCapOverflowWei = ether("1"); + const maxAllowedValidatorsGrowthWei = consumedPendingWei + safetyCapWei; + const reportedValidatorsGrowthWei = maxAllowedValidatorsGrowthWei + safetyCapOverflowWei; + const currentPendingWei = previousPendingWei - consumedPendingWei; + const requiredValidatorsIncreaseWei = reportedValidatorsGrowthWei; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + requiredValidatorsIncreaseWei, + pendingWei: currentPendingWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(maxAllowedValidatorsGrowthWei, reportedValidatorsGrowthWei); + }); + + it("allows an exact module increase at the appeared+consolidation limit", async 
() => { + const previousValidatorsWei = ether("36500"); + const previousPendingWei = ether("36500"); + const activatedWei = ether("100"); + const exactIncrease = (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + exactIncrease, + pendingWei: previousPendingWei - activatedWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ).not.to.be.reverted; + }); + + it("allows validator growth funded by existing pending when total CL is unchanged", async () => { + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: ether("5") }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: ether("105"), pendingWei: 0n }], { + preCLPendingBalanceWei: ether("100"), + }), + ).not.to.be.reverted; + }); + + it("uses timeElapsed in per-day normalization (timeElapsed = 0 path)", async () => { + const activatedWei = ether("5"); + const appearedLimitForZeroElapsedWei = (limits.appearedEthAmountPerDayLimit * ether("1")) / 24n; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: activatedWei, pendingWei: 0n }], { + preCLPendingBalanceWei: activatedWei, + timeElapsed: 0n, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalActivatedBalance") + .withArgs(appearedLimitForZeroElapsedWei, activatedWei); + }); + + it("normalizes module increases by a non-zero elapsed time", async () => { + const previousValidatorsWei = ether("43800"); + const previousPendingWei = ether("36500"); + const halfDay = ONE_DAY / 2n; + const activatedWei = ether("50"); + const safetyCapWei = + ((previousValidatorsWei + activatedWei) * limits.annualBalanceIncreaseBPLimit * halfDay) / + (365n * ONE_DAY * 10_000n); + const allowedValidatorsGrowthWei = activatedWei + 
safetyCapWei; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + allowedValidatorsGrowthWei, + pendingWei: previousPendingWei - activatedWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + timeElapsed: halfDay, + }, + ), + ).not.to.be.reverted; + + const exceededValidatorsGrowthWei = allowedValidatorsGrowthWei + ether("1"); + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + exceededValidatorsGrowthWei, + pendingWei: previousPendingWei - activatedWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + timeElapsed: halfDay, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedValidatorsGrowthWei, exceededValidatorsGrowthWei); + }); + + it("allows redistribution between modules even when maxCLBalanceDecreaseBP is zero", async () => { + const redistributionWei = limits.consolidationEthAmountPerDayLimit * ether("1"); + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: redistributionWei }, + { id: 2n, validatorsBalanceWei: redistributionWei }, + ]); + + await checker.connect(admin).grantRole(await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setMaxCLBalanceDecreaseBP(0n); + + await expect( + check([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: redistributionWei * 2n }, + ]), + ).not.to.be.reverted; + }); +}); diff --git a/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts b/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts index 2939c92c08..78d148e9fe 100644 --- a/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts +++ b/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts @@ -4,20 +4,25 @@ import { artifacts, ethers } from 
"hardhat"; import { anyValue } from "@nomicfoundation/hardhat-chai-matchers/withArgs"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; import { Accounting__MockForSanityChecker, AccountingOracle__MockForSanityChecker, + Lido__MockForSanityChecker, LidoLocator__MockForSanityChecker, OracleReportSanityChecker, StakingRouter__MockForSanityChecker, } from "typechain-types"; -import { ether, getCurrentBlockTimestamp, impersonate } from "lib"; +import { ether, impersonate } from "lib"; import { Snapshot } from "test/suite"; const SLOTS_PER_DAY = 7200n; +const REPORTS_WINDOW = 36; +const MAX_BASIS_POINTS = 10_000n; +const MAX_CL_BALANCE_DECREASE_BP = 360n; // 3.6% describe("OracleReportSanityChecker.sol:negative-rebase", () => { let locator: LidoLocator__MockForSanityChecker; @@ -25,40 +30,98 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { let accountingOracle: AccountingOracle__MockForSanityChecker; let accounting: Accounting__MockForSanityChecker; let stakingRouter: StakingRouter__MockForSanityChecker; + let lido: Lido__MockForSanityChecker; let deployer: HardhatEthersSigner; + let withdrawalVault: HardhatEthersSigner; let accountingSigner: HardhatEthersSigner; const defaultLimitsList = { - exitedValidatorsPerDayLimit: 50n, - appearedValidatorsPerDayLimit: 75n, + exitedEthAmountPerDayLimit: 50n, + appearedEthAmountPerDayLimit: 75n, annualBalanceIncreaseBPLimit: 10_00n, // 10% simulatedShareRateDeviationBPLimit: 2_00n, // 2% - maxValidatorExitRequestsPerReport: 2000n, + maxBalanceExitRequestedPerReportInEth: 64_000n, // Max ~65K ETH (close to uint16 max) + maxEffectiveBalanceWeightWCType01: 32n, + maxEffectiveBalanceWeightWCType02: 2_048n, maxItemsPerExtraDataTransaction: 15n, maxNodeOperatorsPerExtraDataItem: 16n, requestTimestampMargin: 128n, - maxPositiveTokenRebase: 5_000_000n, // 0.05% - initialSlashingAmountPWei: 1000n, // 1 ETH = 1000 PWei - 
inactivityPenaltiesAmountPWei: 101n, // 0.101 ETH = 101 PWei - clBalanceOraclesErrorUpperBPLimit: 50n, // 0.5% + maxPositiveTokenRebase: 5_000_000n, + maxCLBalanceDecreaseBP: MAX_CL_BALANCE_DECREASE_BP, + clBalanceOraclesErrorUpperBPLimit: 50n, + consolidationEthAmountPerDayLimit: 0n, + exitedValidatorEthAmountLimit: 1n, + externalPendingBalanceCapEth: 0n, }; let originalState: string; + const callCheck = ( + preCLBalance: bigint, + postCLBalance: bigint, + withdrawalVaultBalance = 0n, + deposits = 0n, + withdrawalsVaultTransfer = 0n, + timeElapsed = 24n * 60n * 60n, + preCLPendingBalance = 0n, + postCLPendingBalance = 0n, + ) => + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + timeElapsed, + preCLBalance - deposits - preCLPendingBalance, + preCLPendingBalance, + postCLBalance - postCLPendingBalance, + postCLPendingBalance, + withdrawalVaultBalance, + 0n, + 0n, + deposits, + withdrawalsVaultTransfer, + ); + + // Deposits remain in pending until they are activated on the validators side. 
+ const callCheckWithPendingDeposits = ( + preCLBalance: bigint, + postCLBalance: bigint, + deposits: bigint, + { + withdrawalVaultBalance = 0n, + withdrawalsVaultTransfer = 0n, + timeElapsed = 24n * 60n * 60n, + }: { + withdrawalVaultBalance?: bigint; + withdrawalsVaultTransfer?: bigint; + timeElapsed?: bigint; + } = {}, + ) => + callCheck( + preCLBalance, + postCLBalance, + withdrawalVaultBalance, + deposits, + withdrawalsVaultTransfer, + timeElapsed, + 0n, + deposits, + ); + + const maxDiffFor = (adjusted: bigint) => (adjusted * MAX_CL_BALANCE_DECREASE_BP) / MAX_BASIS_POINTS; + const deploySecondOpinionOracle = async () => { const secondOpinionOracle = await ethers.deployContract("SecondOpinionOracle__Mock"); const clOraclesRole = await checker.SECOND_OPINION_MANAGER_ROLE(); await checker.grantRole(clOraclesRole, deployer.address); - // 10000 BP - 100% - // 74 BP - 0.74% await checker.setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinionOracle.getAddress(), 74n); return secondOpinionOracle; }; before(async () => { - [deployer] = await ethers.getSigners(); + [deployer, withdrawalVault] = await ethers.getSigners(); + await setBalance(withdrawalVault.address, ether("10000")); const sanityCheckerAddress = deployer.address; @@ -71,10 +134,11 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { 1606824023, ]); stakingRouter = await ethers.deployContract("StakingRouter__MockForSanityChecker"); + lido = await ethers.deployContract("Lido__MockForSanityChecker"); locator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ { - lido: deployer.address, + lido: await lido.getAddress(), depositSecurityModule: deployer.address, elRewardsVault: deployer.address, accountingOracle: await accountingOracle.getAddress(), @@ -84,11 +148,12 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { stakingRouter: await stakingRouter.getAddress(), treasury: deployer.address, withdrawalQueue: deployer.address, - withdrawalVault: 
deployer.address, + withdrawalVault: withdrawalVault.address, postTokenRebaseReceiver: deployer.address, oracleDaemonConfig: deployer.address, validatorExitDelayVerifier: deployer.address, triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, accounting: await accounting.getAddress(), wstETH: deployer.address, vaultHub: deployer.address, @@ -96,18 +161,17 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { lazyOracle: deployer.address, predepositGuarantee: deployer.address, operatorGrid: deployer.address, + topUpGateway: deployer.address, }, ]); - const locatorAddress = await locator.getAddress(); - const accountingOracleAddress = await accountingOracle.getAddress(); - const accountingAddress = await accounting.getAddress(); - - checker = await ethers - .getContractFactory("OracleReportSanityChecker") - .then((f) => - f.deploy(locatorAddress, accountingOracleAddress, accountingAddress, deployer.address, defaultLimitsList), - ); + const factory = await ethers.getContractFactory("OracleReportSanityChecker"); + checker = await factory.deploy( + await locator.getAddress(), + await accounting.getAddress(), + deployer.address, + defaultLimitsList, + ); accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); }); @@ -118,13 +182,13 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { context("OracleReportSanityChecker checkAccountingOracleReport authorization", () => { it("should allow calling from Accounting address", async () => { - await checker.connect(accountingSigner).checkAccountingOracleReport(0, 110 * 1e9, 109.99 * 1e9, 0, 0, 0, 10, 10); + await callCheck(ether("100"), ether("100")); }); it("should not allow calling from non-Accounting address", async () => { const [, otherClient] = await ethers.getSigners(); await expect( - checker.connect(otherClient).checkAccountingOracleReport(0, 110 * 1e9, 110.01 * 1e9, 0, 0, 0, 10, 10), + 
checker.connect(otherClient).checkAccountingOracleReport(0, ether("100"), 0, ether("100"), 0, 0, 0, 0, 0, 0), ).to.be.revertedWithCustomError(checker, "CalledNotFromAccounting"); }); }); @@ -144,29 +208,41 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { it("has compact packed limits representation", async () => { const artifact = await artifacts.readArtifact("OracleReportSanityCheckerWrapper"); - const functionABI = artifact.abi.find( - (entry) => entry.type === "function" && entry.name === "exposePackedLimits", + const accountingCoreABI = artifact.abi.find( + (entry) => entry.type === "function" && entry.name === "exposeAccountingCorePackedLimits", + ); + const operationalABI = artifact.abi.find( + (entry) => entry.type === "function" && entry.name === "exposeOperationalPackedLimits", ); const sizeOfCalc = (x: string) => { switch (x) { case "uint256": return 256; + case "uint128": + return 128; case "uint64": return 64; case "uint32": return 32; case "uint16": return 16; + case "uint8": + return 8; default: expect.fail(`Unknown type ${x}`); } }; - const structSizeInBits = functionABI.outputs[0].components + const accountingCoreSizeInBits = accountingCoreABI.outputs[0].components + .map((x: { type: string }) => x.type) + .reduce((acc: number, x: string) => acc + sizeOfCalc(x), 0); + const operationalSizeInBits = operationalABI.outputs[0].components .map((x: { type: string }) => x.type) .reduce((acc: number, x: string) => acc + sizeOfCalc(x), 0); - expect(structSizeInBits).to.lessThanOrEqual(256); + + expect(accountingCoreSizeInBits).to.lessThanOrEqual(256); + expect(operationalSizeInBits).to.lessThanOrEqual(256); }); it("second opinion can be changed or removed", async () => { @@ -186,295 +262,862 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { }); }); - context("OracleReportSanityChecker rebase report data", () => { - async function newChecker() { - return await ethers.deployContract("OracleReportSanityCheckerWrapper", [ - 
await locator.getAddress(), - await accountingOracle.getAddress(), - await accounting.getAddress(), - deployer.address, - Object.values(defaultLimitsList), - ]); - } - - it("sums negative rebases for a few days", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - expect(await reportChecker.sumNegativeRebasesNotOlderThan(timestamp - 18n * SLOTS_PER_DAY)).to.equal(0); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 10, 100); - await reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 10, 150); - expect(await reportChecker.sumNegativeRebasesNotOlderThan(timestamp - 18n * SLOTS_PER_DAY)).to.equal(250); - }); - - it("sums negative rebases for 18 days", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - - await reportChecker.addReportData(timestamp - 19n * SLOTS_PER_DAY, 0, 700); - await reportChecker.addReportData(timestamp - 18n * SLOTS_PER_DAY, 0, 13); - await reportChecker.addReportData(timestamp - 17n * SLOTS_PER_DAY, 0, 10); - await reportChecker.addReportData(timestamp - 5n * SLOTS_PER_DAY, 0, 5); - await reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 0, 150); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 0, 100); - - const expectedSum = await reportChecker.sumNegativeRebasesNotOlderThan(timestamp - 18n * SLOTS_PER_DAY); - expect(expectedSum).to.equal(100 + 150 + 5 + 10); - }); - - it("returns exited validators count", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - - await reportChecker.addReportData(timestamp - 19n * SLOTS_PER_DAY, 10, 100); - await reportChecker.addReportData(timestamp - 18n * SLOTS_PER_DAY, 11, 100); - await reportChecker.addReportData(timestamp - 17n * SLOTS_PER_DAY, 12, 100); - await reportChecker.addReportData(timestamp - 5n * SLOTS_PER_DAY, 13, 100); - await 
reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 14, 100); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 15, 100); - - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 19n * SLOTS_PER_DAY)).to.equal(10); - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 18n * SLOTS_PER_DAY)).to.equal(11); - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 1n * SLOTS_PER_DAY)).to.equal(15); - }); - - it("returns exited validators count for missed or non-existent report", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - await reportChecker.addReportData(timestamp - 19n * SLOTS_PER_DAY, 10, 100); - await reportChecker.addReportData(timestamp - 18n * SLOTS_PER_DAY, 11, 100); - await reportChecker.addReportData(timestamp - 15n * SLOTS_PER_DAY, 12, 100); - await reportChecker.addReportData(timestamp - 5n * SLOTS_PER_DAY, 13, 100); - await reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 14, 100); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 15, 100); - - // Out of range: day -20 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 20n * SLOTS_PER_DAY)).to.equal(0); - // Missed report: day -6 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 6n * SLOTS_PER_DAY)).to.equal(12); - // Missed report: day -7 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 7n * SLOTS_PER_DAY)).to.equal(12); - // Expected report: day 15 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 15n * SLOTS_PER_DAY)).to.equal(12); - // Missed report: day -16 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 16n * SLOTS_PER_DAY)).to.equal(11); + context("OracleReportSanityChecker balance-based CL decrease check", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await 
accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + + context("early exit predicate", () => { + it("passes when postCL >= preCL (no decrease)", async () => { + await expect( + callCheck(ether("101"), ether("101.001"), 0n, 0n, 0n, 4n * 24n * 60n * 60n, ether("1"), ether("0.999")), + ).not.to.be.reverted; + }); + + it("passes when postCL + withdrawals >= preCL", async () => { + await expect(callCheck(ether("105"), ether("100"), ether("5"))).not.to.be.reverted; + }); + + it("passes when postCL + withdrawals == preCL", async () => { + await expect(callCheck(ether("100"), ether("95"), ether("5"))).not.to.be.reverted; + }); + + it("passes when postCL == preCL", async () => { + await expect(callCheck(ether("100"), ether("100"))).not.to.be.reverted; + }); + + it("does not use cumulative withdrawal vault balance for early exit when no new CL withdrawals", async () => { + const baseline = ether("10000"); + const unchangedVaultBalance = ether("100"); + const postCL = ether("9550"); + const actualDiff = baseline - postCL; + const adjusted = baseline - unchangedVaultBalance; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // First report with non-zero vault balance sets _lastVaultBalanceAfterTransfer. + // Validators drop matches clWithdrawals (100 ETH) so no "appeared" balance. + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, ether("9900"), unchangedVaultBalance, 0n, 0n); + + // Same vault balance on the next report means clWithdrawals == 0 for this period. + // The check must not early-exit based on cumulative vault balance. 
+ await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9900"), postCL, unchangedVaultBalance, 0n, 0n)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("first report (no history)", () => { + it("passes on first report even with large decrease", async () => { + await expect(callCheck(ether("100"), ether("50"))).not.to.be.reverted; + }); + }); + + context("single-period decrease", () => { + it("decrease within limit passes and emits NegativeCLRebaseAccepted", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("decrease exactly at limit passes", async () => { + const baseline = ether("10000"); + const expectedMaxDiff = maxDiffFor(baseline); + const postCL = baseline - expectedMaxDiff; + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, expectedMaxDiff, expectedMaxDiff); + }); + + it("decrease exceeding limit reverts with IncorrectCLBalanceDecrease", async () => { + const baseline = ether("10000"); + const postCL = ether("9500"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + 
.withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("deposits and withdrawals adjustment", () => { + it("deposits increase adjusted balance and allowed decrease", async () => { + const baseline = ether("10000"); + const depositAmount = ether("500"); + const postCL = ether("9700"); + const principalCL = baseline + depositAmount; + const actualDiff = baseline - postCL; + const adjusted = baseline + depositAmount; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // adjusted includes depositAmount -> + // expectedMaxDiff is larger than without deposits -> actualDiff fits + await setRefSlot(baseRefSlot); + await expect(callCheckWithPendingDeposits(principalCL, postCL, depositAmount)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("withdrawals decrease adjusted balance and allowed decrease", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + const wVault = ether("200"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVault; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // adjusted = baseline - wVault -> + // smaller expectedMaxDiff, but actualDiff still within limit + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, wVault)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("large withdrawals trigger stricter limit and cause revert", async () => { + const baseline = ether("10000"); + const postCL = ether("9600"); + const wVault = ether("300"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVault; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, 
baseline); + + // adjusted = baseline - wVault -> + // expectedMaxDiff shrinks below actualDiff -> reverts + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, wVault)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + + it("deposits and withdrawals combined over multiple reports", async () => { + const baseline = ether("10000"); + const report2Deposits = ether("200"); + const report2Withdrawals = ether("100"); + const report3Deposits = ether("300"); + const report3Withdrawals = ether("50"); + const postCL = ether("9700"); + + const actualDiff = baseline - postCL; + const totalDeposits = report2Deposits + report3Deposits; + const totalWithdrawals = report2Withdrawals + report3Withdrawals; + const adjusted = baseline + totalDeposits - totalWithdrawals; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheckWithPendingDeposits(ether("10200"), ether("9900"), report2Deposits, { + withdrawalVaultBalance: report2Withdrawals, + withdrawalsVaultTransfer: report2Withdrawals, + }); + + // adjusted = baseline + totalDeposits - totalWithdrawals + // actualDiff = baseline - postCL + await setRefSlot(baseRefSlot); + await expect( + callCheckWithPendingDeposits(ether("10150"), postCL, report3Deposits, { + withdrawalVaultBalance: report3Withdrawals, + withdrawalsVaultTransfer: report3Withdrawals, + }), + ) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("repeated withdrawalVaultBalance snapshots make the limit stricter", async () => { + const baseline = ether("10000"); + const repeatedWVaultSnapshot = ether("150"); + const postCL = ether("9650"); + const actualDiff = baseline - postCL; + const totalCLWithdrawals = repeatedWVaultSnapshot * 2n; + const adjusted 
= baseline - totalCLWithdrawals; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - 3n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // preCL <= postCL + wVault -> early exit, but CL withdrawals are still stored in reportData + // Validators drop matches clWithdrawals so no "appeared" balance. + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, ether("9850"), repeatedWVaultSnapshot, 0n, repeatedWVaultSnapshot); + + // same for next report; repeated CL withdrawals tighten adjustedBase + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("9850"), ether("9700"), repeatedWVaultSnapshot, 0n, repeatedWVaultSnapshot); + + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9800"), postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("accumulation over multiple reports", () => { + it("gradual decrease over several reports accumulates", async () => { + const baseline = ether("10000"); + const finalPostCL = ether("9500"); + const cumulativeDiff = baseline - finalPostCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, ether("9800")); + + // cumulativeDiff = baseline - finalPostCL + // (summed over 2 decreases) > expectedMaxDiff + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9800"), finalPostCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(cumulativeDiff, expectedMaxDiff); + }); + + it("balance recovery within window via deposits", async () => { + const baseline = ether("10000"); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, 
ether("9700")); + + // deposits raise adjusted balance, increasing the allowed decrease + await setRefSlot(baseRefSlot); + await expect(callCheckWithPendingDeposits(ether("9700"), ether("9700"), ether("300"))).not.to.be.reverted; + }); + + it("single large decrease exceeds limit", async () => { + const baseline = ether("10000"); + const postCL = ether("9300"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await expect(callCheck(baseline, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("window boundary behavior", () => { + it("window grows adaptively from 1 to 36", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + // actualDiff measured from baseline (window start), not from previous report + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, ether("9800")); + + // window=2: cumulative actualDiff (300) < expectedMaxDiff (360) -> passes + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9800"), postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("window = 1 with only 2 reports", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, 
"NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("uses X-36 report as baseline at full window", async () => { + const totalReports = REPORTS_WINDOW + 1; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const wVaultReport1 = ether("400"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVaultReport1; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheck(baseline, stableBalance, wVaultReport1, 0n, wVaultReport1); + + for (let i = 2; i < REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // At full window, baseline must still be report 0 (X-36), not report 1 (X-35). 
+ await setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + + it("uses a 36-day window by timestamps when reports are delayed", async () => { + const twoDaysInSeconds = 2n * 24n * 60n * 60n; + const baseline = ether("10000"); + const postCL = ether("9700"); + const oldWindowWithdrawal = ether("5"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await callCheck(baseline, baseline, 0n, 0n, 0n, twoDaysInSeconds); + await callCheck( + baseline + oldWindowWithdrawal, + baseline, + oldWindowWithdrawal, + oldWindowWithdrawal, + oldWindowWithdrawal, + twoDaysInSeconds, + ); + + for (let i = 0; i < 17; ++i) { + await callCheck(baseline, baseline, 0n, 0n, 0n, twoDaysInSeconds); + } + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, 0n, 0n, 0n, twoDaysInSeconds)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("excludes baseline report flows from adjusted balance", async () => { + const totalReports = REPORTS_WINDOW + 1; + const baseline = ether("10000"); + const baselineWithdrawals = ether("2"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck( + baseline + baselineWithdrawals, + baseline, + baselineWithdrawals, + baselineWithdrawals, + baselineWithdrawals, + ); + + for (let i = 1; i < REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + } + + // Baseline report flows should not affect adjusted balance. 
+ await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("old data is evicted after window is full", async () => { + const totalReports = REPORTS_WINDOW + 2; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const actualDiff = stableBalance - postCL; + const expectedMaxDiff = maxDiffFor(stableBalance); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // preCL <= postCL triggers early exit, storing stableBalance with zero deposits/withdrawals + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + + for (let i = 2; i <= REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // report 0 (baseline=10000) evicted -> new baseline = stableBalance + // actualDiff = stableBalance - postCL (small) < expectedMaxDiff -> passes + await setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("before eviction the old baseline is still in window", async () => { + const totalReports = REPORTS_WINDOW + 1; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const wVaultReport1 = ether("400"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVaultReport1; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheck(baseline, stableBalance, 
wVaultReport1, 0n, wVaultReport1); + + for (let i = 2; i < REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // report 0 (baseline) still in window -> + // actualDiff = baseline - postCL (large) + // adjusted = baseline - wVaultReport1 -> + // expectedMaxDiff is small -> actualDiff > expectedMaxDiff -> reverts + await setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + + it("eviction also removes old deposits from the window", async () => { + const totalReports = REPORTS_WINDOW + 3; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const actualDiff = stableBalance - postCL; + const expectedMaxDiff = maxDiffFor(stableBalance); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // deposits=1000 and wVault=500 stored with report 1; after eviction they leave the window + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheckWithPendingDeposits(stableBalance, ether("9100"), ether("1000"), { + withdrawalVaultBalance: ether("500"), + withdrawalsVaultTransfer: ether("500"), + }); + + // clean transition to stableBalance (becomes new baseline after eviction) + await setRefSlot(baseRefSlot - BigInt(totalReports - 2) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + + for (let i = 3; i <= REPORTS_WINDOW + 1; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // reports 0 and 1 evicted (deposits=1000, wVault=500 gone) + // new baseline = report 2 with zero deposits/withdrawals + // adjusted = stableBalance -> expectedMaxDiff based on stableBalance only + await 
setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); }); }); - context("OracleReportSanityChecker additional balance decrease check", () => { - it("works for IncorrectCLBalanceDecrease", async () => { - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("320"), ether("300"), 0, 0, 0, 10, 10), - ) - .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") - .withArgs(20n * ether("1"), 10n * ether("1") + 10n * ether("0.101")); + context("OracleReportSanityChecker day-one attack", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; }); - it("works as accumulation for IncorrectCLBalanceDecrease", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; - const prevRefSlot = refSlot - SLOTS_PER_DAY; + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); - await accountingOracle.setLastProcessingRefSlot(prevRefSlot); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("310"), 0, 0, 0, 10, 10); + it("3.6% on day 1 passes, repeated 3.6% on day 2 reverts", async () => { + const baseline = ether("10000"); + const day1PostCL = baseline - maxDiffFor(baseline); + const day2PostCL = day1PostCL - maxDiffFor(day1PostCL); - await accountingOracle.setLastProcessingRefSlot(refSlot); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("310"), ether("300"), 0, 0, 0, 10, 10), - ) + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await 
callCheck(baseline, baseline); + + // day 1: exactly at limit, passes + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await expect(callCheck(baseline, day1PostCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot - SLOTS_PER_DAY, day1PostCL, maxDiffFor(baseline), maxDiffFor(baseline)); + + // day 2: cumulative baseline -> day2PostCL ≈ 7.2% > 3.6% limit + await setRefSlot(baseRefSlot); + await expect(callCheck(day1PostCL, day2PostCL)).to.be.revertedWithCustomError( + checker, + "IncorrectCLBalanceDecrease", + ); + }); + + it("small daily decreases accumulate and trigger revert", async () => { + const baseline = ether("10000"); + const dailyDecrease = ether("100"); + const numReports = 5; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - BigInt(numReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // 3 reports of 1% decrease each: cumulative 3% < 3.6% limit + let currentBalance = baseline; + for (let i = 1; i <= 3; i++) { + const newBalance = currentBalance - dailyDecrease; + await setRefSlot(baseRefSlot - BigInt(numReports - i) * SLOTS_PER_DAY); + await callCheck(currentBalance, newBalance); + currentBalance = newBalance; + } + + // 4th decrease: cumulativeDiff = 4 × dailyDecrease (4%) + // > expectedMaxDiff (3.6%) + const cumulativeDiff = baseline - (currentBalance - dailyDecrease); + await setRefSlot(baseRefSlot); + await expect(callCheck(currentBalance, currentBalance - dailyDecrease)) .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") - .withArgs(20n * ether("1"), 10n * ether("1") + 10n * ether("0.101")); + .withArgs(cumulativeDiff, expectedMaxDiff); + }); + }); + + context("OracleReportSanityChecker edge cases", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - 
genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + + it("maxCLBalanceDecreaseBP = 0 forbids any decrease", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await checker.setMaxCLBalanceDecreaseBP(0); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); + + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("10000"), ether("10000") - 1n)).to.be.revertedWithCustomError( + checker, + "IncorrectCLBalanceDecrease", + ); + }); + + it("maxCLBalanceDecreaseBP = 10000 allows any decrease", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await checker.setMaxCLBalanceDecreaseBP(10000); + + const baseline = ether("10000"); + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, ether("1"))) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, ether("1"), baseline - ether("1"), baseline); + }); + + it("reverts with IncorrectCLBalanceDecreaseWindowData when stored withdrawals exceed adjusted balance", async () => { + const baseline = ether("100"); + const hugeWithdrawals = baseline + 1n; + + await setRefSlot(baseRefSlot - 3n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, 0n, baseline, 0n, baseline); + + // A tiny follow-up withdrawal pushes the cumulative window withdrawals above the baseline. 
+ await setRefSlot(baseRefSlot - 1n); + await callCheck(1n, 0n, 1n, 0n, 1n); + + // adjusted = baseline + 0 - hugeWithdrawals -> invalid window inputs for subtraction + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("80"), ether("50"))) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecreaseWindowData") + .withArgs(baseline, 0n, hugeWithdrawals); + }); + + it("reverts with IncorrectCLWithdrawalsVaultBalance when reported vault balance is below previous post-transfer state", async () => { + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + // Leave 200 ETH in the vault after the report so the next report cannot go below it. + await callCheck(ether("200"), 0n, ether("200"), 0n, 0n); + + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("100"), ether("100"), ether("199"), 0n, 0n)) + .to.be.revertedWithCustomError(checker, "IncorrectCLWithdrawalsVaultBalance") + .withArgs(ether("199"), ether("200")); + }); + + it("reverts with IncorrectWithdrawalsVaultTransfer when transfer exceeds reported vault balance", async () => { + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("100"), ether("100"), ether("100"), 0n, ether("101"))) + .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultTransfer") + .withArgs(ether("100"), ether("101")); + }); + + it("large balances (36M ETH) do not cause overflow", async () => { + const totalCLBalance = ether("36000000"); + const depositAmount = ether("1000000"); + const decrease = maxDiffFor(totalCLBalance); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheckWithPendingDeposits(totalCLBalance + depositAmount, totalCLBalance, depositAmount); + + const postCL = totalCLBalance - decrease; + await setRefSlot(baseRefSlot); + await expect(callCheckWithPendingDeposits(postCL + depositAmount, postCL, depositAmount)).not.to.be.reverted; + }); + + it("getReportDataCount returns correct count after reports", async () => { + expect(await 
checker.getReportDataCount()).to.equal(0); + + await callCheck(ether("10000"), ether("10000")); + expect(await checker.getReportDataCount()).to.equal(1); + + await callCheck(ether("10000"), ether("10000")); + expect(await checker.getReportDataCount()).to.equal(2); + + await callCheck(ether("10000"), ether("10000")); + expect(await checker.getReportDataCount()).to.equal(3); }); + it("second opinion oracle is not consulted when decrease is within limit", async () => { + await deploySecondOpinionOracle(); + + const baseline = ether("10000"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // actualDiff < expectedMaxDiff -> within limit -> + // Accepted (not Confirmed via second opinion) + await setRefSlot(baseRefSlot); + const tx = callCheck(baseline, postCL); + await expect(tx) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + await expect(tx).not.to.emit(checker, "NegativeCLRebaseConfirmed"); + }); + }); + + context("OracleReportSanityChecker setMaxCLBalanceDecreaseBP validation", () => { + it("accepts 0", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(0)).not.to.be.reverted; + }); + + it("accepts 10000 (MAX_BASIS_POINTS)", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(10000)).not.to.be.reverted; + }); + + it("reverts for 10001 with IncorrectLimitValue", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(10001)) + 
.to.be.revertedWithCustomError(checker, "IncorrectLimitValue") + .withArgs(10001, 0, 10000); + }); + + it("emits MaxCLBalanceDecreaseBPSet event on change", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(500)).to.emit(checker, "MaxCLBalanceDecreaseBPSet").withArgs(500); + }); + }); + + context("OracleReportSanityChecker second opinion oracle", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + it("works for happy path and report is not ready", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); - await accountingOracle.setLastProcessingRefSlot(refSlot); + await setRefSlot(baseRefSlot); - // Expect to pass through - await checker.connect(accountingSigner).checkAccountingOracleReport(0, 96 * 1e9, 96 * 1e9, 0, 0, 0, 10, 10); + await callCheck(ether("10000"), ether("9700")); const secondOpinionOracle = await deploySecondOpinionOracle(); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("300"), 0, 0, 0, 10, 10), - ).to.be.revertedWithCustomError(checker, "NegativeRebaseFailedSecondOpinionReportIsNotReady"); + await expect(callCheck(ether("10000"), ether("9500"))).to.be.revertedWithCustomError( + checker, + "NegativeRebaseFailedSecondOpinionReportIsNotReady", + ); - await secondOpinionOracle.addReport(refSlot, { + await 
secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("300", "gwei"), + clBalanceGwei: parseUnits("9500", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("300"), 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.emit(checker, "NegativeCLRebaseConfirmed") - .withArgs(refSlot, ether("300"), ether("0")); - }); - - it("works with staking router reports exited validators at day 18 and 54", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; - - const refSlot17 = refSlot - 17n * SLOTS_PER_DAY; - const refSlot18 = refSlot - 18n * SLOTS_PER_DAY; - const refSlot54 = refSlot - 54n * SLOTS_PER_DAY; - const refSlot55 = refSlot - 55n * SLOTS_PER_DAY; - - await stakingRouter.mock__addStakingModuleExitedValidators(1, 1); - await accountingOracle.setLastProcessingRefSlot(refSlot55); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("320"), 0, 0, 0, 10, 10); - - await stakingRouter.mock__removeStakingModule(1); - await stakingRouter.mock__addStakingModuleExitedValidators(1, 2); - await accountingOracle.setLastProcessingRefSlot(refSlot54); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("320"), 0, 0, 0, 10, 10); - - await stakingRouter.mock__removeStakingModule(1); - await stakingRouter.mock__addStakingModuleExitedValidators(1, 3); - await accountingOracle.setLastProcessingRefSlot(refSlot18); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("320"), 0, 0, 0, 10, 10); - - await accountingOracle.setLastProcessingRefSlot(refSlot17); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, 
ether("320"), ether("315"), 0, 0, 0, 10, 10); - - await accountingOracle.setLastProcessingRefSlot(refSlot); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("315"), ether("300"), 0, 0, 0, 10, 10), - ) - .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") - .withArgs(20n * ether("1"), 7n * ether("1") + 8n * ether("0.101")); + .withArgs(baseRefSlot, ether("9500"), ether("0")); }); it("works for reports close together", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); - await accountingOracle.setLastProcessingRefSlot(refSlot); + await setRefSlot(baseRefSlot); const secondOpinionOracle = await deploySecondOpinionOracle(); - // Second opinion balance is way bigger than general Oracle's (~1%) - await secondOpinionOracle.addReport(refSlot, { + // second opinion balance diverges too much (~1%) -> revert + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("302", "gwei"), + clBalanceGwei: parseUnits("9600", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("299"), 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") - .withArgs(ether("299"), ether("302"), anyValue); + .withArgs(ether("9500"), ether("9600"), anyValue); - // Second opinion balance is almost equal general Oracle's (<0.74%) - should pass - await secondOpinionOracle.addReport(refSlot, { + // second opinion balance within margin (<0.74%) -> passes + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("301", 
"gwei"), + clBalanceGwei: parseUnits("9510", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("299"), 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.emit(checker, "NegativeCLRebaseConfirmed") - .withArgs(refSlot, ether("299"), ether("0")); + .withArgs(baseRefSlot, ether("9500"), ether("0")); - // Second opinion balance is slightly less than general Oracle's (0.01%) - should fail - await secondOpinionOracle.addReport(refSlot, { + // second opinion balance higher than reported -> revert + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: 100, + clBalanceGwei: parseUnits("9800", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, 110 * 1e9, 100.01 * 1e9, 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") - .withArgs(100.01 * 1e9, 100 * 1e9, anyValue); + .withArgs(ether("9500"), ether("9800"), anyValue); }); it("works for reports with incorrect withdrawal vault balance", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); - await accountingOracle.setLastProcessingRefSlot(refSlot); + await setRefSlot(baseRefSlot); const secondOpinionOracle = await deploySecondOpinionOracle(); - // Second opinion balance is almost equal general Oracle's (<0.74%) and withdrawal value is the same - should pass - await secondOpinionOracle.addReport(refSlot, { + // withdrawal vault matches -> passes + await secondOpinionOracle.addReport(baseRefSlot, 
{ success: true, - clBalanceGwei: parseUnits("300", "gwei"), + clBalanceGwei: parseUnits("9500", "gwei"), withdrawalVaultBalanceWei: ether("1"), numValidators: 0, exitedValidators: 0, }); - await expect( - checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("330"), ether("299"), ether("1"), 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"), ether("1"))) .to.emit(checker, "NegativeCLRebaseConfirmed") - .withArgs(refSlot, ether("299"), ether("1")); + .withArgs(baseRefSlot, ether("9500"), ether("1")); - // Second opinion withdrawal vault balance is different - should fail - await secondOpinionOracle.addReport(refSlot, { + // withdrawal vault mismatch -> revert + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("300", "gwei"), + clBalanceGwei: parseUnits("9500", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("330"), ether("299"), ether("1"), 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"), ether("1"))) .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedWithdrawalVaultBalanceMismatch") .withArgs(ether("1"), 0); }); }); context("OracleReportSanityChecker roles", () => { - it("CL Oracle related functions require INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE", async () => { - const role = await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(); + it("setMaxCLBalanceDecreaseBP requires MAX_CL_BALANCE_DECREASE_MANAGER_ROLE", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); - await expect(checker.setInitialSlashingAndPenaltiesAmount(0, 0)).to.be.revertedWithOZAccessControlError( + await expect(checker.setMaxCLBalanceDecreaseBP(500)).to.be.revertedWithOZAccessControlError( deployer.address, role, ); await checker.grantRole(role, deployer.address); - await 
expect(checker.setInitialSlashingAndPenaltiesAmount(1000, 101)).to.not.be.reverted; + await expect(checker.setMaxCLBalanceDecreaseBP(500)).to.not.be.reverted; }); - it("CL Oracle related functions require SECOND_OPINION_MANAGER_ROLE", async () => { + it("SECOND_OPINION_MANAGER_ROLE works", async () => { const clOraclesRole = await checker.SECOND_OPINION_MANAGER_ROLE(); await expect( @@ -485,4 +1128,94 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { await expect(checker.setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, 74)).to.not.be.reverted; }); }); + + context("OracleReportSanityChecker migrateBaselineSnapshot", () => { + const CHURN_LIMIT = ether("57600"); + + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + + it("is permissionless before migration completes", async () => { + await lido.mock__setContractVersion(4); + await expect(checker.migrateBaselineSnapshot()).not.to.be.reverted; + }); + + it("reverts with UnexpectedLidoVersion when version != 4", async () => { + await lido.mock__setContractVersion(3); + await expect(checker.migrateBaselineSnapshot()) + .to.be.revertedWithCustomError(checker, "UnexpectedLidoVersion") + .withArgs(3, 4); + }); + + it("seeds baseline and bootstrap entries in reportData and emits event", async () => { + const clActive = ether("10000000"); + const clPending = ether("500000"); + const deposits = ether("320000"); + const depositsCur = ether("320000"); + await lido.mock__setContractVersion(4); + await lido.mock__setBalanceStats(clActive, clPending, deposits, depositsCur); + + const expectedCLBalance = clActive + clPending; + + await expect(checker.migrateBaselineSnapshot()) + .to.emit(checker, 
"BaselineSnapshotMigrated") + .withArgs(expectedCLBalance, deposits, CHURN_LIMIT); + + expect(await checker.getReportDataCount()).to.equal(2); + + const baselineData = await checker.reportData(0); + expect(baselineData.timestamp).to.equal(0n); + expect(baselineData.clBalance).to.equal(expectedCLBalance); + expect(baselineData.deposits).to.equal(0); + expect(baselineData.clWithdrawals).to.equal(0); + + const bootstrapFlowData = await checker.reportData(1); + expect(bootstrapFlowData.timestamp).to.equal(0n); + expect(bootstrapFlowData.clBalance).to.equal(expectedCLBalance); + expect(bootstrapFlowData.deposits).to.equal(deposits); + expect(bootstrapFlowData.clWithdrawals).to.equal(CHURN_LIMIT); + }); + + it("reverts with MigrationAlreadyDone on second call", async () => { + await lido.mock__setContractVersion(4); + await lido.mock__setBalanceStats(ether("10000000"), ether("500000"), ether("320000"), ether("320000")); + + await checker.migrateBaselineSnapshot(); + await expect(checker.migrateBaselineSnapshot()).to.be.revertedWithCustomError(checker, "MigrationAlreadyDone"); + }); + + it("after migration, decrease within limit passes", async () => { + const clActive = ether("10000000"); + const clPending = ether("500000"); + const migrationDeposits = ether("320000"); + const migrationDepositsCur = ether("320000"); + await lido.mock__setContractVersion(4); + await lido.mock__setBalanceStats(clActive, clPending, migrationDeposits, migrationDepositsCur); + + await checker.migrateBaselineSnapshot(); + + // reportData[0] = baseline point with zero flows + // reportData[1] = bootstrap flow chunk with migration deposits/withdrawals + const baseline = clActive + clPending; + const postCL = ether("10200000"); + const actualDiff = baseline - postCL; + const adjusted = baseline + migrationDeposits - CHURN_LIMIT; + const expectedMaxDiff = maxDiffFor(adjusted); + + // Pass the actual vault balance as WVB since migration initialized _lastVaultBalanceAfterTransfer + const vaultBalance 
= await ethers.provider.getBalance(withdrawalVault.address); + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, vaultBalance)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + }); }); diff --git a/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts b/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts index e26e066ab3..ebd4cee63c 100644 --- a/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts +++ b/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts @@ -11,18 +11,19 @@ import { Burner__MockForSanityChecker, LidoLocator__MockForSanityChecker, OracleReportSanityChecker, + OracleReportSanityCheckerWrapper, StakingRouter__MockForSanityChecker, WithdrawalQueue__MockForSanityChecker, } from "typechain-types"; -import { ether, getCurrentBlockTimestamp, impersonate, randomAddress } from "lib"; +import { ether, impersonate } from "lib"; import { TOTAL_BASIS_POINTS } from "lib/constants"; import { Snapshot } from "test/suite"; -const MAX_UINT16 = BigInt(2 ** 16); -const MAX_UINT32 = BigInt(2 ** 32); -const MAX_UINT64 = BigInt(2 ** 64); +const OVER_UINT16 = 1n << 16n; +const OVER_UINT32 = 1n << 32n; +const OVER_UINT64 = 1n << 64n; describe("OracleReportSanityChecker.sol", () => { let checker: OracleReportSanityChecker; @@ -35,41 +36,31 @@ describe("OracleReportSanityChecker.sol", () => { let accountingOracle: AccountingOracle__MockForSanityChecker; let withdrawalVault: HardhatEthersSigner; + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let elRewardsVault: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + let manager: HardhatEthersSigner; const defaultLimits = { - exitedValidatorsPerDayLimit: 55n, - appearedValidatorsPerDayLimit: 100n, - annualBalanceIncreaseBPLimit: 10_00n, // 10% - simulatedShareRateDeviationBPLimit: 2_50n, // 2.5% - maxValidatorExitRequestsPerReport: 2000n, + exitedEthAmountPerDayLimit: 55n, + 
appearedEthAmountPerDayLimit: 100n, + annualBalanceIncreaseBPLimit: 1_000n, + simulatedShareRateDeviationBPLimit: 250n, + maxBalanceExitRequestedPerReportInEth: 65_000n, + maxEffectiveBalanceWeightWCType01: 32n, + maxEffectiveBalanceWeightWCType02: 2_048n, maxItemsPerExtraDataTransaction: 15n, maxNodeOperatorsPerExtraDataItem: 16n, requestTimestampMargin: 128n, - maxPositiveTokenRebase: 5_000_000n, // 0.05% - initialSlashingAmountPWei: 1000n, - inactivityPenaltiesAmountPWei: 101n, - clBalanceOraclesErrorUpperBPLimit: 50n, // 0.5% - }; - - const correctOracleReport = { - timeElapsed: 24n * 60n * 60n, - preCLBalance: ether("100000"), - postCLBalance: ether("100001"), - withdrawalVaultBalance: 0n, - elRewardsVaultBalance: 0n, - sharesRequestedToBurn: 0n, - preCLValidators: 0n, - postCLValidators: 0n, - etherToLockForWithdrawals: 0n, + maxPositiveTokenRebase: 5_000_000n, + maxCLBalanceDecreaseBP: 360n, + clBalanceOraclesErrorUpperBPLimit: 50n, + consolidationEthAmountPerDayLimit: 10n, + exitedValidatorEthAmountLimit: 1n, + externalPendingBalanceCapEth: 0n, }; - let deployer: HardhatEthersSigner; - let admin: HardhatEthersSigner; - let elRewardsVault: HardhatEthersSigner; - - let stranger: HardhatEthersSigner; - let manager: HardhatEthersSigner; - let originalState: string; before(async () => { @@ -83,548 +74,1963 @@ describe("OracleReportSanityChecker.sol", () => { accountingOracle = await ethers.deployContract("AccountingOracle__MockForSanityChecker", [ deployer.address, - 12, // seconds per slot - 1606824023, // genesis time + 12, + 1_606_824_023, ]); stakingRouter = await ethers.deployContract("StakingRouter__MockForSanityChecker"); locator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ { - lido: deployer, - depositSecurityModule: deployer, - elRewardsVault: elRewardsVault, - accountingOracle: accountingOracle, - oracleReportSanityChecker: deployer, - burner: burner, - validatorsExitBusOracle: deployer, - stakingRouter: stakingRouter, - 
treasury: deployer, - withdrawalQueue: withdrawalQueue, - withdrawalVault: withdrawalVault, - postTokenRebaseReceiver: deployer, - oracleDaemonConfig: deployer, - validatorExitDelayVerifier: deployer, - triggerableWithdrawalsGateway: deployer, - accounting: accounting, - predepositGuarantee: deployer, - wstETH: deployer, - vaultHub: deployer, - vaultFactory: deployer, - lazyOracle: deployer, - operatorGrid: deployer, + lido: deployer.address, + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: withdrawalVault.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, }, ]); checker = await ethers.deployContract("OracleReportSanityChecker", [ - locator, - accountingOracle, - accounting, - admin, + await locator.getAddress(), + await accounting.getAddress(), + admin.address, defaultLimits, ]); }); - beforeEach(async () => (originalState = await Snapshot.take())); + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + const deployCheckerWithLidoStats = async ( + contractVersion: bigint, + balanceStats: { clActive: bigint; clPending: bigint; deposits: bigint; 
depositsCurrent: bigint } = { + clActive: ether("100"), + clPending: ether("7"), + deposits: ether("3"), + depositsCurrent: ether("3"), + }, + ) => { + const lido = await ethers.deployContract("Lido__MockForSanityChecker"); + await lido.mock__setContractVersion(contractVersion); + await lido.mock__setBalanceStats( + balanceStats.clActive, + balanceStats.clPending, + balanceStats.deposits, + balanceStats.depositsCurrent, + ); + + const migrationLocator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ + { + lido: await lido.getAddress(), + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: withdrawalVault.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, + }, + ]); + + const checkerWithLidoStats = await ethers.deployContract("OracleReportSanityChecker", [ + await migrationLocator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + ]); - afterEach(async () => await Snapshot.restore(originalState)); + return { checkerWithLidoStats, lido }; + }; - context("constructor", () => { - it("reverts if admin address is zero", async () => { + context("constructor and getters", () => { + it("reverts if admin 
is zero", async () => { await expect( ethers.deployContract("OracleReportSanityChecker", [ - locator, - accountingOracle, - accounting, + await locator.getAddress(), + await accounting.getAddress(), ZeroAddress, defaultLimits, ]), ).to.be.revertedWithCustomError(checker, "AdminCannotBeZero"); }); - }); - - context("getReportDataCount", () => { - it("retrieves correct report data count", async () => { - expect(await checker.getReportDataCount()).to.equal(0); - }); - }); - context("getLidoLocator", () => { - it("retrieves correct locator address", async () => { - expect(await checker.getLidoLocator()).to.equal(locator); - }); - }); + it("returns locator and initial limits", async () => { + expect(await checker.getLidoLocator()).to.equal(await locator.getAddress()); - context("getOracleReportLimits", () => { - it("retrieves correct oracle report limits", async () => { const limits = await checker.getOracleReportLimits(); - expect(limits.exitedValidatorsPerDayLimit).to.equal(defaultLimits.exitedValidatorsPerDayLimit); - expect(limits.appearedValidatorsPerDayLimit).to.equal(defaultLimits.appearedValidatorsPerDayLimit); + expect(limits.exitedEthAmountPerDayLimit).to.equal(defaultLimits.exitedEthAmountPerDayLimit); + expect(limits.appearedEthAmountPerDayLimit).to.equal(defaultLimits.appearedEthAmountPerDayLimit); expect(limits.annualBalanceIncreaseBPLimit).to.equal(defaultLimits.annualBalanceIncreaseBPLimit); - expect(limits.maxValidatorExitRequestsPerReport).to.equal(defaultLimits.maxValidatorExitRequestsPerReport); + expect(limits.simulatedShareRateDeviationBPLimit).to.equal(defaultLimits.simulatedShareRateDeviationBPLimit); + expect(limits.maxBalanceExitRequestedPerReportInEth).to.equal( + defaultLimits.maxBalanceExitRequestedPerReportInEth, + ); + expect(limits.maxEffectiveBalanceWeightWCType01).to.equal(defaultLimits.maxEffectiveBalanceWeightWCType01); + expect(limits.maxEffectiveBalanceWeightWCType02).to.equal(defaultLimits.maxEffectiveBalanceWeightWCType02); 
expect(limits.maxItemsPerExtraDataTransaction).to.equal(defaultLimits.maxItemsPerExtraDataTransaction); expect(limits.maxNodeOperatorsPerExtraDataItem).to.equal(defaultLimits.maxNodeOperatorsPerExtraDataItem); expect(limits.requestTimestampMargin).to.equal(defaultLimits.requestTimestampMargin); expect(limits.maxPositiveTokenRebase).to.equal(defaultLimits.maxPositiveTokenRebase); + expect(limits.maxCLBalanceDecreaseBP).to.equal(defaultLimits.maxCLBalanceDecreaseBP); expect(limits.clBalanceOraclesErrorUpperBPLimit).to.equal(defaultLimits.clBalanceOraclesErrorUpperBPLimit); - expect(limits.initialSlashingAmountPWei).to.equal(defaultLimits.initialSlashingAmountPWei); - expect(limits.inactivityPenaltiesAmountPWei).to.equal(defaultLimits.inactivityPenaltiesAmountPWei); + expect(limits.consolidationEthAmountPerDayLimit).to.equal(defaultLimits.consolidationEthAmountPerDayLimit); + expect(limits.exitedValidatorEthAmountLimit).to.equal(defaultLimits.exitedValidatorEthAmountLimit); + expect(limits.externalPendingBalanceCapEth).to.equal(defaultLimits.externalPendingBalanceCapEth); }); - }); - context("getMaxPositiveTokenRebase", () => { - it("returns correct max positive token rebase", async () => { + it("returns max positive token rebase and max CL decrease BP", async () => { expect(await checker.getMaxPositiveTokenRebase()).to.equal(defaultLimits.maxPositiveTokenRebase); + expect(await checker.getMaxCLBalanceDecreaseBP()).to.equal(defaultLimits.maxCLBalanceDecreaseBP); + expect(await checker.getMaxEffectiveBalanceWeightWCType01()).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType01, + ); + expect(await checker.getMaxEffectiveBalanceWeightWCType02()).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType02, + ); }); }); - context("setOracleReportLimits", () => { - const newLimits = { - exitedValidatorsPerDayLimit: 50, - appearedValidatorsPerDayLimit: 75, - annualBalanceIncreaseBPLimit: 15_00, - simulatedShareRateDeviationBPLimit: 1_50, // 1.5% - 
maxValidatorExitRequestsPerReport: 3000, - maxItemsPerExtraDataTransaction: 15 + 1, - maxNodeOperatorsPerExtraDataItem: 16 + 1, - requestTimestampMargin: 2048, - maxPositiveTokenRebase: 10_000_000, - initialSlashingAmountPWei: 2000, - inactivityPenaltiesAmountPWei: 303, - clBalanceOraclesErrorUpperBPLimit: 12, - }; - - before(async () => { - await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager); - }); + context("limits management", () => { + it("setOracleReportLimits: ACL and update", async () => { + const newLimits = { + ...defaultLimits, + exitedEthAmountPerDayLimit: 42n, + appearedEthAmountPerDayLimit: 88n, + consolidationEthAmountPerDayLimit: 7n, + exitedValidatorEthAmountLimit: 2n, + externalPendingBalanceCapEth: 9n, + }; - after(async () => { - await checker.connect(admin).revokeRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager); - }); + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( checker.connect(stranger).setOracleReportLimits(newLimits, ZeroAddress), ).to.be.revertedWithOZAccessControlError(stranger.address, await checker.ALL_LIMITS_MANAGER_ROLE()); + + await expect(checker.connect(manager).setOracleReportLimits(newLimits, ZeroAddress)) + .to.emit(checker, "ExitedEthAmountPerDayLimitSet") + .withArgs(42n) + .to.emit(checker, "AppearedEthAmountPerDayLimitSet") + .withArgs(88n) + .to.emit(checker, "ConsolidationEthAmountPerDayLimitSet") + .withArgs(7n) + .to.emit(checker, "ExitedValidatorEthAmountLimitSet") + .withArgs(2n) + .to.emit(checker, "ExternalPendingBalanceCapEthSet") + .withArgs(9n); + + const limits = await checker.getOracleReportLimits(); + expect(limits.exitedEthAmountPerDayLimit).to.equal(42n); + expect(limits.appearedEthAmountPerDayLimit).to.equal(88n); + expect(limits.consolidationEthAmountPerDayLimit).to.equal(7n); + expect(limits.exitedValidatorEthAmountLimit).to.equal(2n); + 
expect(limits.externalPendingBalanceCapEth).to.equal(9n); }); - it("sets limits correctly", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.exitedValidatorsPerDayLimit).to.not.equal(newLimits.exitedValidatorsPerDayLimit); - expect(before.appearedValidatorsPerDayLimit).to.not.equal(newLimits.appearedValidatorsPerDayLimit); - expect(before.annualBalanceIncreaseBPLimit).to.not.equal(newLimits.annualBalanceIncreaseBPLimit); - expect(before.maxValidatorExitRequestsPerReport).to.not.equal(newLimits.maxValidatorExitRequestsPerReport); - expect(before.maxItemsPerExtraDataTransaction).to.not.equal(newLimits.maxItemsPerExtraDataTransaction); - expect(before.maxNodeOperatorsPerExtraDataItem).to.not.equal(newLimits.maxNodeOperatorsPerExtraDataItem); - expect(before.requestTimestampMargin).to.not.equal(newLimits.requestTimestampMargin); - expect(before.maxPositiveTokenRebase).to.not.equal(newLimits.maxPositiveTokenRebase); - expect(before.clBalanceOraclesErrorUpperBPLimit).to.not.equal(newLimits.clBalanceOraclesErrorUpperBPLimit); - expect(before.initialSlashingAmountPWei).to.not.equal(newLimits.initialSlashingAmountPWei); - expect(before.inactivityPenaltiesAmountPWei).to.not.equal(newLimits.inactivityPenaltiesAmountPWei); - - await checker.connect(manager).setOracleReportLimits(newLimits, ZeroAddress); - - const after = await checker.getOracleReportLimits(); - expect(after.exitedValidatorsPerDayLimit).to.equal(newLimits.exitedValidatorsPerDayLimit); - expect(after.appearedValidatorsPerDayLimit).to.equal(newLimits.appearedValidatorsPerDayLimit); - expect(after.annualBalanceIncreaseBPLimit).to.equal(newLimits.annualBalanceIncreaseBPLimit); - expect(after.maxValidatorExitRequestsPerReport).to.equal(newLimits.maxValidatorExitRequestsPerReport); - expect(after.maxItemsPerExtraDataTransaction).to.equal(newLimits.maxItemsPerExtraDataTransaction); - 
expect(after.maxNodeOperatorsPerExtraDataItem).to.equal(newLimits.maxNodeOperatorsPerExtraDataItem); - expect(after.requestTimestampMargin).to.equal(newLimits.requestTimestampMargin); - expect(after.maxPositiveTokenRebase).to.equal(newLimits.maxPositiveTokenRebase); - expect(after.clBalanceOraclesErrorUpperBPLimit).to.equal(newLimits.clBalanceOraclesErrorUpperBPLimit); - expect(after.initialSlashingAmountPWei).to.equal(newLimits.initialSlashingAmountPWei); - expect(after.inactivityPenaltiesAmountPWei).to.equal(newLimits.inactivityPenaltiesAmountPWei); - expect(after.clBalanceOraclesErrorUpperBPLimit).to.equal(newLimits.clBalanceOraclesErrorUpperBPLimit); - }); - - it("sets second opinion oracle", async () => { - const secondOpinionOracle = randomAddress(); - await expect(checker.connect(manager).setOracleReportLimits(newLimits, secondOpinionOracle)) - .to.emit(checker, "SecondOpinionOracleChanged") - .withArgs(secondOpinionOracle); + it("setExitedEthAmountPerDayLimit: validates bounds", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setExitedEthAmountPerDayLimit(OVER_UINT32)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + + await expect(checker.connect(manager).setExitedEthAmountPerDayLimit(60n)) + .to.emit(checker, "ExitedEthAmountPerDayLimitSet") + .withArgs(60n); - expect(await checker.secondOpinionOracle()).to.equal(secondOpinionOracle); + expect((await checker.getOracleReportLimits()).exitedEthAmountPerDayLimit).to.equal(60n); }); - }); - context("setExitedValidatorsPerDayLimit", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + it("setExitedEthAmountPerDayLimit: ACL", async () => { + await expect(checker.connect(stranger).setExitedEthAmountPerDayLimit(60n)).to.be.revertedWithOZAccessControlError( + 
stranger.address, + await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), + ); + }); + + it("sets exited/appeared ETH limits via dedicated setters", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await checker.connect(manager).setExitedEthAmountPerDayLimit(61n); + await checker.connect(manager).setAppearedEthAmountPerDayLimit(101n); + + const limits = await checker.getOracleReportLimits(); + expect(limits.exitedEthAmountPerDayLimit).to.equal(61n); + expect(limits.appearedEthAmountPerDayLimit).to.equal(101n); + }); + + it("dedicated exited/appeared ETH setters emit events", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setExitedEthAmountPerDayLimit(62n)) + .to.emit(checker, "ExitedEthAmountPerDayLimitSet") + .withArgs(62n); + await expect(checker.connect(manager).setAppearedEthAmountPerDayLimit(102n)) + .to.emit(checker, "AppearedEthAmountPerDayLimitSet") + .withArgs(102n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + it("setExitedValidatorEthAmountLimit: validates min and updates", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setExitedValidatorEthAmountLimit(0n)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + + await expect(checker.connect(manager).setExitedValidatorEthAmountLimit(3n)) + .to.emit(checker, 
"ExitedValidatorEthAmountLimitSet") + .withArgs(3n); + + expect((await checker.getOracleReportLimits()).exitedValidatorEthAmountLimit).to.equal(3n); }); - it("reverts if called by non-manager", async () => { + it("setExitedValidatorEthAmountLimit: ACL", async () => { await expect( - checker.connect(stranger).setExitedValidatorsPerDayLimit(100n), + checker.connect(stranger).setExitedValidatorEthAmountLimit(2n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), + await checker.EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE(), ); }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setExitedValidatorsPerDayLimit(MAX_UINT16)).to.be.revertedWithCustomError( + it("setExitedValidatorEthAmountLimit: validates uint16 upper bound", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(manager).setExitedValidatorEthAmountLimit(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + }); + + it("setExternalPendingBalanceCapEth: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setExternalPendingBalanceCapEth(5n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), + ); + + await expect(checker.connect(manager).setExternalPendingBalanceCapEth(OVER_UINT16)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); - }); - it("sets limit correctly and emits `ExitedValidatorsPerDayLimitSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.exitedValidatorsPerDayLimit).to.not.equal(100n); + await 
expect(checker.connect(manager).setExternalPendingBalanceCapEth(5n)) + .to.emit(checker, "ExternalPendingBalanceCapEthSet") + .withArgs(5n); - await expect(checker.connect(manager).setExitedValidatorsPerDayLimit(100n)) - .to.emit(checker, "ExitedValidatorsPerDayLimitSet") - .withArgs(100n); - - const after = await checker.getOracleReportLimits(); - expect(after.exitedValidatorsPerDayLimit).to.equal(100n); + expect((await checker.getOracleReportLimits()).externalPendingBalanceCapEth).to.equal(5n); }); - }); - context("setAppearedValidatorsPerDayLimit", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + it("setRequestTimestampMargin validates uint32 bound", async () => { + await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setRequestTimestampMargin(OVER_UINT32)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + it("setSecondOpinionOracleAndCLBalanceUpperMargin updates oracle and limit", async () => { + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + + const secondOpinion = deployer.address; + await expect(checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinion, 99n)) + .to.emit(checker, "SecondOpinionOracleChanged") + .withArgs(secondOpinion) + .to.emit(checker, "CLBalanceOraclesErrorUpperBPLimitSet") + .withArgs(99n); + + expect(await checker.secondOpinionOracle()).to.equal(secondOpinion); + expect((await checker.getOracleReportLimits()).clBalanceOraclesErrorUpperBPLimit).to.equal(99n); }); - it("reverts if called by non-manager", async () => { + it("setAppearedEthAmountPerDayLimit: ACL, bounds and update", async () => { + 
await checker + .connect(admin) + .grantRole(await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + await expect( - checker.connect(stranger).setAppearedValidatorsPerDayLimit(101n), + checker.connect(stranger).setAppearedEthAmountPerDayLimit(120n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), + await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setAppearedValidatorsPerDayLimit(MAX_UINT16)).to.be.revertedWithCustomError( + await expect(checker.connect(manager).setAppearedEthAmountPerDayLimit(OVER_UINT32)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); + + await expect(checker.connect(manager).setAppearedEthAmountPerDayLimit(120n)) + .to.emit(checker, "AppearedEthAmountPerDayLimitSet") + .withArgs(120n); + + expect((await checker.getOracleReportLimits()).appearedEthAmountPerDayLimit).to.equal(120n); }); - it("sets limit correctly and emits `AppearedValidatorsPerDayLimitSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.appearedValidatorsPerDayLimit).to.not.equal(101n); + it("setConsolidationEthAmountPerDayLimit: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setConsolidationEthAmountPerDayLimit(11n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), + ); - await expect(checker.connect(manager).setAppearedValidatorsPerDayLimit(101n)) - .to.emit(checker, "AppearedValidatorsPerDayLimitSet") - .withArgs(101n); + await expect( + checker.connect(manager).setConsolidationEthAmountPerDayLimit(OVER_UINT32), + 
).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - const after = await checker.getOracleReportLimits(); - expect(after.appearedValidatorsPerDayLimit).to.equal(101n); - }); - }); + await expect(checker.connect(manager).setConsolidationEthAmountPerDayLimit(11n)) + .to.emit(checker, "ConsolidationEthAmountPerDayLimitSet") + .withArgs(11n); - context("setAnnualBalanceIncreaseBPLimit", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), manager); + expect((await checker.getOracleReportLimits()).consolidationEthAmountPerDayLimit).to.equal(11n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), manager); - }); + it("setAnnualBalanceIncreaseBPLimit: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( - checker.connect(stranger).setAnnualBalanceIncreaseBPLimit(100n), + checker.connect(stranger).setAnnualBalanceIncreaseBPLimit(250n), ).to.be.revertedWithOZAccessControlError( stranger.address, await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( checker.connect(manager).setAnnualBalanceIncreaseBPLimit(TOTAL_BASIS_POINTS + 1n), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); - it("sets limit correctly and emits `AnnualBalanceIncreaseBPLimitSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.annualBalanceIncreaseBPLimit).to.not.equal(100n); - - await expect(checker.connect(manager).setAnnualBalanceIncreaseBPLimit(100n)) + await expect(checker.connect(manager).setAnnualBalanceIncreaseBPLimit(250n)) .to.emit(checker, "AnnualBalanceIncreaseBPLimitSet") - .withArgs(100n); - - 
const after = await checker.getOracleReportLimits(); - expect(after.annualBalanceIncreaseBPLimit).to.equal(100n); - }); - }); + .withArgs(250n); - context("setMaxExitRequestsPerOracleReport", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), manager); + expect((await checker.getOracleReportLimits()).annualBalanceIncreaseBPLimit).to.equal(250n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), manager); - }); + it("setSimulatedShareRateDeviationBPLimit: ACL, bounds and update", async () => { + await checker.connect(admin).grantRole(await checker.SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( - checker.connect(stranger).setMaxExitRequestsPerOracleReport(100n), + checker.connect(stranger).setSimulatedShareRateDeviationBPLimit(300n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), + await checker.SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( - checker.connect(manager).setMaxExitRequestsPerOracleReport(MAX_UINT16), + checker.connect(manager).setSimulatedShareRateDeviationBPLimit(TOTAL_BASIS_POINTS + 1n), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect(checker.connect(manager).setSimulatedShareRateDeviationBPLimit(300n)) + .to.emit(checker, "SimulatedShareRateDeviationBPLimitSet") + .withArgs(300n); + + expect((await checker.getOracleReportLimits()).simulatedShareRateDeviationBPLimit).to.equal(300n); }); - it("sets limit correctly and emits `MaxValidatorExitRequestsPerReportSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxValidatorExitRequestsPerReport).to.not.equal(100n); + 
it("setMaxBalanceExitRequestedPerReportInEth: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setMaxBalanceExitRequestedPerReportInEth(60_000n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), + ); + + await expect( + checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - await expect(checker.connect(manager).setMaxExitRequestsPerOracleReport(100n)) - .to.emit(checker, "MaxValidatorExitRequestsPerReportSet") - .withArgs(100n); + await expect(checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(60_000n)) + .to.emit(checker, "MaxBalanceExitRequestedPerReportInEthSet") + .withArgs(60_000n); - const after = await checker.getOracleReportLimits(); - expect(after.maxValidatorExitRequestsPerReport).to.equal(100n); + expect((await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth).to.equal(60_000n); }); - }); - context("setRequestTimestampMargin", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); - }); + it("setMaxBalanceExitRequestedPerReportInEth accepts zero", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); - after(async () => { - await checker.connect(admin).revokeRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); + await expect(checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(0n)) + .to.emit(checker, "MaxBalanceExitRequestedPerReportInEthSet") + .withArgs(0n); + + expect((await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth).to.equal(0n); }); - 
it("reverts if called by non-manager", async () => { - await expect(checker.connect(stranger).setRequestTimestampMargin(100n)).to.be.revertedWithOZAccessControlError( + it("setMaxEffectiveBalanceWeightWCType01: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setMaxEffectiveBalanceWeightWCType01(64n), + ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), + await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setRequestTimestampMargin(MAX_UINT32)).to.be.revertedWithCustomError( + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(0n)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); - }); - - it("sets limit correctly and emits `RequestTimestampMarginSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.requestTimestampMargin).to.not.equal(100n); - await expect(checker.connect(manager).setRequestTimestampMargin(100n)) - .to.emit(checker, "RequestTimestampMarginSet") - .withArgs(100n); + await expect( + checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - const after = await checker.getOracleReportLimits(); - expect(after.requestTimestampMargin).to.equal(100n); - }); - }); + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n)) + .to.emit(checker, "MaxEffectiveBalanceWeightWCType01Set") + .withArgs(64n); - context("setMaxPositiveTokenRebase", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); + expect((await 
checker.getOracleReportLimits()).maxEffectiveBalanceWeightWCType01).to.equal(64n); + expect(await checker.getMaxEffectiveBalanceWeightWCType01()).to.equal(64n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); - }); + it("setMaxEffectiveBalanceWeightWCType02: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { - await expect(checker.connect(stranger).setMaxPositiveTokenRebase(100n)).to.be.revertedWithOZAccessControlError( + await expect( + checker.connect(stranger).setMaxEffectiveBalanceWeightWCType02(4_096n), + ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), + await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setMaxPositiveTokenRebase(MAX_UINT64 + 1n)).to.be.revertedWithCustomError( + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType02(0n)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); - }); - it("reverts if limit is less than min", async () => { - await expect(checker.connect(manager).setMaxPositiveTokenRebase(0n)).to.be.revertedWithCustomError( - checker, - "IncorrectLimitValue", - ); + await expect( + checker.connect(manager).setMaxEffectiveBalanceWeightWCType02(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType02(4_096n)) + .to.emit(checker, "MaxEffectiveBalanceWeightWCType02Set") + .withArgs(4_096n); + + expect((await checker.getOracleReportLimits()).maxEffectiveBalanceWeightWCType02).to.equal(4_096n); + expect(await 
checker.getMaxEffectiveBalanceWeightWCType02()).to.equal(4_096n); }); - it("sets limit correctly and emits `MaxPositiveTokenRebaseSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxPositiveTokenRebase).to.not.equal(100n); + it("limit setters do not emit events when the value does not change", async () => { + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); - await expect(checker.connect(manager).setMaxPositiveTokenRebase(100n)) - .to.emit(checker, "MaxPositiveTokenRebaseSet") - .withArgs(100n); + await checker.connect(manager).setMaxPositiveTokenRebase(600_000n); + await expect(checker.connect(manager).setMaxPositiveTokenRebase(600_000n)).to.not.emit( + checker, + "MaxPositiveTokenRebaseSet", + ); - const after = await checker.getOracleReportLimits(); - expect(after.maxPositiveTokenRebase).to.equal(100n); - }); - }); + await checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(60_000n); + await expect(checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(60_000n)).to.not.emit( + checker, + "MaxBalanceExitRequestedPerReportInEthSet", + ); - context("setMaxItemsPerExtraDataTransaction", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), manager); + await checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n); + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n)).to.not.emit( + checker, + "MaxEffectiveBalanceWeightWCType01Set", + ); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), 
manager); - }); + it("setMaxItemsPerExtraDataTransaction: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( checker.connect(stranger).setMaxItemsPerExtraDataTransaction(100n), ).to.be.revertedWithOZAccessControlError( stranger.address, await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( - checker.connect(manager).setMaxItemsPerExtraDataTransaction(MAX_UINT16), + checker.connect(manager).setMaxItemsPerExtraDataTransaction(OVER_UINT16), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); - - it("sets limit correctly and emits `MaxItemsPerExtraDataTransactionSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxItemsPerExtraDataTransaction).to.not.equal(100n); - await expect(checker.connect(manager).setMaxItemsPerExtraDataTransaction(100n)) + await expect(checker.connect(manager).setMaxItemsPerExtraDataTransaction(20n)) .to.emit(checker, "MaxItemsPerExtraDataTransactionSet") - .withArgs(100n); - - const after = await checker.getOracleReportLimits(); - expect(after.maxItemsPerExtraDataTransaction).to.equal(100n); - }); - }); + .withArgs(20n); - context("setMaxNodeOperatorsPerExtraDataItem", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), manager); + expect((await checker.getOracleReportLimits()).maxItemsPerExtraDataTransaction).to.equal(20n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), manager); - }); + it("setMaxNodeOperatorsPerExtraDataItem: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await 
checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( checker.connect(stranger).setMaxNodeOperatorsPerExtraDataItem(100n), ).to.be.revertedWithOZAccessControlError( stranger.address, await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( - checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(MAX_UINT16), + checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(OVER_UINT16), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); - - it("sets limit correctly and emits `MaxNodeOperatorsPerExtraDataItemSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxNodeOperatorsPerExtraDataItem).to.not.equal(100n); - await expect(checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(100n)) + await expect(checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(20n)) .to.emit(checker, "MaxNodeOperatorsPerExtraDataItemSet") - .withArgs(100n); + .withArgs(20n); - const after = await checker.getOracleReportLimits(); - expect(after.maxNodeOperatorsPerExtraDataItem).to.equal(100n); + expect((await checker.getOracleReportLimits()).maxNodeOperatorsPerExtraDataItem).to.equal(20n); }); - }); - context("setSecondOpinionOracleAndCLBalanceUpperMargin", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager); + it("setRequestTimestampMargin updates value and emits event", async () => { + await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(stranger).setRequestTimestampMargin(512n)).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), + ); + + await 
expect(checker.connect(manager).setRequestTimestampMargin(512n)) + .to.emit(checker, "RequestTimestampMarginSet") + .withArgs(512n); + + expect((await checker.getOracleReportLimits()).requestTimestampMargin).to.equal(512n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager); + it("setMaxPositiveTokenRebase: ACL, min/max and update", async () => { + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setMaxPositiveTokenRebase(600_000n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), + ); + + await expect(checker.connect(manager).setMaxPositiveTokenRebase(0n)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + await expect(checker.connect(manager).setMaxPositiveTokenRebase(OVER_UINT64)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + + await expect(checker.connect(manager).setMaxPositiveTokenRebase(600_000n)) + .to.emit(checker, "MaxPositiveTokenRebaseSet") + .withArgs(600_000n); + + expect((await checker.getOracleReportLimits()).maxPositiveTokenRebase).to.equal(600_000n); }); - it("reverts if called by non-manager", async () => { + it("setMaxCLBalanceDecreaseBP: ACL, bounds and update", async () => { + await checker.connect(admin).grantRole(await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(stranger).setMaxCLBalanceDecreaseBP(200n)).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), + ); + await expect( - checker.connect(stranger).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, 100n), - ).to.be.revertedWithOZAccessControlError(stranger.address, await checker.SECOND_OPINION_MANAGER_ROLE()); + 
checker.connect(manager).setMaxCLBalanceDecreaseBP(TOTAL_BASIS_POINTS + 1n), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect(checker.connect(manager).setMaxCLBalanceDecreaseBP(200n)) + .to.emit(checker, "MaxCLBalanceDecreaseBPSet") + .withArgs(200n); + + expect((await checker.getOracleReportLimits()).maxCLBalanceDecreaseBP).to.equal(200n); }); - it("reverts if limit is greater than max", async () => { + it("setSecondOpinionOracleAndCLBalanceUpperMargin validates basis points bound", async () => { + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await expect( checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, TOTAL_BASIS_POINTS + 1n), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); }); - it("sets limit correctly and emits `CLBalanceOraclesErrorUpperBPLimitSet` event", async () => { - await expect(checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, 100n)) - .to.emit(checker, "CLBalanceOraclesErrorUpperBPLimitSet") - .withArgs(100n); + it("setSecondOpinionOracleAndCLBalanceUpperMargin does not emit oracle change for same address", async () => { + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + + const secondOpinion = deployer.address; + await checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinion, 50n); + await expect( + checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinion, 51n), + ).to.not.emit(checker, "SecondOpinionOracleChanged"); }); - it("changes the second opinion oracle if it is new", async () => { - const secondOpinionOracle = randomAddress(); - await expect(checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinionOracle, 100n)) - .to.emit(checker, "SecondOpinionOracleChanged") - .withArgs(secondOpinionOracle); + it("setOracleReportLimits 
rejects invalid exitedValidatorEthAmountLimit", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); - expect(await checker.secondOpinionOracle()).to.equal(secondOpinionOracle); + await expect( + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, exitedValidatorEthAmountLimit: 0n }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect( + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, exitedValidatorEthAmountLimit: OVER_UINT16 }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); }); - }); - context("setInitialSlashingAndPenaltiesAmount", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(), manager); - }); + it("setOracleReportLimits rejects invalid externalPendingBalanceCapEth", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); + + await expect( + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, externalPendingBalanceCapEth: OVER_UINT16 }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + }); + + it("setOracleReportLimits rejects invalid annualBalanceIncreaseBPLimit", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); + + await expect( + checker + .connect(manager) + .setOracleReportLimits( + { ...defaultLimits, annualBalanceIncreaseBPLimit: TOTAL_BASIS_POINTS + 1n }, + ZeroAddress, + ), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + }); + + it("setOracleReportLimits rejects invalid maxEffectiveBalanceWeight values", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); + + await expect( + checker + .connect(manager) + .setOracleReportLimits({ 
...defaultLimits, maxEffectiveBalanceWeightWCType01: 0n }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect( + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, maxEffectiveBalanceWeightWCType02: OVER_UINT16 }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + }); + + it("roundtrips limits at packed type boundaries", async () => { + const wrapper = (await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + true, + ])) as OracleReportSanityCheckerWrapper; + + const maxPackedLimits = { + exitedEthAmountPerDayLimit: OVER_UINT32 - 1n, + appearedEthAmountPerDayLimit: OVER_UINT32 - 1n, + annualBalanceIncreaseBPLimit: TOTAL_BASIS_POINTS, + simulatedShareRateDeviationBPLimit: TOTAL_BASIS_POINTS, + maxBalanceExitRequestedPerReportInEth: OVER_UINT16 - 1n, + maxEffectiveBalanceWeightWCType01: OVER_UINT16 - 1n, + maxEffectiveBalanceWeightWCType02: OVER_UINT16 - 1n, + maxItemsPerExtraDataTransaction: OVER_UINT16 - 1n, + maxNodeOperatorsPerExtraDataItem: OVER_UINT16 - 1n, + requestTimestampMargin: OVER_UINT32 - 1n, + maxPositiveTokenRebase: OVER_UINT64 - 1n, + maxCLBalanceDecreaseBP: TOTAL_BASIS_POINTS, + clBalanceOraclesErrorUpperBPLimit: TOTAL_BASIS_POINTS, + consolidationEthAmountPerDayLimit: OVER_UINT32 - 1n, + exitedValidatorEthAmountLimit: OVER_UINT16 - 1n, + externalPendingBalanceCapEth: OVER_UINT16 - 1n, + }; + + const roundtrip = await wrapper.roundtripRawLimits(maxPackedLimits); + + expect(roundtrip.exitedEthAmountPerDayLimit).to.equal(maxPackedLimits.exitedEthAmountPerDayLimit); + expect(roundtrip.appearedEthAmountPerDayLimit).to.equal(maxPackedLimits.appearedEthAmountPerDayLimit); + expect(roundtrip.annualBalanceIncreaseBPLimit).to.equal(maxPackedLimits.annualBalanceIncreaseBPLimit); + 
expect(roundtrip.simulatedShareRateDeviationBPLimit).to.equal(maxPackedLimits.simulatedShareRateDeviationBPLimit); + expect(roundtrip.maxBalanceExitRequestedPerReportInEth).to.equal( + maxPackedLimits.maxBalanceExitRequestedPerReportInEth, + ); + expect(roundtrip.maxEffectiveBalanceWeightWCType01).to.equal(maxPackedLimits.maxEffectiveBalanceWeightWCType01); + expect(roundtrip.maxEffectiveBalanceWeightWCType02).to.equal(maxPackedLimits.maxEffectiveBalanceWeightWCType02); + expect(roundtrip.maxItemsPerExtraDataTransaction).to.equal(maxPackedLimits.maxItemsPerExtraDataTransaction); + expect(roundtrip.maxNodeOperatorsPerExtraDataItem).to.equal(maxPackedLimits.maxNodeOperatorsPerExtraDataItem); + expect(roundtrip.requestTimestampMargin).to.equal(maxPackedLimits.requestTimestampMargin); + expect(roundtrip.maxPositiveTokenRebase).to.equal(maxPackedLimits.maxPositiveTokenRebase); + expect(roundtrip.maxCLBalanceDecreaseBP).to.equal(maxPackedLimits.maxCLBalanceDecreaseBP); + expect(roundtrip.clBalanceOraclesErrorUpperBPLimit).to.equal(maxPackedLimits.clBalanceOraclesErrorUpperBPLimit); + expect(roundtrip.consolidationEthAmountPerDayLimit).to.equal(maxPackedLimits.consolidationEthAmountPerDayLimit); + expect(roundtrip.exitedValidatorEthAmountLimit).to.equal(maxPackedLimits.exitedValidatorEthAmountLimit); + expect(roundtrip.externalPendingBalanceCapEth).to.equal(maxPackedLimits.externalPendingBalanceCapEth); + }); + + it("packAndStore caches packed limits in wrapper storage", async () => { + const wrapper = (await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + true, + ])) as OracleReportSanityCheckerWrapper; + + await wrapper.packAndStore(); + + const accountingPacked = await wrapper.exposeAccountingCorePackedLimits(); + expect(accountingPacked.exitedEthAmountPerDayLimit).to.equal(defaultLimits.exitedEthAmountPerDayLimit); + 
expect(accountingPacked.appearedEthAmountPerDayLimit).to.equal(defaultLimits.appearedEthAmountPerDayLimit); + expect(accountingPacked.consolidationEthAmountPerDayLimit).to.equal( + defaultLimits.consolidationEthAmountPerDayLimit, + ); + expect(accountingPacked.exitedValidatorEthAmountLimit).to.equal(defaultLimits.exitedValidatorEthAmountLimit); + expect(accountingPacked.annualBalanceIncreaseBPLimit).to.equal(defaultLimits.annualBalanceIncreaseBPLimit); + expect(accountingPacked.simulatedShareRateDeviationBPLimit).to.equal( + defaultLimits.simulatedShareRateDeviationBPLimit, + ); + expect(accountingPacked.maxPositiveTokenRebase).to.equal(defaultLimits.maxPositiveTokenRebase); + expect(accountingPacked.maxCLBalanceDecreaseBP).to.equal(defaultLimits.maxCLBalanceDecreaseBP); + expect(accountingPacked.clBalanceOraclesErrorUpperBPLimit).to.equal( + defaultLimits.clBalanceOraclesErrorUpperBPLimit, + ); + expect(accountingPacked.externalPendingBalanceCapEth).to.equal(defaultLimits.externalPendingBalanceCapEth); + + const operationalPacked = await wrapper.exposeOperationalPackedLimits(); + expect(operationalPacked.maxBalanceExitRequestedPerReportInEth).to.equal( + defaultLimits.maxBalanceExitRequestedPerReportInEth, + ); + expect(operationalPacked.maxEffectiveBalanceWeightWCType01).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType01, + ); + expect(operationalPacked.maxEffectiveBalanceWeightWCType02).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType02, + ); + expect(operationalPacked.maxItemsPerExtraDataTransaction).to.equal(defaultLimits.maxItemsPerExtraDataTransaction); + expect(operationalPacked.maxNodeOperatorsPerExtraDataItem).to.equal( + defaultLimits.maxNodeOperatorsPerExtraDataItem, + ); + expect(operationalPacked.requestTimestampMargin).to.equal(defaultLimits.requestTimestampMargin); + }); + + it("slot-local setters do not affect the other packed storage block", async () => { + await checker.connect(admin).grantRole(await 
checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); + + const initialLimits = await checker.getOracleReportLimits(); + + await checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n); + const afterOperationalUpdate = await checker.getOracleReportLimits(); + expect(afterOperationalUpdate.maxEffectiveBalanceWeightWCType01).to.equal(64n); + expect(afterOperationalUpdate.maxPositiveTokenRebase).to.equal(initialLimits.maxPositiveTokenRebase); + expect(afterOperationalUpdate.exitedEthAmountPerDayLimit).to.equal(initialLimits.exitedEthAmountPerDayLimit); + expect(afterOperationalUpdate.consolidationEthAmountPerDayLimit).to.equal( + initialLimits.consolidationEthAmountPerDayLimit, + ); + + await checker.connect(manager).setMaxPositiveTokenRebase(600_000n); + const afterAccountingUpdate = await checker.getOracleReportLimits(); + expect(afterAccountingUpdate.maxPositiveTokenRebase).to.equal(600_000n); + expect(afterAccountingUpdate.maxEffectiveBalanceWeightWCType01).to.equal( + afterOperationalUpdate.maxEffectiveBalanceWeightWCType01, + ); + expect(afterAccountingUpdate.requestTimestampMargin).to.equal(afterOperationalUpdate.requestTimestampMargin); + expect(afterAccountingUpdate.maxItemsPerExtraDataTransaction).to.equal( + afterOperationalUpdate.maxItemsPerExtraDataTransaction, + ); + expect(afterAccountingUpdate.externalPendingBalanceCapEth).to.equal( + afterOperationalUpdate.externalPendingBalanceCapEth, + ); + }); + + it("packed limits helpers revert with BasisPointsOverflow on raw pack over MAX_BASIS_POINTS", async () => { + const wrapper = (await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + 
defaultLimits, + true, + ])) as OracleReportSanityCheckerWrapper; + + const malformedLimits = { + ...defaultLimits, + annualBalanceIncreaseBPLimit: TOTAL_BASIS_POINTS + 1n, + }; + + await expect(wrapper.packRawLimits(malformedLimits)) + .to.be.revertedWithCustomError(wrapper, "BasisPointsOverflow") + .withArgs(TOTAL_BASIS_POINTS + 1n, TOTAL_BASIS_POINTS); + }); + }); + + context("standalone sanity checks", () => { + it("checkExitBusOracleReport", async () => { + const limit = (await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth; + + await expect(checker.checkExitBusOracleReport(limit)).not.to.be.reverted; + await expect(checker.checkExitBusOracleReport(limit + 1n)) + .to.be.revertedWithCustomError(checker, "IncorrectSumOfExitBalancePerReport") + .withArgs(limit + 1n); + }); + + it("checkExitBusOracleReport allows zero and below-limit values", async () => { + const limit = (await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth; + await expect(checker.checkExitBusOracleReport(0n)).not.to.be.reverted; + await expect(checker.checkExitBusOracleReport(limit - 1n)).not.to.be.reverted; + }); + + it("checkExitedEthAmountPerDay uses timeElapsed (seconds)", async () => { + const limits = await checker.getOracleReportLimits(); + const limitWithConsolidationInWei = + (limits.exitedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + const oneDay = 24n * 60n * 60n; + const exitedValidatorEthAmountLimit = limits.exitedValidatorEthAmountLimit; + const exitedValidatorEthAmountLimitInWei = exitedValidatorEthAmountLimit * ether("1"); + + await expect(checker.checkExitedEthAmountPerDay(0n, oneDay)).not.to.be.reverted; + + const exitedValidatorsCountForDailyExceededRevert = + limitWithConsolidationInWei / exitedValidatorEthAmountLimitInWei + 1n; + const exitedPerDayForDailyExceededRevert = + exitedValidatorsCountForDailyExceededRevert * exitedValidatorEthAmountLimitInWei; + + await 
expect(checker.checkExitedEthAmountPerDay(exitedValidatorsCountForDailyExceededRevert, oneDay)) + .to.be.revertedWithCustomError(checker, "ExitedEthAmountPerDayLimitExceeded") + .withArgs(limitWithConsolidationInWei, exitedPerDayForDailyExceededRevert); + + const exitedPerDayForOneValidatorAndZeroTime = exitedValidatorEthAmountLimitInWei * 86_400n; + const exitedValidatorsCountForGuaranteedRevert = + limitWithConsolidationInWei / exitedPerDayForOneValidatorAndZeroTime + 1n; + const exitedPerDayForGuaranteedRevert = + exitedValidatorsCountForGuaranteedRevert * exitedPerDayForOneValidatorAndZeroTime; + + await expect(checker.checkExitedEthAmountPerDay(exitedValidatorsCountForGuaranteedRevert, 0n)) + .to.be.revertedWithCustomError(checker, "ExitedEthAmountPerDayLimitExceeded") + .withArgs(limitWithConsolidationInWei, exitedPerDayForGuaranteedRevert); + }); + + it("checkAppearedEthAmountPerDay includes consolidation limit", async () => { + const limits = await checker.getOracleReportLimits(); + const limitWithConsolidationInWei = + (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + + await expect(checker.checkAppearedEthAmountPerDay(0n)).not.to.be.reverted; + + const guaranteedExceededAppearedPerDayValue = limitWithConsolidationInWei + 1n; + + await expect(checker.checkAppearedEthAmountPerDay(guaranteedExceededAppearedPerDayValue)) + .to.be.revertedWithCustomError(checker, "AppearedEthAmountPerDayLimitExceeded") + .withArgs(limitWithConsolidationInWei, guaranteedExceededAppearedPerDayValue); + }); + + it("checkAppearedEthAmountPerDay allows exact configured limit", async () => { + const limits = await checker.getOracleReportLimits(); + const limitWithConsolidationInWei = + (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + + await expect(checker.checkAppearedEthAmountPerDay(limitWithConsolidationInWei)).not.to.be.reverted; + }); + + it("checkNodeOperatorsPerExtraDataItemCount", 
async () => { + const limit = (await checker.getOracleReportLimits()).maxNodeOperatorsPerExtraDataItem; + await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12n, limit)).not.to.be.reverted; + + await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12n, limit + 1n)) + .to.be.revertedWithCustomError(checker, "TooManyNodeOpsPerExtraDataItem") + .withArgs(12n, limit + 1n); + }); + + it("checkExtraDataItemsCountPerTransaction", async () => { + const limit = (await checker.getOracleReportLimits()).maxItemsPerExtraDataTransaction; + await expect(checker.checkExtraDataItemsCountPerTransaction(limit)).not.to.be.reverted; + + await expect(checker.checkExtraDataItemsCountPerTransaction(limit + 1n)) + .to.be.revertedWithCustomError(checker, "TooManyItemsPerExtraDataTransaction") + .withArgs(limit, limit + 1n); + }); + + it("checkWithdrawalQueueOracleReport", async () => { + const now = 1_700_000_000n; + const margin = (await checker.getOracleReportLimits()).requestTimestampMargin; + + const oldRequestId = 1n; + const newRequestId = 2n; + + const oldTs = now - margin; + const newTs = now - margin / 2n; + + await withdrawalQueue.setRequestTimestamp(oldRequestId, oldTs); + await withdrawalQueue.setRequestTimestamp(newRequestId, newTs); + + await expect(checker.checkWithdrawalQueueOracleReport(oldRequestId, now)).not.to.be.reverted; + + await expect(checker.checkWithdrawalQueueOracleReport(newRequestId, now)) + .to.be.revertedWithCustomError(checker, "IncorrectRequestFinalization") + .withArgs(newTs); + }); + + context("checkCLPendingBalanceIncrease cold start", () => { + const oneDay = 24n * 60n * 60n; + const noDeposits = 0n; + const unexpectedPendingWei = 1n; + const coldStartDepositsWei = ether("200"); + const largeColdStartDepositsWei = ether("1000000"); + const firstDayAppearedLimitWei = defaultLimits.appearedEthAmountPerDayLimit * ether("1"); + const pendingAfterExactFirstDayActivationWei = coldStartDepositsWei - firstDayAppearedLimitWei; + const 
validatorsBeyondFirstDayLimitWei = firstDayAppearedLimitWei + 1n; + const pendingAfterExceededFirstDayActivationWei = pendingAfterExactFirstDayActivationWei - 1n; + + it("allows a zero-balance first report without deposits", async () => { + await expect(checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, 0n, 0n, noDeposits)).not.to.be.reverted; + }); + + it("rejects a positive first report without deposits", async () => { + await expect(checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, unexpectedPendingWei, 0n, noDeposits)) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, unexpectedPendingWei); + }); + + it("allows a positive first report within external pending balance cap", async () => { + const externalPendingBalanceCapEth = 1n; + const reportedPendingWei = externalPendingBalanceCapEth * ether("1"); + + await checker + .connect(admin) + .grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + + await expect(checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, reportedPendingWei, 0n, noDeposits)).not + .to.be.reverted; + }); + + it("allows the first-report total CL increase up to deposits", async () => { + await expect( + checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, coldStartDepositsWei, 0n, coldStartDepositsWei), + ).not.to.be.reverted; + }); + + it("does not cap first-report deposits by annual growth allowance when they remain pending", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + 0n, + 0n, + 0n, + largeColdStartDepositsWei, + 0n, + largeColdStartDepositsWei, + ), + ).not.to.be.reverted; + }); + + it("limits first-report validator activation by appeared ETH amount per day", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + 0n, + 0n, + firstDayAppearedLimitWei, + 
pendingAfterExactFirstDayActivationWei, + 0n, + coldStartDepositsWei, + ), + ).not.to.be.reverted; + + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + 0n, + 0n, + validatorsBeyondFirstDayLimitWei, + pendingAfterExceededFirstDayActivationWei, + 0n, + coldStartDepositsWei, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalActivatedBalance") + .withArgs(firstDayAppearedLimitWei, firstDayAppearedLimitWei + 1n); + }); + }); + + context("checkCLPendingBalanceIncrease with existing state", () => { + const oneDay = 24n * 60n * 60n; + const previousValidatorsWei = ether("3650"); + const previousPendingWei = ether("2"); + const allowedActivationWei = ether("1"); + const excessiveActivationWei = ether("2"); + + it("allows a non-cold-start report within the pending corridor", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + previousValidatorsWei, + previousPendingWei, + previousValidatorsWei + allowedActivationWei, + previousPendingWei - allowedActivationWei, + 0n, + 0n, + ), + ).not.to.be.reverted; + }); + + it("reverts with IncorrectTotalCLBalanceIncrease when validators growth exceeds the activated budget", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + previousValidatorsWei, + 0n, + previousValidatorsWei + excessiveActivationWei, + 0n, + 0n, + 0n, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(ether("1"), excessiveActivationWei); + }); + + it("reverts with InvalidClBalancesData when CL withdrawals exceed previous validators balance", async () => { + await expect(checker.checkCLPendingBalanceIncrease(oneDay, ether("10"), 0n, 0n, 0n, ether("11"), 0n)).not.to.be + .reverted; + }); + }); + }); + + context("checkCLBalancesConsistency", () => { + it("reverts on array length mismatch", async () => { + await expect(checker.checkCLBalancesConsistency([1n], [], 10n)).to.be.revertedWithCustomError( + checker, + 
"InvalidClBalancesData", + ); + }); + + it("reverts when module sums are inconsistent", async () => { + await expect(checker.checkCLBalancesConsistency([1n, 2n], [10n, 20n], 40n)) + .to.be.revertedWithCustomError(checker, "InconsistentValidatorsBalanceByModule") + .withArgs(40n, 30n); + }); + + it("passes with consistent data", async () => { + await expect(checker.checkCLBalancesConsistency([1n, 2n], [10n, 20n], 30n)).not.to.be.reverted; + }); + + it("passes for empty arrays and zero totals", async () => { + await expect(checker.checkCLBalancesConsistency([], [], 0n)).not.to.be.reverted; + }); + }); + + context("checkAccountingOracleReport", () => { + const baseReport = { + timeElapsed: 24n * 60n * 60n, + preCLBalance: ether("100000"), + postCLBalance: ether("100001"), + preCLPendingBalance: 0n, + postCLPendingBalance: 0n, + withdrawalVaultBalance: 0n, + elRewardsVaultBalance: 0n, + sharesRequestedToBurn: 0n, + deposits: 0n, + withdrawalsVaultTransfer: 0n, + }; + + const report = ( + overrides: Partial = {}, + ): [bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint] => { + const r = { ...baseReport, ...overrides }; + return [ + r.timeElapsed, + r.preCLBalance - r.preCLPendingBalance - r.deposits, + r.preCLPendingBalance, + r.postCLBalance - r.postCLPendingBalance, + r.postCLPendingBalance, + r.withdrawalVaultBalance, + r.elRewardsVaultBalance, + r.sharesRequestedToBurn, + r.deposits, + r.withdrawalsVaultTransfer, + ]; + }; + + let accountingSigner: HardhatEthersSigner; + + before(async () => { + accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + }); + + it("reverts when not called by accounting", async () => { + await expect(checker.connect(stranger).checkAccountingOracleReport(...report())).to.be.revertedWithCustomError( + checker, + "CalledNotFromAccounting", + ); + }); + + it("reverts when withdrawal vault balance is overstated", async () => { + const actual = await 
ethers.provider.getBalance(withdrawalVault.address); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ withdrawalVaultBalance: actual + 1n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultBalance") + .withArgs(actual); + }); + + it("reverts when EL rewards vault balance is overstated", async () => { + const actual = await ethers.provider.getBalance(elRewardsVault.address); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ elRewardsVaultBalance: actual + 1n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectELRewardsVaultBalance") + .withArgs(actual); + }); + + it("reverts when withdrawals vault transfer exceeds reported vault balance", async () => { + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ withdrawalVaultBalance: 10n, withdrawalsVaultTransfer: 11n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultTransfer") + .withArgs(10n, 11n); + }); + + it("reverts when shares requested to burn are overstated", async () => { + await burner.setSharesRequestedToBurn(10n, 21n); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport(...report({ sharesRequestedToBurn: 32n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectSharesRequestedToBurn") + .withArgs(31n); + }); + + it("reverts when positive CL increase exceeds the pending-backed one-day allowance", async () => { + const preCLBalance = 3_650_000n; + const preCLPendingBalance = 1_000n; + const postCLPendingBalance = 0n; + const allowedIncrease = preCLPendingBalance + (preCLBalance + preCLPendingBalance) / 3650n; + const clIncrease = allowedIncrease + 1n; + const postCLBalance = preCLBalance + clIncrease; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: preCLBalance + preCLPendingBalance, + preCLPendingBalance, + 
postCLBalance, + postCLPendingBalance, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedIncrease, clIncrease); + }); + + it("reverts when a one-day positive CL increase exceeds the pending-backed allowance", async () => { + const preCLBalance = ether("1000000"); + const preCLPendingBalance = ether("100"); + const postCLPendingBalance = 0n; + const allowedIncrease = preCLPendingBalance + (preCLBalance + preCLPendingBalance) / 3650n; + const clIncrease = allowedIncrease + 1n; + const postCLBalance = preCLBalance + clIncrease; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: preCLBalance + preCLPendingBalance, + preCLPendingBalance, + postCLBalance, + postCLPendingBalance, + timeElapsed: 24n * 60n * 60n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedIncrease, clIncrease); + }); + + it("passes with valid report", async () => { + // This scenario uses 1 wei of pending explicitly, though positive growth can also be covered by the validators-based safety cap. 
+ await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: baseReport.preCLBalance + 1n, + preCLPendingBalance: 1n, + postCLBalance: baseReport.preCLBalance + 1n, + postCLPendingBalance: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("allows cold-start onboarding from deposits into pending and then into validators", async () => { + const deposits = ether("200"); + const activated = ether("100"); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: deposits, + postCLBalance: deposits, + preCLPendingBalance: 0n, + postCLPendingBalance: deposits, + deposits, + }), + ), + ).not.to.be.reverted; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: deposits, + postCLBalance: deposits, + preCLPendingBalance: deposits, + postCLPendingBalance: deposits - activated, + deposits: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("does not skip cold-start pending sanity on the first report", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: 0n, + postCLBalance: 1n, + preCLPendingBalance: 0n, + postCLPendingBalance: 1n, + deposits: 0n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, 1n); + }); + + it("allows pending above the funded envelope within external pending balance cap on accounting path", async () => { + const externalPendingBalanceCapEth = 2n; + const preCLBalanceWei = ether("100"); + const reportedPendingWei = (externalPendingBalanceCapEth - 1n) * ether("1"); + + await checker + .connect(admin) + .grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + 
...report({ + preCLBalance: preCLBalanceWei, + postCLBalance: preCLBalanceWei, + preCLPendingBalance: 0n, + postCLPendingBalance: reportedPendingWei, + deposits: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("allows pending exactly at external pending balance cap on accounting path", async () => { + const externalPendingBalanceCapEth = 2n; + const totalCLBalanceWei = ether("100"); + const reportedPendingWei = externalPendingBalanceCapEth * ether("1"); + + await checker + .connect(admin) + .grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: totalCLBalanceWei, + postCLBalance: totalCLBalanceWei, + preCLPendingBalance: 0n, + postCLPendingBalance: reportedPendingWei, + deposits: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("reverts when pending exceeds external pending balance cap on accounting path", async () => { + const externalPendingBalanceCapEth = 2n; + const totalCLBalanceWei = ether("100"); + const pendingBalanceCapWei = externalPendingBalanceCapEth * ether("1"); + const reportedPendingWei = pendingBalanceCapWei + 1n; + + await checker + .connect(admin) + .grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: totalCLBalanceWei, + postCLBalance: totalCLBalanceWei, + preCLPendingBalance: 0n, + postCLPendingBalance: reportedPendingWei, + deposits: 0n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(pendingBalanceCapWei, reportedPendingWei); + }); + + it("does not count external pending balance cap as activation budget on 
accounting path", async () => { + const externalPendingBalanceCapEth = 2n; + const preCLBalanceWei = ether("100"); + const reportedPendingWei = (externalPendingBalanceCapEth - 1n) * ether("1"); + const allowedValidatorsIncreaseWei = ether("10"); + const reportedValidatorsIncreaseWei = allowedValidatorsIncreaseWei + ether("1"); + const postCLBalanceWei = preCLBalanceWei + reportedPendingWei + reportedValidatorsIncreaseWei; + + await checker + .connect(admin) + .grantRole(await checker.EXTERNAL_PENDING_BALANCE_CAP_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setExternalPendingBalanceCapEth(externalPendingBalanceCapEth); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + timeElapsed: 365n * 24n * 60n * 60n, + preCLBalance: preCLBalanceWei, + postCLBalance: postCLBalanceWei, + preCLPendingBalance: 0n, + postCLPendingBalance: reportedPendingWei, + deposits: 0n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedValidatorsIncreaseWei, reportedValidatorsIncreaseWei); + }); + + it("reverts when validator decrease is hidden by pending increase", async () => { + const preCLBalance = ether("10000"); + const postCLBalance = preCLBalance; + const postCLPendingBalance = ether("1000"); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance, + postCLBalance, + postCLPendingBalance, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, postCLPendingBalance); + }); + + it("handles CL balance increase exactly at appeared ETH amount limit", async () => { + const preCLBalance = ether("1000000"); + const preCLPendingBalance = ether("100"); + const postCLBalance = preCLBalance + preCLPendingBalance; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: preCLBalance + preCLPendingBalance, + 
preCLPendingBalance, + postCLBalance, + postCLPendingBalance: 0n, + timeElapsed: 24n * 60n * 60n, + }), + ), + ).not.to.be.reverted; + }); + + it("handles zero time elapsed path for annual increase", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("100000") + 1n, + preCLPendingBalance: 1n, + postCLBalance: ether("100000") + 1n, + postCLPendingBalance: 0n, + timeElapsed: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("handles zero time elapsed path for CL balance increase normalization", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("1000000") + 1n, + preCLPendingBalance: 1n, + postCLBalance: ether("1000000") + 1n, + postCLPendingBalance: 0n, + timeElapsed: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("handles zero pre CL balance for annual increase", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: 1n, + postCLBalance: 1n, + }), + ), + ).not.to.be.reverted; + }); + + it("stores post-cl balance snapshots in reportData", async () => { + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("100") })), + ).not.to.be.reverted; + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ preCLBalance: ether("100"), postCLBalance: ether("100"), deposits: 2n }), + ), + ).not.to.be.reverted; + + expect(await checker.getReportDataCount()).to.equal(2n); + + const first = await checker.reportData(0n); + const second = await checker.reportData(1n); + expect(first.timestamp).to.equal(24n * 60n * 60n); + expect(first.clBalance).to.equal(ether("100")); + expect(first.deposits).to.equal(0n); + expect(first.clWithdrawals).to.equal(0n); + expect(second.timestamp).to.equal(2n * 24n * 60n * 60n); + 
expect(second.clBalance).to.equal(ether("100")); + expect(second.deposits).to.equal(2n); + expect(second.clWithdrawals).to.equal(0n); + }); + }); + + context("checkAccountingOracleReport: CL decrease window and second opinion", () => { + const baseWindowReport = { + timeElapsed: 24n * 60n * 60n, + preCLBalance: ether("100"), + postCLBalance: ether("100"), + preCLPendingBalance: 0n, + postCLPendingBalance: 0n, + withdrawalVaultBalance: 0n, + elRewardsVaultBalance: 0n, + sharesRequestedToBurn: 0n, + deposits: 0n, + withdrawalsVaultTransfer: 0n, + }; + + const report = ( + overrides: Partial = {}, + ): [bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint] => { + const r = { ...baseWindowReport, ...overrides }; + return [ + r.timeElapsed, + r.preCLBalance - r.preCLPendingBalance - r.deposits, + r.preCLPendingBalance, + r.postCLBalance - r.postCLPendingBalance, + r.postCLPendingBalance, + r.withdrawalVaultBalance, + r.elRewardsVaultBalance, + r.sharesRequestedToBurn, + r.deposits, + r.withdrawalsVaultTransfer, + ]; + }; + + let accountingSigner: HardhatEthersSigner; + + before(async () => { + accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + }); + + it("emits NegativeCLRebaseAccepted when decrease is within limit", async () => { + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + + await accountingOracle.setLastProcessingRefSlot(42n); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("97") })), + ) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(42n, ether("97"), ether("3"), ether("3.6")); + }); + + it("uses 36-day timestamp window (not report count) and keeps left boundary report in range", async () => { + const ONE_DAY = 24n * 60n * 60n; + + // Report timestamps become: day 1, day 10, day 46. + // For the third report, windowStart = 46 - 36 = day 10. 
+ // So baseline must be day 10 report (left boundary is included), not day 1. + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: ONE_DAY, preCLBalance: ether("50"), postCLBalance: ether("50") }), + ); + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: 9n * ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("100") }), + ); + + await accountingOracle.setLastProcessingRefSlot(314n); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: 36n * ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("97") }), + ), + ) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(314n, ether("97"), ether("3"), ether("3.6")); + + expect(await checker.getReportDataCount()).to.equal(3n); + const first = await checker.reportData(0n); + const second = await checker.reportData(1n); + const third = await checker.reportData(2n); + expect(first.timestamp).to.equal(ONE_DAY); + expect(second.timestamp).to.equal(10n * ONE_DAY); + expect(third.timestamp).to.equal(46n * ONE_DAY); + }); + + it("excludes all outdated snapshots from the window after a long gap", async () => { + const ONE_DAY = 24n * 60n * 60n; + + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("100") }), + ); + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("100") }), + ); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: 48n * ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("90") }), + ), + ).not.to.be.reverted; + + expect(await checker.getReportDataCount()).to.equal(3n); + const third = await checker.reportData(2n); + 
expect(third.timestamp).to.equal(50n * ONE_DAY); + expect(third.clBalance).to.equal(ether("90")); + }); + + it("uses absolute window diff between baseline and current balances", async () => { + await checker.connect(admin).grantRole(await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setMaxCLBalanceDecreaseBP(1n); + + await checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100000"), postCLBalance: ether("100000") })); + // This intermediate increase is meant to exercise the decrease window, so it needs a matching + // pending-funded activation budget and must not fail earlier in the global CL growth check. + await checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("100020"), + preCLPendingBalance: ether("20"), + postCLBalance: ether("100020"), + postCLPendingBalance: 0n, + }), + ); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100020"), postCLBalance: ether("100015") })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(ether("15"), ether("10")); + }); + + it("reverts with IncorrectCLBalanceDecrease when decrease exceeds limit and no second opinion", async () => { + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(ether("10"), ether("3.6")); + }); + + it("reverts with IncorrectCLBalanceDecreaseWindowData on baseline/flows underflow", async () => { + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + 
preCLBalance: ether("300"), + postCLBalance: ether("90"), + withdrawalVaultBalance: ether("200"), + deposits: 0n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecreaseWindowData") + .withArgs(ether("100"), 0n, ether("200")); + }); + + it("reverts with NegativeRebaseFailedSecondOpinionReportIsNotReady when second opinion report is absent", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ).to.be.revertedWithCustomError(checker, "NegativeRebaseFailedSecondOpinionReportIsNotReady"); + }); + + it("reverts with NegativeRebaseFailedCLBalanceMismatch when second opinion CL balance is lower", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("89") / 1_000_000_000n, 0n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") + 
.withArgs(ether("90"), ether("89"), 50n); + }); + + it("reverts with NegativeRebaseFailedCLBalanceMismatch when second opinion deviation exceeds upper BP limit", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("100") / 1_000_000_000n, 0n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") + .withArgs(ether("90"), ether("100"), 50n); + }); + + it("reverts with NegativeRebaseFailedWithdrawalVaultBalanceMismatch when second opinion withdrawal balance differs", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("90.4") / 1_000_000_000n, 1n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedWithdrawalVaultBalanceMismatch") + .withArgs(0n, 1n); + }); + + it("emits 
NegativeCLRebaseConfirmed when second opinion validates report", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("90.4") / 1_000_000_000n, 0n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.emit(checker, "NegativeCLRebaseConfirmed") + .withArgs(77n, ether("90"), 0n); + }); + }); + + context("checkSimulatedShareRate", () => { + const SHARE_RATE_PRECISION_E27 = 10n ** 27n; + + const actualShareRate = ( + postInternalEther: bigint, + postInternalShares: bigint, + etherToFinalizeWQ: bigint, + sharesToBurnForWithdrawals: bigint, + ) => + ((postInternalEther + etherToFinalizeWQ) * SHARE_RATE_PRECISION_E27) / + (postInternalShares + sharesToBurnForWithdrawals); + + it("passes when simulated rate equals actual rate", async () => { + const postInternalEther = ether("100"); + const postInternalShares = ether("100"); + const simulated = actualShareRate(postInternalEther, postInternalShares, 0n, 0n); + + await expect(checker.checkSimulatedShareRate(postInternalEther, postInternalShares, 0n, 0n, simulated)).not.to.be + .reverted; + }); + + it("passes when deviation is below configured limit", async () => { + const postInternalEther = ether("100"); + const postInternalShares = ether("100"); + const actual = actualShareRate(postInternalEther, postInternalShares, 0n, 0n); + const simulated = actual + (actual * 200n) / TOTAL_BASIS_POINTS; + + await 
expect(checker.checkSimulatedShareRate(postInternalEther, postInternalShares, 0n, 0n, simulated)).not.to.be + .reverted; + }); + + it("reverts when deviation is above configured limit", async () => { + const postInternalEther = ether("100"); + const postInternalShares = ether("100"); + const actual = actualShareRate(postInternalEther, postInternalShares, 0n, 0n); + const simulated = actual + (actual * 251n) / TOTAL_BASIS_POINTS; + + await expect(checker.checkSimulatedShareRate(postInternalEther, postInternalShares, 0n, 0n, simulated)) + .to.be.revertedWithCustomError(checker, "IncorrectSimulatedShareRate") + .withArgs(simulated, actual); + }); + + it("accounts for withdrawal finalization offsets in actual rate", async () => { + const postInternalEther = ether("90"); + const postInternalShares = ether("90"); + const etherToFinalizeWQ = ether("10"); + const sharesToBurnForWithdrawals = ether("10"); + const simulated = actualShareRate( + postInternalEther, + postInternalShares, + etherToFinalizeWQ, + sharesToBurnForWithdrawals, + ); + + await expect( + checker.checkSimulatedShareRate( + postInternalEther, + postInternalShares, + etherToFinalizeWQ, + sharesToBurnForWithdrawals, + simulated, + ), + ).not.to.be.reverted; + }); + }); + + context("migrateBaselineSnapshot", () => { + const MIGRATION_WITHDRAWALS = ether("57600"); + + it("is permissionless before migration completes", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n); + + await expect(migrationChecker.connect(stranger).migrateBaselineSnapshot()).not.to.be.reverted; + }); + + it("reverts on unexpected Lido version", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(3n); + + await expect(migrationChecker.connect(manager).migrateBaselineSnapshot()) + .to.be.revertedWithCustomError(migrationChecker, "UnexpectedLidoVersion") + .withArgs(3n, 4n); + }); + + it("seeds baseline and bootstrap report snapshots", async 
() => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n); + + await expect(migrationChecker.connect(manager).migrateBaselineSnapshot()) + .to.emit(migrationChecker, "BaselineSnapshotMigrated") + .withArgs(ether("107"), ether("3"), MIGRATION_WITHDRAWALS); + + expect(await migrationChecker.getReportDataCount()).to.equal(2n); + + const baselineReport = await migrationChecker.reportData(0n); + const bootstrapFlowReport = await migrationChecker.reportData(1n); + + expect(baselineReport.timestamp).to.equal(0n); + expect(baselineReport.clBalance).to.equal(ether("107")); + expect(baselineReport.deposits).to.equal(0n); + expect(baselineReport.clWithdrawals).to.equal(0n); + + expect(bootstrapFlowReport.timestamp).to.equal(0n); + expect(bootstrapFlowReport.clBalance).to.equal(ether("107")); + expect(bootstrapFlowReport.deposits).to.equal(ether("3")); + expect(bootstrapFlowReport.clWithdrawals).to.equal(MIGRATION_WITHDRAWALS); + }); + + it("uses migrated bootstrap flows in first CL decrease window check", async () => { + const migratedCLBalance = ether("107000"); + const migrationDeposits = ether("3"); + const migrationDepositsCur = ether("3"); + const reportDecrease = ether("2500"); + + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n, { + clActive: ether("100000"), + clPending: ether("7000"), + deposits: migrationDeposits, + depositsCurrent: migrationDepositsCur, + }); + + await migrationChecker.connect(manager).migrateBaselineSnapshot(); - after(async () => { - await checker.connect(admin).revokeRole(await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(), manager); - }); + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const withdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault.address); - it("reverts if called by non-manager", async () => { - await expect( - checker.connect(stranger).setInitialSlashingAndPenaltiesAmount(100n, 
100n), - ).to.be.revertedWithOZAccessControlError( - stranger.address, - await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(), - ); - }); + const maxAllowedCLBalanceDecrease = + ((migratedCLBalance + migrationDeposits - MIGRATION_WITHDRAWALS) * defaultLimits.maxCLBalanceDecreaseBP) / + TOTAL_BASIS_POINTS; - it("reverts if initial slashing amount is greater than max", async () => { await expect( - checker.connect(manager).setInitialSlashingAndPenaltiesAmount(MAX_UINT16, 100n), - ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + migrationChecker + .connect(accountingSigner) + .checkAccountingOracleReport( + 24n * 60n * 60n, + migratedCLBalance, + 0n, + migratedCLBalance - reportDecrease, + 0n, + withdrawalVaultBalance, + 0n, + 0n, + 0n, + 0n, + ), + ) + .to.be.revertedWithCustomError(migrationChecker, "IncorrectCLBalanceDecrease") + .withArgs(reportDecrease, maxAllowedCLBalanceDecrease); }); - it("reverts if penalties amount is greater than max", async () => { - await expect( - checker.connect(manager).setInitialSlashingAndPenaltiesAmount(100n, MAX_UINT16), - ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); + it("reverts when migration is called more than once", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n); - it("sets limit correctly and emits `InitialSlashingAmountSet` and `InactivityPenaltiesAmountSet` events", async () => { - await expect(checker.connect(manager).setInitialSlashingAndPenaltiesAmount(100n, 100n)) - .to.emit(checker, "InitialSlashingAmountSet") - .withArgs(100n) - .to.emit(checker, "InactivityPenaltiesAmountSet") - .withArgs(100n); + await migrationChecker.connect(manager).migrateBaselineSnapshot(); + await expect(migrationChecker.connect(manager).migrateBaselineSnapshot()).to.be.revertedWithCustomError( + migrationChecker, + "MigrationAlreadyDone", + ); }); }); @@ -661,11 +2067,7 @@ describe("OracleReportSanityChecker.sol", () => { }; 
before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); - }); - - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); }); it("works with zero data", async () => { @@ -673,181 +2075,148 @@ describe("OracleReportSanityChecker.sol", () => { ...report(), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); context("trivial post CL < pre CL", () => { before(async () => { - const newRebaseLimit = 100_000; // 0.01% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(100_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - }), + ...report({ postCLBalance: ether("99") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - elRewardsVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("99"), elRewardsVaultBalance: ether("0.1") }), ); - 
expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("0.1")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - withdrawalVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("99"), withdrawalVaultBalance: ether("0.1") }), ); expect(withdrawals).to.equal(ether("0.1")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - sharesRequestedToBurn: ether("0.1"), - }), + ...report({ postCLBalance: ether("99"), sharesRequestedToBurn: ether("0.1") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); expect(sharesToBurn).to.equal(ether("0.1")); }); }); context("trivial post CL > pre CL", () => { before(async () => { - const newRebaseLimit = 100_000_000; // 10% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(100_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - }), + ...report({ 
postCLBalance: ether("100.01") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - elRewardsVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("100.01"), elRewardsVaultBalance: ether("0.1") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("0.1")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - withdrawalVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("100.01"), withdrawalVaultBalance: ether("0.1") }), ); expect(withdrawals).to.equal(ether("0.1")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - sharesRequestedToBurn: ether("0.1"), - }), + ...report({ postCLBalance: ether("100.01"), sharesRequestedToBurn: ether("0.1") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - 
expect(sharesFromWQToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); expect(sharesToBurn).to.equal(ether("0.1")); }); }); - context("non-trivial post CL < pre CL ", () => { + context("non-trivial post CL < pre CL", () => { before(async () => { - const newRebaseLimit = 10_000_000; // 1% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(10_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - }), + ...report({ postCLBalance: ether("99") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - elRewardsVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("99"), elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("2")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - withdrawalVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("99"), withdrawalVaultBalance: ether("5") 
}), ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals and el rewards", async () => { @@ -860,71 +2229,59 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - sharesRequestedToBurn: ether("5"), - }), + ...report({ postCLBalance: ether("99"), sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(1980198019801980198n); // ether(100. - (99. 
/ 1.01)) + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(1980198019801980198n); }); }); context("non-trivial post CL > pre CL", () => { before(async () => { - const newRebaseLimit = 20_000_000; // 2% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(20_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - }), + ...report({ postCLBalance: ether("101") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - elRewardsVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("101"), elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("1")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - withdrawalVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("101"), withdrawalVaultBalance: ether("5") }), ); expect(withdrawals).to.equal(ether("1")); - 
expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals and el rewards", async () => { @@ -937,23 +2294,20 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("1")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - sharesRequestedToBurn: ether("5"), - }), + ...report({ postCLBalance: ether("101"), sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(980392156862745098n); // ether(100. - (101. 
/ 1.02)) + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(980392156862745098n); }); }); @@ -966,8 +2320,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - const newRebaseLimit = 5_000_000; // 0.5% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(5_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { @@ -975,8 +2328,8 @@ describe("OracleReportSanityChecker.sol", () => { ...report(defaultRebaseParams), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(ether("10")); expect(sharesToBurn).to.equal(ether("10")); }); @@ -986,9 +2339,9 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("1.5")); - expect(sharesFromWQToBurn).to.equal(9950248756218905472n); // 100. - 90.5 / 1.005 + expect(sharesFromWQToBurn).to.equal(9950248756218905472n); expect(sharesToBurn).to.equal(9950248756218905472n); }); @@ -998,8 +2351,8 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("1.5")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(9950248756218905472n); // 100. - 90.5 / 1.005 + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(9950248756218905472n); expect(sharesToBurn).to.equal(9950248756218905472n); }); @@ -1009,8 +2362,8 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("1.5")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(9950248756218905472n); // 100. 
- 90.5 / 1.005 + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(9950248756218905472n); expect(sharesToBurn).to.equal(9950248756218905472n); }); @@ -1019,11 +2372,10 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - - expect(sharesFromWQToBurn).to.equal(9950248756218905473n); // ether("100. - (90.5 / 1.005)") - expect(sharesToBurn).to.equal(11442786069651741293n); // ether("100. - (89. / 1.005)") + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(9950248756218905473n); + expect(sharesToBurn).to.equal(11442786069651741293n); }); }); @@ -1036,8 +2388,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - const newRebaseLimit = 40_000_000; // 4% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(40_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { @@ -1045,8 +2396,8 @@ describe("OracleReportSanityChecker.sol", () => { ...report(defaultRebaseParams), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(ether("10")); expect(sharesToBurn).to.equal(ether("10")); }); @@ -1056,10 +2407,10 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("2")); expect(sharesFromWQToBurn).to.equal(9615384615384615384n); - expect(sharesToBurn).to.equal(9615384615384615384n); // 100. - 94. 
/ 1.04 + expect(sharesToBurn).to.equal(9615384615384615384n); }); it("smoothens with withdrawals", async () => { @@ -1068,9 +2419,9 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(9615384615384615384n); - expect(sharesToBurn).to.equal(9615384615384615384n); // 100. - 94. / 1.04 + expect(sharesToBurn).to.equal(9615384615384615384n); }); it("smoothens with withdrawals and el rewards", async () => { @@ -1079,9 +2430,9 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(9615384615384615384n); - expect(sharesToBurn).to.equal(9615384615384615384n); // 100. - 94. / 1.04 + expect(sharesToBurn).to.equal(9615384615384615384n); }); it("smoothens with shares requested to burn", async () => { @@ -1089,10 +2440,10 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(9615384615384615385n); - expect(sharesToBurn).to.equal(11538461538461538461n); // 100. - (92. 
/ 1.04) + expect(sharesToBurn).to.equal(11538461538461538461n); }); }); @@ -1104,14 +2455,13 @@ describe("OracleReportSanityChecker.sol", () => { postCLBalance: ether("1000000"), withdrawalVaultBalance: ether("500"), elRewardsVaultBalance: ether("500"), - sharesRequestedToBurn: ether("0"), + sharesRequestedToBurn: 0n, etherToLockForWithdrawals: ether("40000"), newSharesToBurnForWithdrawals: ether("40000"), }; before(async () => { - const newRebaseLimit = 1_000_000; // 0.1% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(1_000_000n); }); it("smoothens the rebase", async () => { @@ -1121,12 +2471,12 @@ describe("OracleReportSanityChecker.sol", () => { expect(withdrawals).to.equal(ether("500")); expect(elRewards).to.equal(ether("500")); - expect(sharesFromWQToBurn).to.equal(39960039960039960039960n); // ether(1000000 - 961000. / 1.001) + expect(sharesFromWQToBurn).to.equal(39960039960039960039960n); expect(sharesToBurn).to.equal(39960039960039960039960n); }); }); - context("rounding case from Görli", () => { + context("rounding case from Goerli", () => { const rebaseParams = { preTotalPooledEther: 125262263468962792235936n, preTotalShares: 120111767594397261197918n, @@ -1140,8 +2490,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - const newRebaseLimit = 750_000; // 0.075% or 7.5 basis points - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(750_000n); }); it("smoothens the rebase", async () => { @@ -1156,281 +2505,4 @@ describe("OracleReportSanityChecker.sol", () => { }); }); }); - - // NB: negative rebase is handled in `oracleReportSanityChecker.negative-rebase.test.ts` - context("checkAccountingOracleReport", () => { - const report = ( - overrides: Partial<{ - [key in keyof typeof correctOracleReport]: bigint; - }> = {}, - ): [bigint, bigint, bigint, bigint, bigint, 
bigint, bigint, bigint] => { - const reportData = { ...correctOracleReport, ...overrides }; - return [ - reportData.timeElapsed, - reportData.preCLBalance, - reportData.postCLBalance, - reportData.withdrawalVaultBalance, - reportData.elRewardsVaultBalance, - reportData.sharesRequestedToBurn, - reportData.preCLValidators, - reportData.postCLValidators, - ]; - }; - - let accountingSigher: HardhatEthersSigner; - before(async () => { - accountingSigher = await impersonate(await locator.accounting(), ether("1")); - }); - - it("reverts when not called by accounting", async () => { - await expect(checker.connect(stranger).checkAccountingOracleReport(...report())).to.be.revertedWithCustomError( - checker, - "CalledNotFromAccounting", - ); - }); - - it("reverts when actual withdrawal vault balance is less than passed", async () => { - const currentWithdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault); - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - withdrawalVaultBalance: currentWithdrawalVaultBalance + 1n, - }), - ), - ) - .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultBalance") - .withArgs(currentWithdrawalVaultBalance); - }); - - it("reverts when actual el rewards vault balance is less than passed", async () => { - const currentELRewardsVaultBalance = await ethers.provider.getBalance(elRewardsVault); - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - elRewardsVaultBalance: currentELRewardsVaultBalance + 1n, - }), - ), - ) - .to.be.revertedWithCustomError(checker, "IncorrectELRewardsVaultBalance") - .withArgs(currentELRewardsVaultBalance); - }); - - it("reverts when actual shares to burn is less than passed", async () => { - await burner.setSharesRequestedToBurn(10, 21); - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - sharesRequestedToBurn: 32n, - }), - ), - ) - 
.to.be.revertedWithCustomError(checker, "IncorrectSharesRequestedToBurn") - .withArgs(31n); - }); - - it("reverts when reported values overcome annual CL balance limit", async () => { - const maxBasisPoints = 10_000n; - const secondsInOneYear = 365n * 24n * 60n * 60n; - const postCLBalance = ether("150000"); - - // This formula calculates the annualized balance increase in basis points (BP) - // 1. Calculate the absolute balance increase: (postCLBalance - preCLBalance) - // 2. Convert to a relative increase by dividing by preCLBalance - // 3. Annualize by multiplying by (secondsInOneYear / timeElapsed) - // 4. Convert to basis points by multiplying by maxBasisPoints (100_00n) - // The result represents how much the balance would increase over a year at the current rate - const annualBalanceIncrease = - (secondsInOneYear * maxBasisPoints * (postCLBalance - correctOracleReport.preCLBalance)) / - correctOracleReport.preCLBalance / - correctOracleReport.timeElapsed; - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport(...report({ postCLBalance: postCLBalance })), - ) - .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceIncrease") - .withArgs(annualBalanceIncrease); - }); - - it("reverts when amount of appeared validators is greater than possible", async () => { - const insaneValidators = 100000n; - await expect( - checker - .connect(accountingSigher) - .checkAccountingOracleReport( - ...report({ postCLValidators: correctOracleReport.preCLValidators + insaneValidators }), - ), - ) - .to.be.revertedWithCustomError(checker, "IncorrectAppearedValidators") - .withArgs(correctOracleReport.preCLValidators + insaneValidators); - }); - - it("passes all checks with correct oracle report data", async () => { - await expect(checker.connect(accountingSigher).checkAccountingOracleReport(...report())).not.to.be.reverted; - }); - - it("handles zero time passed for annual balance increase", async () => { - await expect( - 
checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - postCLBalance: correctOracleReport.preCLBalance + 1000n, - timeElapsed: 0n, - }), - ), - ).not.to.be.reverted; - }); - - it("handles zero pre CL balance estimating balance increase", async () => { - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - preCLBalance: 0n, - postCLBalance: 1000n, - }), - ), - ).not.to.be.reverted; - }); - - it("handles appeared validators", async () => { - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - preCLValidators: correctOracleReport.preCLValidators, - postCLValidators: correctOracleReport.preCLValidators + 2n, - }), - ), - ).not.to.be.reverted; - }); - - it("handles zero time passed for appeared validators", async () => { - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - preCLValidators: correctOracleReport.preCLValidators, - postCLValidators: correctOracleReport.preCLValidators + 2n, - timeElapsed: 0n, - }), - ), - ).not.to.be.reverted; - }); - }); - - context("checkExitBusOracleReport", () => { - let maxExitRequests: bigint; - - before(async () => { - maxExitRequests = (await checker.getOracleReportLimits()).maxValidatorExitRequestsPerReport; - }); - - it("reverts on too many exit requests", async () => { - await expect(checker.checkExitBusOracleReport(maxExitRequests + 1n)) - .to.be.revertedWithCustomError(checker, "IncorrectNumberOfExitRequestsPerReport") - .withArgs(maxExitRequests); - }); - - it("works with correct validators count", async () => { - await expect(checker.checkExitBusOracleReport(maxExitRequests)).not.to.be.reverted; - }); - }); - - context("checkExitedValidatorsRatePerDay", () => { - let maxExitedValidators: bigint; - - before(async () => { - maxExitedValidators = (await checker.getOracleReportLimits()).exitedValidatorsPerDayLimit; - }); - - it("reverts on too many exited validators", async () => 
{ - await expect(checker.checkExitedValidatorsRatePerDay(maxExitedValidators + 1n)) - .to.be.revertedWithCustomError(checker, "ExitedValidatorsLimitExceeded") - .withArgs(maxExitedValidators, maxExitedValidators + 1n); - }); - - it("works with correct exited validators count", async () => { - await expect(checker.checkExitedValidatorsRatePerDay(maxExitedValidators)).not.to.be.reverted; - }); - }); - - context("checkNodeOperatorsPerExtraDataItemCount", () => { - let maxCount: bigint; - - before(async () => { - maxCount = (await checker.getOracleReportLimits()).maxNodeOperatorsPerExtraDataItem; - }); - - it("reverts on too many node operators", async () => { - await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12, maxCount + 1n)) - .to.be.revertedWithCustomError(checker, "TooManyNodeOpsPerExtraDataItem") - .withArgs(12, maxCount + 1n); - }); - - it("works with correct count", async () => { - await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12, maxCount)).not.to.be.reverted; - }); - }); - - context("checkExtraDataItemsCountPerTransaction", () => { - let maxCount: bigint; - - before(async () => { - maxCount = (await checker.getOracleReportLimits()).maxItemsPerExtraDataTransaction; - }); - - it("reverts on too many items", async () => { - await expect(checker.checkExtraDataItemsCountPerTransaction(maxCount + 1n)) - .to.be.revertedWithCustomError(checker, "TooManyItemsPerExtraDataTransaction") - .withArgs(maxCount, maxCount + 1n); - }); - - it("works with correct count", async () => { - await expect(checker.checkExtraDataItemsCountPerTransaction(maxCount)).not.to.be.reverted; - }); - }); - - context("checkWithdrawalQueueOracleReport", () => { - const oldRequestId = 1n; - const newRequestId = 2n; - let oldRequestCreationTimestamp; - let newRequestCreationTimestamp: bigint; - - const correctWithdrawalQueueOracleReport = { - lastFinalizableRequestId: oldRequestId, - refReportTimestamp: -1n, - }; - - before(async () => { - const currentBlockTimestamp = 
await getCurrentBlockTimestamp(); - correctWithdrawalQueueOracleReport.refReportTimestamp = currentBlockTimestamp; - oldRequestCreationTimestamp = currentBlockTimestamp - defaultLimits.requestTimestampMargin; - - correctWithdrawalQueueOracleReport.lastFinalizableRequestId = oldRequestCreationTimestamp; - newRequestCreationTimestamp = currentBlockTimestamp - defaultLimits.requestTimestampMargin / 2n; - - await withdrawalQueue.setRequestTimestamp(oldRequestId, oldRequestCreationTimestamp); - await withdrawalQueue.setRequestTimestamp(newRequestId, newRequestCreationTimestamp); - - await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); - }); - - after(async () => { - await checker.connect(admin).revokeRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); - }); - - it("reverts when the creation timestamp of requestIdToFinalizeUpTo is too close to report timestamp", async () => { - await expect( - checker.checkWithdrawalQueueOracleReport(newRequestId, correctWithdrawalQueueOracleReport.refReportTimestamp), - ) - .to.be.revertedWithCustomError(checker, "IncorrectRequestFinalization") - .withArgs(newRequestCreationTimestamp); - }); - - it("passes all checks with correct withdrawal queue report data", async () => { - await checker.checkWithdrawalQueueOracleReport( - correctWithdrawalQueueOracleReport.lastFinalizableRequestId, - correctWithdrawalQueueOracleReport.refReportTimestamp, - ); - }); - }); }); diff --git a/test/0.8.9/stakingRouter/stakingRouter.misc.test.ts b/test/0.8.9/stakingRouter/stakingRouter.misc.test.ts deleted file mode 100644 index 528acdd8af..0000000000 --- a/test/0.8.9/stakingRouter/stakingRouter.misc.test.ts +++ /dev/null @@ -1,160 +0,0 @@ -import { expect } from "chai"; -import { hexlify, randomBytes, ZeroAddress } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { 
DepositContract__MockForBeaconChainDepositor, StakingRouter__Harness } from "typechain-types"; - -import { certainAddress, ether, MAX_UINT256, proxify, randomString } from "lib"; - -import { Snapshot } from "test/suite"; - -describe("StakingRouter.sol:misc", () => { - let deployer: HardhatEthersSigner; - let proxyAdmin: HardhatEthersSigner; - let stakingRouterAdmin: HardhatEthersSigner; - let user: HardhatEthersSigner; - - let depositContract: DepositContract__MockForBeaconChainDepositor; - let stakingRouter: StakingRouter__Harness; - let impl: StakingRouter__Harness; - - let originalState: string; - - const lido = certainAddress("test:staking-router:lido"); - const withdrawalCredentials = hexlify(randomBytes(32)); - - before(async () => { - [deployer, proxyAdmin, stakingRouterAdmin, user] = await ethers.getSigners(); - - depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, - }); - - impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [stakingRouter] = await proxify({ impl, admin: proxyAdmin, caller: user }); - }); - - beforeEach(async () => (originalState = await Snapshot.take())); - - afterEach(async () => await Snapshot.restore(originalState)); - - context("initialize", () => { - it("Reverts if admin is zero address", async () => { - await expect(stakingRouter.initialize(ZeroAddress, lido, withdrawalCredentials)).to.be.revertedWithCustomError( - stakingRouter, - "ZeroAddressAdmin", - ); - }); - - it("Reverts if lido is zero address", async () => { - await expect( - stakingRouter.initialize(stakingRouterAdmin.address, ZeroAddress, withdrawalCredentials), - 
).to.be.revertedWithCustomError(stakingRouter, "ZeroAddressLido"); - }); - - it("Initializes the contract version, sets up roles and variables", async () => { - await expect(stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials)) - .to.emit(stakingRouter, "ContractVersionSet") - .withArgs(3) - .and.to.emit(stakingRouter, "RoleGranted") - .withArgs(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address, user.address) - .and.to.emit(stakingRouter, "WithdrawalCredentialsSet") - .withArgs(withdrawalCredentials, user.address); - - expect(await stakingRouter.getContractVersion()).to.equal(3); - expect(await stakingRouter.getLido()).to.equal(lido); - expect(await stakingRouter.getWithdrawalCredentials()).to.equal(withdrawalCredentials); - }); - }); - - context("finalizeUpgrade_v3()", () => { - const STAKE_SHARE_LIMIT = 1_00n; - const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; - const MODULE_FEE = 5_00n; - const TREASURY_FEE = 5_00n; - const MAX_DEPOSITS_PER_BLOCK = 150n; - const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; - - const modulesCount = 3; - - beforeEach(async () => { - // initialize staking router - await stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials); - // grant roles - await stakingRouter - .connect(stakingRouterAdmin) - .grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), stakingRouterAdmin); - - for (let i = 0; i < modulesCount; i++) { - await stakingRouter - .connect(stakingRouterAdmin) - .addStakingModule( - randomString(8), - certainAddress(`test:staking-router:staking-module-${i}`), - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - } - expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCount); - }); - - it("fails with UnexpectedContractVersion error when called on implementation", async () => { - await expect(impl.finalizeUpgrade_v3()) - 
.to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(MAX_UINT256, 2); - }); - - it("fails with UnexpectedContractVersion error when called on deployed from scratch SRv2", async () => { - await expect(stakingRouter.finalizeUpgrade_v3()) - .to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(3, 2); - }); - - context("simulate upgrade from v2", () => { - beforeEach(async () => { - // reset contract version - await stakingRouter.testing_setBaseVersion(2); - }); - - it("sets correct contract version", async () => { - expect(await stakingRouter.getContractVersion()).to.equal(2); - await stakingRouter.finalizeUpgrade_v3(); - expect(await stakingRouter.getContractVersion()).to.be.equal(3); - }); - }); - }); - - context("receive", () => { - it("Reverts", async () => { - await expect( - user.sendTransaction({ - to: stakingRouter, - value: ether("1.0"), - }), - ).to.be.revertedWithCustomError(stakingRouter, "DirectETHTransfer"); - }); - }); - - context("getLido", () => { - it("Returns zero address before initialization", async () => { - expect(await stakingRouter.getLido()).to.equal(ZeroAddress); - }); - - it("Returns lido address after initialization", async () => { - await stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials); - - expect(await stakingRouter.getLido()).to.equal(lido); - }); - }); -}); diff --git a/test/0.8.9/stakingRouter/stakingRouter.module-management.test.ts b/test/0.8.9/stakingRouter/stakingRouter.module-management.test.ts deleted file mode 100644 index 06c5579b41..0000000000 --- a/test/0.8.9/stakingRouter/stakingRouter.module-management.test.ts +++ /dev/null @@ -1,483 +0,0 @@ -import { expect } from "chai"; -import { hexlify, randomBytes, ZeroAddress } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { StakingRouter } from "typechain-types"; - -import { certainAddress, getNextBlock, 
proxify, randomString } from "lib"; - -const UINT64_MAX = 2n ** 64n - 1n; - -describe("StakingRouter.sol:module-management", () => { - let deployer: HardhatEthersSigner; - let admin: HardhatEthersSigner; - let user: HardhatEthersSigner; - - let stakingRouter: StakingRouter; - - beforeEach(async () => { - [deployer, admin, user] = await ethers.getSigners(); - - const depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, - }); - - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [stakingRouter] = await proxify({ impl, admin }); - - // initialize staking router - await stakingRouter.initialize( - admin, - certainAddress("test:staking-router-modules:lido"), // mock lido address - hexlify(randomBytes(32)), // mock withdrawal credentials - ); - - // grant roles - await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); - }); - - context("addStakingModule", () => { - const NAME = "StakingModule"; - const ADDRESS = certainAddress("test:staking-router:staking-module"); - const STAKE_SHARE_LIMIT = 1_00n; - const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; - const MODULE_FEE = 5_00n; - const TREASURY_FEE = 5_00n; - const MAX_DEPOSITS_PER_BLOCK = 150n; - const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; - - it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter - .connect(user) - .addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithOZAccessControlError(user.address, await 
stakingRouter.STAKING_MODULE_MANAGE_ROLE()); - }); - - it("Reverts if the target share is greater than 100%", async () => { - const STAKE_SHARE_LIMIT_OVER_100 = 100_01; - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT_OVER_100, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); - }); - - it("Reverts if the sum of module and treasury fees is greater than 100%", async () => { - const MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE_INVALID, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - - const TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE_INVALID, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - }); - - it("Reverts if the staking module address is zero address", async () => { - await expect( - stakingRouter.addStakingModule( - NAME, - ZeroAddress, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "ZeroAddressStakingModule"); - }); - - it("Reverts if the staking module name is empty string", async () => { - const NAME_EMPTY_STRING = ""; - - await expect( - stakingRouter.addStakingModule( - NAME_EMPTY_STRING, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - 
).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); - }); - - it("Reverts if the staking module name is too long", async () => { - const MAX_STAKING_MODULE_NAME_LENGTH = await stakingRouter.MAX_STAKING_MODULE_NAME_LENGTH(); - const NAME_TOO_LONG = randomString(Number(MAX_STAKING_MODULE_NAME_LENGTH + 1n)); - - await expect( - stakingRouter.addStakingModule( - NAME_TOO_LONG, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); - }); - - it("Reverts if the max number of staking modules is reached", async () => { - const MAX_STAKING_MODULES_COUNT = await stakingRouter.MAX_STAKING_MODULES_COUNT(); - - for (let i = 0; i < MAX_STAKING_MODULES_COUNT; i++) { - await stakingRouter.addStakingModule( - randomString(8), - certainAddress(`test:staking-router:staking-module-${i}`), - 1_00, - 1_00, - 1_00, - 1_00, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - } - - expect(await stakingRouter.getStakingModulesCount()).to.equal(MAX_STAKING_MODULES_COUNT); - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "StakingModulesLimitExceeded"); - }); - - it("Reverts if adding a module with the same address", async () => { - await stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - 
).to.be.revertedWithCustomError(stakingRouter, "StakingModuleAddressExists"); - }); - - it("Adds the module to stakingRouter and emits events", async () => { - const stakingModuleId = (await stakingRouter.getStakingModulesCount()) + 1n; - const moduleAddedBlock = await getNextBlock(); - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ) - .to.be.emit(stakingRouter, "StakingRouterETHDeposited") - .withArgs(stakingModuleId, 0) - .and.to.be.emit(stakingRouter, "StakingModuleAdded") - .withArgs(stakingModuleId, ADDRESS, NAME, admin.address) - .and.to.be.emit(stakingRouter, "StakingModuleShareLimitSet") - .withArgs(stakingModuleId, STAKE_SHARE_LIMIT, PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) - .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") - .withArgs(stakingModuleId, MODULE_FEE, TREASURY_FEE, admin.address); - - expect(await stakingRouter.getStakingModule(stakingModuleId)).to.deep.equal([ - stakingModuleId, - ADDRESS, - MODULE_FEE, - TREASURY_FEE, - STAKE_SHARE_LIMIT, - 0n, // status active - NAME, - moduleAddedBlock.timestamp, - moduleAddedBlock.number, - 0n, // exited validators, - PRIORITY_EXIT_SHARE_THRESHOLD, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ]); - }); - }); - - context("updateStakingModule", () => { - const NAME = "StakingModule"; - const ADDRESS = certainAddress("test:staking-router-modules:staking-module"); - const STAKE_SHARE_LIMIT = 1_00n; - const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; - const MODULE_FEE = 5_00n; - const TREASURY_FEE = 5_00n; - const MAX_DEPOSITS_PER_BLOCK = 150n; - const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; - - let ID: bigint; - - const NEW_STAKE_SHARE_LIMIT = 2_00n; - const NEW_PRIORITY_EXIT_SHARE_THRESHOLD = NEW_STAKE_SHARE_LIMIT; - - const NEW_MODULE_FEE = 6_00n; - const NEW_TREASURY_FEE = 4_00n; - - const 
NEW_MAX_DEPOSITS_PER_BLOCK = 100n; - const NEW_MIN_DEPOSIT_BLOCK_DISTANCE = 20n; - - beforeEach(async () => { - await stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - ID = await stakingRouter.getStakingModulesCount(); - }); - - it("Reverts if the caller does not have the role", async () => { - stakingRouter = stakingRouter.connect(user); - - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); - }); - - it("Reverts if the new target share is greater than 100%", async () => { - const NEW_STAKE_SHARE_LIMIT_OVER_100 = 100_01; - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT_OVER_100, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); - }); - - it("Reverts if the new priority exit share is greater than 100%", async () => { - const NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100 = 100_01; - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); - }); - - it("Reverts if the new priority exit share is less than stake share limit", async () => { - const UPGRADED_STAKE_SHARE_LIMIT = 55_00n; - const UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD = 50_00n; - await expect( - 
stakingRouter.updateStakingModule( - ID, - UPGRADED_STAKE_SHARE_LIMIT, - UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); - }); - - it("Reverts if the new deposit block distance is zero", async () => { - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - 0n, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); - }); - - it("Reverts if the new deposit block distance is great then uint64 max", async () => { - await stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - UINT64_MAX, - ); - - expect((await stakingRouter.getStakingModule(ID)).minDepositBlockDistance).to.be.equal(UINT64_MAX); - - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - UINT64_MAX + 1n, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); - }); - - it("Reverts if the new max deposits per block is great then uint64 max", async () => { - await stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - UINT64_MAX, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ); - - expect((await stakingRouter.getStakingModule(ID)).maxDepositsPerBlock).to.be.equal(UINT64_MAX); - - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - UINT64_MAX + 1n, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - 
).to.be.revertedWithCustomError(stakingRouter, "InvalidMaxDepositPerBlockValue"); - }); - - it("Reverts if the sum of the new module and treasury fees is greater than 100%", async () => { - const NEW_MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; - - await expect( - stakingRouter.updateStakingModule( - ID, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE_INVALID, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - - const NEW_TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; - await expect( - stakingRouter.updateStakingModule( - ID, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - NEW_TREASURY_FEE_INVALID, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - }); - - it("Update target share, module and treasury fees and emits events", async () => { - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ) - .to.be.emit(stakingRouter, "StakingModuleShareLimitSet") - .withArgs(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) - .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") - .withArgs(ID, NEW_MODULE_FEE, NEW_TREASURY_FEE, admin.address); - }); - }); -}); diff --git a/test/0.8.9/stakingRouter/stakingRouter.versioned.test.ts b/test/0.8.9/stakingRouter/stakingRouter.versioned.test.ts deleted file mode 100644 index 059ee1148c..0000000000 --- a/test/0.8.9/stakingRouter/stakingRouter.versioned.test.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { expect } from "chai"; -import { randomBytes } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { StakingRouter } from 
"typechain-types"; - -import { MAX_UINT256, proxify, randomAddress } from "lib"; - -describe("StakingRouter.sol:Versioned", () => { - let deployer: HardhatEthersSigner; - let admin: HardhatEthersSigner; - - let impl: StakingRouter; - let versioned: StakingRouter; - - const petrifiedVersion = MAX_UINT256; - - before(async () => { - [deployer, admin] = await ethers.getSigners(); - - // deploy staking router - const depositContract = randomAddress(); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, - }); - - impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [versioned] = await proxify({ impl, admin }); - }); - - context("constructor", () => { - it("Petrifies the implementation", async () => { - expect(await impl.getContractVersion()).to.equal(petrifiedVersion); - }); - }); - - context("getContractVersion", () => { - it("Returns 0 as the initial contract version", async () => { - expect(await versioned.getContractVersion()).to.equal(0n); - }); - }); - - context("initialize", () => { - it("Increments version", async () => { - await versioned.initialize(randomAddress(), randomAddress(), randomBytes(32)); - - expect(await versioned.getContractVersion()).to.equal(3n); - }); - }); -}); diff --git a/test/0.8.9/withdrawalVault/eip7251Mock.ts b/test/0.8.9/withdrawalVault/eip7251Mock.ts new file mode 100644 index 0000000000..1501a0dc54 --- /dev/null +++ b/test/0.8.9/withdrawalVault/eip7251Mock.ts @@ -0,0 +1,55 @@ +import { expect } from "chai"; +import { ContractTransactionReceipt, ContractTransactionResponse } from "ethers"; +import { ethers } from "hardhat"; + +import { EIP7251ConsolidationRequest__Mock } from "typechain-types"; + +import { EIP7251_ADDRESS, findEventsWithInterfaces } from 
"lib"; + +const eventName = "ConsolidationRequestAdded__Mock"; +const eip7251MockEventABI = [`event ${eventName}(bytes request, uint256 fee)`]; +const eip7251MockInterface = new ethers.Interface(eip7251MockEventABI); + +export const deployEIP7251ConsolidationRequestContractMock = async ( + fee: bigint, +): Promise => { + const eip7251Mock = await ethers.deployContract("EIP7251ConsolidationRequest__Mock"); + const eip7251MockAddress = await eip7251Mock.getAddress(); + + await ethers.provider.send("hardhat_setCode", [EIP7251_ADDRESS, await ethers.provider.getCode(eip7251MockAddress)]); + + const contract = await ethers.getContractAt("EIP7251ConsolidationRequest__Mock", EIP7251_ADDRESS); + await contract.mock__setFee(fee); + + return contract; +}; + +export const encodeEIP7251Payload = (sourcePubkey: string, targetPubkey: string): string => { + const sourcePubkeyHex = sourcePubkey.startsWith("0x") ? sourcePubkey.slice(2) : sourcePubkey; + const targetPubkeyHex = targetPubkey.startsWith("0x") ? 
targetPubkey.slice(2) : targetPubkey; + return `0x${sourcePubkeyHex}${targetPubkeyHex}`; +}; + +export function findEIP7251MockEvents(receipt: ContractTransactionReceipt) { + return findEventsWithInterfaces(receipt!, eventName, [eip7251MockInterface]); +} + +export const testEIP7251Mock = async ( + addConsolidationRequests: () => Promise, + sourcePubkeys: string[], + targetPubkeys: string[], + expectedFee: bigint, +): Promise<{ tx: ContractTransactionResponse; receipt: ContractTransactionReceipt }> => { + const tx = await addConsolidationRequests(); + const receipt = (await tx.wait()) as ContractTransactionReceipt; + + const events = findEIP7251MockEvents(receipt); + expect(events.length).to.equal(sourcePubkeys.length); + + for (let i = 0; i < sourcePubkeys.length; i++) { + expect(events[i].args[0]).to.equal(encodeEIP7251Payload(sourcePubkeys[i], targetPubkeys[i])); + expect(events[i].args[1]).to.equal(expectedFee); + } + + return { tx, receipt }; +}; diff --git a/test/0.8.9/withdrawalVault/utils.ts b/test/0.8.9/withdrawalVault/utils.ts index 968e73df9b..72560c9573 100644 --- a/test/0.8.9/withdrawalVault/utils.ts +++ b/test/0.8.9/withdrawalVault/utils.ts @@ -35,3 +35,20 @@ export function generateWithdrawalRequestPayload(numberOfRequests: number) { mixedWithdrawalAmounts, }; } + +export function generateConsolidationRequestPayload(numberOfRequests: number) { + const sourcePubkeys: string[] = []; + const targetPubkeys: string[] = []; + + for (let i = 1; i <= numberOfRequests; i++) { + sourcePubkeys.push(toValidatorPubKey(i)); + targetPubkeys.push(toValidatorPubKey(i + numberOfRequests)); // Ensure unique target pubkeys + } + + return { + sourcePubkeysHexArray: sourcePubkeys.map((pk) => `0x${pk}`), + targetPubkeysHexArray: targetPubkeys.map((pk) => `0x${pk}`), + sourcePubkeys, + targetPubkeys, + }; +} diff --git a/test/0.8.9/withdrawalVault/withdrawalVault.test.ts b/test/0.8.9/withdrawalVault/withdrawalVault.test.ts index d6260ae9cb..98b12ab945 100644 --- 
a/test/0.8.9/withdrawalVault/withdrawalVault.test.ts +++ b/test/0.8.9/withdrawalVault/withdrawalVault.test.ts @@ -7,13 +7,21 @@ import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; import { EIP7002WithdrawalRequest__Mock, + EIP7251ConsolidationRequest__Mock, ERC20__Harness, ERC721__Harness, Lido__MockForWithdrawalVault, WithdrawalVault__Harness, } from "typechain-types"; -import { EIP7002_ADDRESS, EIP7002_MIN_WITHDRAWAL_REQUEST_FEE, MAX_UINT256, proxify } from "lib"; +import { + EIP7002_ADDRESS, + EIP7002_MIN_WITHDRAWAL_REQUEST_FEE, + EIP7251_ADDRESS, + EIP7251_MIN_CONSOLIDATION_FEE, + MAX_UINT256, + proxify, +} from "lib"; import { Snapshot } from "test/suite"; @@ -23,7 +31,13 @@ import { findEIP7002MockEvents, testEIP7002Mock, } from "./eip7002Mock"; -import { generateWithdrawalRequestPayload } from "./utils"; +import { + deployEIP7251ConsolidationRequestContractMock, + encodeEIP7251Payload, + findEIP7251MockEvents, + testEIP7251Mock, +} from "./eip7251Mock"; +import { generateConsolidationRequestPayload, generateWithdrawalRequestPayload } from "./utils"; const PETRIFIED_VERSION = MAX_UINT256; @@ -32,11 +46,13 @@ describe("WithdrawalVault.sol", () => { let user: HardhatEthersSigner; let treasury: HardhatEthersSigner; let triggerableWithdrawalsGateway: HardhatEthersSigner; + let consolidationGateway: HardhatEthersSigner; let stranger: HardhatEthersSigner; let originalState: string; let withdrawalsPredeployed: EIP7002WithdrawalRequest__Mock; + let consolidationPredeployed: EIP7251ConsolidationRequest__Mock; let lido: Lido__MockForWithdrawalVault; let lidoAddress: string; @@ -47,18 +63,29 @@ describe("WithdrawalVault.sol", () => { before(async () => { [owner, user, treasury] = await ethers.getSigners(); // TODO - [owner, treasury, triggerableWithdrawalsGateway, stranger] = await ethers.getSigners(); + [owner, treasury, triggerableWithdrawalsGateway, consolidationGateway, stranger] = await ethers.getSigners(); withdrawalsPredeployed = await 
deployEIP7002WithdrawalRequestContractMock(EIP7002_MIN_WITHDRAWAL_REQUEST_FEE); expect(await withdrawalsPredeployed.getAddress()).to.equal(EIP7002_ADDRESS); + consolidationPredeployed = await deployEIP7251ConsolidationRequestContractMock(EIP7251_MIN_CONSOLIDATION_FEE); + + expect(await consolidationPredeployed.getAddress()).to.equal(EIP7251_ADDRESS); + lido = await ethers.deployContract("Lido__MockForWithdrawalVault"); lidoAddress = await lido.getAddress(); impl = await ethers.deployContract( "WithdrawalVault__Harness", - [lidoAddress, treasury.address, triggerableWithdrawalsGateway.address], + [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ], owner, ); @@ -78,25 +105,91 @@ describe("WithdrawalVault.sol", () => { ZeroAddress, treasury.address, triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, ]), ).to.be.revertedWithCustomError(vault, "ZeroAddress"); }); it("Reverts if the treasury address is zero", async () => { await expect( - ethers.deployContract("WithdrawalVault", [lidoAddress, ZeroAddress, triggerableWithdrawalsGateway.address]), + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + ZeroAddress, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ]), ).to.be.revertedWithCustomError(vault, "ZeroAddress"); }); it("Reverts if the triggerable withdrawal gateway address is zero", async () => { await expect( - ethers.deployContract("WithdrawalVault", [lidoAddress, treasury.address, ZeroAddress]), + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + ZeroAddress, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ]), + ).to.be.revertedWithCustomError(vault, "ZeroAddress"); + }); + + it("Reverts if the consolidation gateway address is zero", async () => { + await expect( + 
ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + ZeroAddress, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ]), + ).to.be.revertedWithCustomError(vault, "ZeroAddress"); + }); + + it("Reverts if the withdrawal request address is zero", async () => { + await expect( + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + ZeroAddress, + EIP7251_ADDRESS, + ]), + ).to.be.revertedWithCustomError(vault, "ZeroAddress"); + }); + + it("Reverts if the consolidation request address is zero", async () => { + await expect( + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + ZeroAddress, + ]), ).to.be.revertedWithCustomError(vault, "ZeroAddress"); }); it("Sets initial properties", async () => { expect(await vault.LIDO()).to.equal(lidoAddress, "Lido address"); expect(await vault.TREASURY()).to.equal(treasury.address, "Treasury address"); + expect(await vault.TRIGGERABLE_WITHDRAWALS_GATEWAY()).to.equal( + triggerableWithdrawalsGateway.address, + "Triggerable Withdrawals Gateway address", + ); + expect(await vault.CONSOLIDATION_GATEWAY()).to.equal( + consolidationGateway.address, + "Consolidation Gateway address", + ); + expect(await vault.WITHDRAWAL_REQUEST()).to.equal(EIP7002_ADDRESS, "Withdrawal Request address"); + expect(await vault.CONSOLIDATION_REQUEST()).to.equal(EIP7251_ADDRESS, "Consolidation Request address"); }); it("Petrifies the implementation", async () => { @@ -112,38 +205,38 @@ describe("WithdrawalVault.sol", () => { it("Should revert if the contract is already initialized", async () => { await vault.initialize(); - await expect(vault.initialize()).to.be.revertedWithCustomError(vault, "UnexpectedContractVersion").withArgs(2, 0); + await 
expect(vault.initialize()).to.be.revertedWithCustomError(vault, "UnexpectedContractVersion").withArgs(3, 0); }); it("Initializes the contract", async () => { - await expect(vault.initialize()).to.emit(vault, "ContractVersionSet").withArgs(2); + await expect(vault.initialize()).to.emit(vault, "ContractVersionSet").withArgs(3); }); }); - context("finalizeUpgrade_v2()", () => { + context("finalizeUpgrade_v3()", () => { it("Should revert with UnexpectedContractVersion error when called on implementation", async () => { - await expect(impl.finalizeUpgrade_v2()) + await expect(impl.finalizeUpgrade_v3()) .to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(MAX_UINT256, 1); + .withArgs(MAX_UINT256, 2); }); - it("Should revert with UnexpectedContractVersion error when called on deployed from scratch WithdrawalVaultV2", async () => { + it("Should revert with UnexpectedContractVersion error when called on deployed from scratch WithdrawalVaultV3", async () => { await vault.initialize(); - await expect(vault.finalizeUpgrade_v2()) + await expect(vault.finalizeUpgrade_v3()) .to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(2, 1); + .withArgs(3, 2); }); - context("Simulate upgrade from v1", () => { + context("Simulate upgrade from v2", () => { beforeEach(async () => { - await vault.harness__initializeContractVersionTo(1); + await vault.harness__initializeContractVersionTo(2); }); it("Should set correct contract version", async () => { - expect(await vault.getContractVersion()).to.equal(1); - await vault.finalizeUpgrade_v2(); - expect(await vault.getContractVersion()).to.be.equal(2); + expect(await vault.getContractVersion()).to.equal(2); + await vault.finalizeUpgrade_v3(); + expect(await vault.getContractVersion()).to.be.equal(3); }); }); }); @@ -346,20 +439,24 @@ describe("WithdrawalVault.sol", () => { vault .connect(triggerableWithdrawalsGateway) .addWithdrawalRequests(invalidPubkeyHexString, [1n], { value: fee }), - 
).to.be.revertedWithPanic(1); // assertion + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidPubkeyHexString[0]); }); it("Should revert if last pubkey not 48 bytes", async function () { const validPubkey = - "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f"; - const invalidPubkey = "1234"; - const pubkeysHexArray = [`0x${validPubkey}`, `0x${invalidPubkey}`]; + "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f"; + const invalidPubkey = `0x${"12345".repeat(10)}`; // 50 characters, i.e. 25 bytes + const pubkeysHexArray = [validPubkey, invalidPubkey]; const fee = (await getFee()) * 2n; // 2 requests await expect( vault.connect(triggerableWithdrawalsGateway).addWithdrawalRequests(pubkeysHexArray, [1n, 2n], { value: fee }), - ).to.be.revertedWithPanic(1); // assertion + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidPubkey); }); it("Should revert if addition fails at the withdrawal request contract", async function () { @@ -582,4 +679,349 @@ describe("WithdrawalVault.sol", () => { }); }); }); + + context("get consolidation request fee", () => { + it("Should get fee from the EIP-7251 contract", async function () { + await consolidationPredeployed.mock__setFee(333n); + expect( + (await vault.getConsolidationRequestFee()) == 333n, + "consolidation request should use fee from the EIP-7251 contract", + ); + }); + + it("Should revert if fee read fails", async function () { + await consolidationPredeployed.mock__setFailOnGetFee(true); + await expect(vault.getConsolidationRequestFee()).to.be.revertedWithCustomError(vault, "FeeReadFailed"); + }); + + ["0x", "0x01", "0x" + "0".repeat(61) + "1", "0x" + "0".repeat(65) + "1"].forEach((unexpectedFee) => { + it(`Should revert if unexpected fee value ${unexpectedFee} is returned`, async function () { + await 
consolidationPredeployed.mock__setFeeRaw(unexpectedFee); + await expect(vault.getConsolidationRequestFee()).to.be.revertedWithCustomError(vault, "FeeInvalidData"); + }); + }); + }); + + async function getConsolidationFee(): Promise { + const fee = await vault.getConsolidationRequestFee(); + + return ethers.parseUnits(fee.toString(), "wei"); + } + + async function getConsolidationPredeployedContractBalance(): Promise { + const contractAddress = await consolidationPredeployed.getAddress(); + return await ethers.provider.getBalance(contractAddress); + } + + context("add consolidation requests", () => { + beforeEach(async () => { + await vault.initialize(); + }); + + it("Should revert if the caller is not Consolidation Gateway", async () => { + await expect( + vault.connect(stranger).addConsolidationRequests(["0x1234"], ["0x5678"]), + ).to.be.revertedWithCustomError(vault, "NotConsolidationGateway"); + }); + + it("Should revert if empty arrays are provided", async function () { + await expect(vault.connect(consolidationGateway).addConsolidationRequests([], [], { value: 1n })) + .to.be.revertedWithCustomError(vault, "ZeroArgument") + .withArgs("sourcePubkeys"); + }); + + it("Should revert if array lengths do not match", async function () { + const requestCount = 2; + const { sourcePubkeysHexArray } = generateConsolidationRequestPayload(requestCount); + const { targetPubkeysHexArray } = generateConsolidationRequestPayload(1); // Only one target pubkey + + const totalConsolidationFee = (await getConsolidationFee()) * BigInt(requestCount); + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: totalConsolidationFee }), + ) + .to.be.revertedWithCustomError(vault, "ArraysLengthMismatch") + .withArgs(requestCount, targetPubkeysHexArray.length); + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, [], { value: totalConsolidationFee }), 
+ ) + .to.be.revertedWithCustomError(vault, "ArraysLengthMismatch") + .withArgs(requestCount, 0); + }); + + it("Should revert if not enough fee is sent", async function () { + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(1); + + await consolidationPredeployed.mock__setFee(3n); // Set fee to 3 gwei + + // 1. Should revert if no fee is sent + await expect( + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray), + ) + .to.be.revertedWithCustomError(vault, "IncorrectFee") + .withArgs(3n, 0); + + // 2. Should revert if fee is less than required + const insufficientFee = 2n; + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: insufficientFee }), + ) + .to.be.revertedWithCustomError(vault, "IncorrectFee") + .withArgs(3n, 2n); + }); + + it("Should revert if source pubkey is not 48 bytes", async function () { + // Invalid source pubkey (only 2 bytes) + const invalidSourcePubkey = "0x1234"; + const validTargetPubkey = "0x" + "5".repeat(96); // 48 bytes + + const fee = await getConsolidationFee(); + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests([invalidSourcePubkey], [validTargetPubkey], { value: fee }), + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidSourcePubkey); + }); + + it("Should revert if target pubkey is not 48 bytes", async function () { + const validSourcePubkey = "0x" + "1".repeat(96); // 48 bytes + // Invalid target pubkey (only 2 bytes) + const invalidTargetPubkey = "0x5678"; + + const fee = await getConsolidationFee(); + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests([validSourcePubkey], [invalidTargetPubkey], { value: fee }), + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidTargetPubkey); + }); + + it("Should revert if 
addition fails at the consolidation request contract", async function () { + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(1); + const fee = await getConsolidationFee(); + + // Set mock to fail on add + await consolidationPredeployed.mock__setFailOnAddRequest(true); + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: fee }), + ).to.be.revertedWithCustomError(vault, "RequestAdditionFailed"); + }); + + it("Should revert when fee read fails", async function () { + await consolidationPredeployed.mock__setFailOnGetFee(true); + + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(2); + const fee = 10n; + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: fee }), + ).to.be.revertedWithCustomError(vault, "FeeReadFailed"); + }); + + it("Should revert when the provided fee exceeds the required amount", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(fee); + const consolidationFee = 9n + 1n; // 3 request * 3 gwei (fee) + 1 gwei (extra fee)= 10 gwei + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: consolidationFee }), + ) + .to.be.revertedWithCustomError(vault, "IncorrectFee") + .withArgs(9n, 10n); + }); + + ["0x", "0x01", "0x" + "0".repeat(61) + "1", "0x" + "0".repeat(65) + "1"].forEach((unexpectedFee) => { + it(`Should revert if unexpected fee value ${unexpectedFee} is returned`, async function () { + await consolidationPredeployed.mock__setFeeRaw(unexpectedFee); + + const { sourcePubkeysHexArray, targetPubkeysHexArray } = 
generateConsolidationRequestPayload(1); + const fee = 10n; + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: fee }), + ).to.be.revertedWithCustomError(vault, "FeeInvalidData"); + }); + }); + + it("Should accept consolidation requests when the provided fee matches the exact required amount", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(3n); + const expectedTotalConsolidationFee = 9n; + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + + // Check extremely high fee + const highFee = ethers.parseEther("10"); + await consolidationPredeployed.mock__setFee(highFee); + const expectedLargeTotalConsolidationFee = ethers.parseEther("30"); + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedLargeTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + highFee, + ); + }); + + it("Should emit consolidation event", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(fee); + const expectedTotalConsolidationFee = 9n; // 3 requests * 3 gwei (fee) = 9 gwei + + await expect( + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + ) + .to.emit(vault, "ConsolidationRequestAdded") + 
.withArgs(encodeEIP7251Payload(sourcePubkeys[0], targetPubkeys[0])) + .and.to.emit(vault, "ConsolidationRequestAdded") + .withArgs(encodeEIP7251Payload(sourcePubkeys[1], targetPubkeys[1])) + .and.to.emit(vault, "ConsolidationRequestAdded") + .withArgs(encodeEIP7251Payload(sourcePubkeys[2], targetPubkeys[2])); + }); + + it("Should not affect contract balance", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(fee); + const expectedTotalConsolidationFee = 9n; // 3 requests * 3 gwei (fee) = 9 gwei + + const initialBalance = await getWithdrawalCredentialsContractBalance(); + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + expect(await getWithdrawalCredentialsContractBalance()).to.equal(initialBalance); + }); + + it("Should transfer the total calculated fee to the EIP-7251 consolidation contract", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(3n); + const expectedTotalConsolidationFee = 9n; + + const initialBalance = await getConsolidationPredeployedContractBalance(); + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + + expect(await getConsolidationPredeployedContractBalance()).to.equal( + initialBalance + expectedTotalConsolidationFee, + ); + }); + + it("Should ensure consolidation requests are 
encoded as expected with a 96-byte pubkeys ", async function () { + const requestCount = 16; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const tx = await vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: 16n }); + + const receipt = await tx.wait(); + + const events = findEIP7251MockEvents(receipt!); + expect(events.length).to.equal(requestCount); + + for (let i = 0; i < requestCount; i++) { + const encodedRequest = events[i].args[0]; + // 0x (2 characters) + 48-byte pubkey (96 characters) + 48-byte pubkey (96 characters) = 194 characters + expect(encodedRequest.length).to.equal(194); + + expect(encodedRequest.slice(0, 2)).to.equal("0x"); + expect(encodedRequest.slice(2, 98)).to.equal(sourcePubkeys[i]); + expect(encodedRequest.slice(98, 194)).to.equal(targetPubkeys[i]); + } + }); + + const testCasesForConsolidationRequests = [ + { requestCount: 1 }, + { requestCount: 3 }, + { requestCount: 7 }, + { requestCount: 10 }, + { requestCount: 100 }, + ]; + + testCasesForConsolidationRequests.forEach(({ requestCount }) => { + it(`Should process ${requestCount} consolidation request(s) successfully`, async function () { + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 1n; + const expectedTotalConsolidationFee = BigInt(requestCount) * fee; + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + }); + }); + }); }); diff --git a/test/common/contracts/RateLimit__Harness.sol b/test/common/contracts/RateLimit__Harness.sol new file mode 100644 index 0000000000..3c45dc089a --- /dev/null +++ 
b/test/common/contracts/RateLimit__Harness.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity 0.8.25; + +import {LimitData, RateLimitStorage, RateLimit} from "contracts/common/lib/RateLimit.sol"; + +contract RateLimitStorage__Harness { + using RateLimitStorage for bytes32; + + bytes32 public constant TEST_POSITION = keccak256("rate.limit.test.position"); + + function getStorageLimit() external view returns (LimitData memory data) { + return TEST_POSITION.getStorageLimit(); + } + + function setStorageLimit(LimitData memory _data) external { + TEST_POSITION.setStorageLimit(_data); + } +} + +contract RateLimit__Harness { + using RateLimit for LimitData; + + LimitData public state; + + function harness_setState( + uint32 maxLimit, + uint32 prevLimit, + uint32 itemsPerFrame, + uint32 frameDurationInSec, + uint32 timestamp + ) external { + state.maxLimit = maxLimit; + state.itemsPerFrame = itemsPerFrame; + state.frameDurationInSec = frameDurationInSec; + state.prevLimit = prevLimit; + state.prevTimestamp = timestamp; + } + + function harness_getState() external view returns (LimitData memory) { + return + LimitData( + state.maxLimit, + state.prevLimit, + state.prevTimestamp, + state.frameDurationInSec, + state.itemsPerFrame + ); + } + + function calculateCurrentLimit(uint256 currentTimestamp) external view returns (uint256) { + return state.calculateCurrentLimit(currentTimestamp); + } + + function updatePrevLimit(uint256 newLimit, uint256 timestamp) external view returns (LimitData memory) { + return state.updatePrevLimit(newLimit, timestamp); + } + + function setLimits( + uint256 maxLimit, + uint256 itemsPerFrame, + uint256 frameDurationInSec, + uint256 timestamp + ) external view returns (LimitData memory) { + return state.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp); + } + + function isLimitSet() external view returns (bool) { + return state.isLimitSet(); + } +} diff --git 
a/test/common/lib/rateLimit.test.ts b/test/common/lib/rateLimit.test.ts new file mode 100644 index 0000000000..b94eb39f5c --- /dev/null +++ b/test/common/lib/rateLimit.test.ts @@ -0,0 +1,398 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +interface LimitData { + maxLimit: bigint; + prevLimit: bigint; + prevTimestamp: bigint; + frameDurationInSec: bigint; + itemsPerFrame: bigint; +} + +describe("RateLimit.sol", () => { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let rateLimitStorage: any; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let rateLimit: any; + + before(async () => { + rateLimitStorage = await ethers.deployContract("RateLimitStorage__Harness"); + rateLimit = await ethers.deployContract("RateLimit__Harness"); + }); + + context("RateLimitStorage", () => { + let data: LimitData; + + it("Min possible values", async () => { + data = { + maxLimit: 0n, + prevLimit: 0n, + prevTimestamp: 0n, + frameDurationInSec: 0n, + itemsPerFrame: 0n, + }; + + await rateLimitStorage.setStorageLimit(data); + + const result = await rateLimitStorage.getStorageLimit(); + expect(result.maxLimit).to.equal(0n); + expect(result.prevLimit).to.equal(0n); + expect(result.prevTimestamp).to.equal(0n); + expect(result.frameDurationInSec).to.equal(0n); + expect(result.itemsPerFrame).to.equal(0n); + }); + + it("Max possible values", async () => { + const MAX_UINT32 = 2n ** 32n - 1n; + + data = { + maxLimit: MAX_UINT32, + prevLimit: MAX_UINT32, + prevTimestamp: MAX_UINT32, + frameDurationInSec: MAX_UINT32, + itemsPerFrame: MAX_UINT32, + }; + + await rateLimitStorage.setStorageLimit(data); + + const result = await rateLimitStorage.getStorageLimit(); + expect(result.maxLimit).to.equal(MAX_UINT32); + expect(result.prevLimit).to.equal(MAX_UINT32); + expect(result.prevTimestamp).to.equal(MAX_UINT32); + expect(result.frameDurationInSec).to.equal(MAX_UINT32); + expect(result.itemsPerFrame).to.equal(MAX_UINT32); + }); + + it("Some 
random values", async () => { + const maxLimit = 100n; + const prevLimit = 9n; + const prevTimestamp = 90n; + const frameDurationInSec = 10n; + const itemsPerFrame = 1n; + + data = { + maxLimit, + prevLimit, + prevTimestamp, + frameDurationInSec, + itemsPerFrame, + }; + + await rateLimitStorage.setStorageLimit(data); + + const result = await rateLimitStorage.getStorageLimit(); + expect(result.maxLimit).to.equal(maxLimit); + expect(result.prevLimit).to.equal(prevLimit); + expect(result.prevTimestamp).to.equal(prevTimestamp); + expect(result.frameDurationInSec).to.equal(frameDurationInSec); + expect(result.itemsPerFrame).to.equal(itemsPerFrame); + }); + }); + + context("RateLimit", () => { + context("calculateCurrentLimit", () => { + beforeEach(async () => { + await rateLimit.harness_setState(0, 0, 0, 0, 0); + }); + + it("should return prevLimit value (nothing restored), if no time passed", async () => { + const timestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const result = await rateLimit.calculateCurrentLimit(timestamp); + expect(result).to.equal(prevLimit); + }); + + it("should return prevLimit value (nothing restored), if less than one frame passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 9); + expect(result).to.equal(prevLimit); + }); + + it("Should return prevLimit + 1 (restored one item), if exactly one frame passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining 
limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + frameDurationInSec); + expect(result).to.equal(prevLimit + 1); + }); + + it("Should return prevLimit + restored value, if multiple full frames passed, restored value does not exceed maxLimit", async () => { + const prevTimestamp = 1000; + const maxLimit = 20; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 40); + expect(result).to.equal(prevLimit + 4); + }); + + it("Should return maxLimit, if restored limit exceeds max", async () => { + const prevTimestamp = 1000; + const maxLimit = 100; + const prevLimit = 90; // remaining limit from prev usage + const itemsPerFrame = 3; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 100); // 10 frames * 3 = 30 + expect(result).to.equal(maxLimit); + }); + + it("Should return prevLimit, if itemsPerFrame = 0", async () => { + const prevTimestamp = 1000; + const maxLimit = 100; + const prevLimit = 7; // remaining limit from prev usage + const itemsPerFrame = 0; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 100); + expect(result).to.equal(7); + }); + + it("non-multiple frame passed (should truncate fractional frame)", async () => { + const prevTimestamp = 1000; + const maxLimit = 20; + const 
prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 25); + expect(result).to.equal(7); // 5 + 2 + }); + }); + + context("updatePrevLimit", () => { + beforeEach(async () => { + await rateLimit.harness_setState(0, 0, 0, 0, 0); + }); + + it("should revert with LimitExceeded, if newLimit exceeded maxLimit", async () => { + const prevTimestamp = 1000; + + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + await expect(rateLimit.updatePrevLimit(11, prevTimestamp + 10)).to.be.revertedWithCustomError( + rateLimit, + "LimitExceeded", + ); + }); + + it("should increase prevTimestamp on frame duration if one frame passed", async () => { + const prevTimestamp = 1000; + + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(4, prevTimestamp + 10); + expect(updated.prevLimit).to.equal(4); + expect(updated.prevTimestamp).to.equal(prevTimestamp + 10); + }); + + it("should not change prevTimestamp, as less than frame passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(3, prevTimestamp + 9); + 
expect(updated.prevLimit).to.equal(3); + expect(updated.prevTimestamp).to.equal(prevTimestamp); + }); + + it("should increase prevTimestamp on multiple frames value, if multiple frames passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 100; + const prevLimit = 90; // remaining limit from prev usage + const itemsPerFrame = 5; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(85, prevTimestamp + 45); + expect(updated.prevLimit).to.equal(85); + expect(updated.prevTimestamp).to.equal(prevTimestamp + 40); + }); + + it("should not change prevTimestamp, if no time passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 50; + const prevLimit = 25; // remaining limit from prev usage + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(20, prevTimestamp); + expect(updated.prevLimit).to.equal(20); + expect(updated.prevTimestamp).to.equal(prevTimestamp); + }); + }); + + context("setLimits", () => { + beforeEach(async () => { + await rateLimit.harness_setState(0, 0, 0, 0, 0); + }); + + it("should initialize limits", async () => { + const timestamp = 1000; + const maxLimit = 100; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + const result = await rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(maxLimit); + expect(result.itemsPerFrame).to.equal(itemsPerFrame); + expect(result.frameDurationInSec).to.equal(frameDurationInSec); + expect(result.prevLimit).to.equal(maxLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should set prevLimit to new maxLimit, if new maxLimit is lower than prevLimit", async () => { + const timestamp = 900; + 
const oldMaxLimit = 100; + const prevLimit = 80; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(oldMaxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const newMaxLimit = 50; + const result = await rateLimit.setLimits(newMaxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(newMaxLimit); + expect(result.prevLimit).to.equal(newMaxLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should not update prevLimit, if new maxLimit is higher", async () => { + const timestamp = 900; + const oldMaxLimit = 100; + const prevLimit = 80; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(oldMaxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const newMaxLimit = 150; + const result = await rateLimit.setLimits(newMaxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(newMaxLimit); + expect(result.prevLimit).to.equal(prevLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should reset prevLimit if old max was zero", async () => { + const timestamp = 900; + const oldMaxLimit = 0; + const prevLimit = 80; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(oldMaxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const newMaxLimit = 150; + const result = await rateLimit.setLimits(newMaxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(newMaxLimit); + expect(result.prevLimit).to.equal(newMaxLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should revert if maxLimit is too large", async () => { + const timestamp = 1000; + const maxLimit = 2n ** 32n; // exceeds uint32 max + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, 
frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "TooLargeMaxLimit"); + }); + + it("should revert if itemsPerFrame bigger than maxLimit", async () => { + const timestamp = 1000; + const maxLimit = 10; + const itemsPerFrame = 15; + const frameDurationInSec = 10; + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "TooLargeItemsPerFrame"); + }); + + it("should revert if frameDurationInSec is too large", async () => { + const timestamp = 1000; + const maxLimit = 100; + const itemsPerFrame = 2; + const frameDurationInSec = 2n ** 32n; // exceeds uint32 max + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "TooLargeFrameDuration"); + }); + + it("should revert if frameDurationInSec is zero", async () => { + const timestamp = 1000; + const maxLimit = 100; + const itemsPerFrame = 2; + const frameDurationInSec = 0; + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "ZeroFrameDuration"); + }); + }); + + context("isLimitSet", () => { + it("returns false when maxLimit is 0", async () => { + await rateLimit.harness_setState(0, 10, 1, 10, 1000); + const result = await rateLimit.isLimitSet(); + expect(result).to.be.false; + }); + + it("returns true when maxLimit is non-zero", async () => { + await rateLimit.harness_setState(100, 50, 1, 10, 1000); + const result = await rateLimit.isLimitSet(); + expect(result).to.be.true; + }); + }); + }); +}); diff --git a/test/common/minFirstAllocationStrategy.t.sol b/test/common/minFirstAllocationStrategy.t.sol index 8e46dc729c..05d6044f17 100644 --- a/test/common/minFirstAllocationStrategy.t.sol +++ b/test/common/minFirstAllocationStrategy.t.sol @@ -15,12 +15,18 @@ contract MinFirstAllocationStrategyInvariants is Test { uint256 private constant 
MAX_CAPACITY_VALUE = 8192; uint256 private constant MAX_ALLOCATION_SIZE = 1024; - MinFirstAllocationStrategyBase internal handler; + MinFirstAllocationStrategyAllocateHandler internal handler; MinFirstAllocationStrategy__Harness internal harness; function setUp() external { handler = new MinFirstAllocationStrategyAllocateHandler(); harness = new MinFirstAllocationStrategy__Harness(); + + targetContract(address(handler)); + + bytes4[] memory selectors = new bytes4[](1); + selectors[0] = MinFirstAllocationStrategyAllocateHandler.allocate.selector; + targetSelector(FuzzSelector({addr: address(handler), selectors: selectors})); } function test_allocateToBestCandidate_ReturnsZeroWhenAllocationSizeIsZero() public view { diff --git a/test/deploy/accountingOracle.ts b/test/deploy/accountingOracle.ts index 926c7f5b27..56a68522a9 100644 --- a/test/deploy/accountingOracle.ts +++ b/test/deploy/accountingOracle.ts @@ -17,6 +17,10 @@ import { import { deployHashConsensus } from "./hashConsensus"; import { deployLidoLocator, updateLidoLocatorImplementation } from "./locator"; +import { + MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01, + MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02, +} from "./validatorExitBusOracle"; export const ORACLE_LAST_COMPLETED_EPOCH = 2n * EPOCHS_PER_FRAME; export const ORACLE_LAST_REPORT_SLOT = ORACLE_LAST_COMPLETED_EPOCH * SLOTS_PER_EPOCH; @@ -24,8 +28,18 @@ export const ORACLE_LAST_REPORT_SLOT = ORACLE_LAST_COMPLETED_EPOCH * SLOTS_PER_E async function deployMockAccountingAndStakingRouter() { const stakingRouter = await ethers.deployContract("StakingRouter__MockForAccountingOracle"); const withdrawalQueue = await ethers.deployContract("WithdrawalQueue__MockForAccountingOracle"); + const lido = await ethers.deployContract("Lido__MockForAccounting"); const accounting = await ethers.deployContract("Accounting__MockForAccountingOracle"); - return { accounting, stakingRouter, withdrawalQueue }; + + // Initialize Lido mock with reasonable defaults for balance-based 
accounting + await lido.mock__setClValidatorsBalance(300n * 10n ** 18n); // 300 ETH active + await lido.mock__setClPendingBalance(20n * 10n ** 18n); // 20 ETH pending + await lido.mock__setDepositedValidators(10); + // Router mock stores validators balance only; pending balance is seeded on the Lido mock. + await stakingRouter.mock__registerStakingModule(1); + await stakingRouter.reportValidatorBalancesByStakingModule([1], [300n * 10n ** 9n]); + + return { accounting, stakingRouter, withdrawalQueue, lido }; } async function deployMockLazyOracle() { @@ -46,7 +60,7 @@ export async function deployAccountingOracleSetup( ) { const locator = await deployLidoLocator(); const locatorAddr = await locator.getAddress(); - const { accounting, stakingRouter, withdrawalQueue } = await getLidoAndStakingRouter(); + const { accounting, stakingRouter, withdrawalQueue, lido } = await getLidoAndStakingRouter(); const oracle = await ethers.deployContract("AccountingOracle__Harness", [ lidoLocatorAddr || locatorAddr, @@ -71,13 +85,13 @@ export async function deployAccountingOracleSetup( withdrawalQueue: await withdrawalQueue.getAddress(), accountingOracle: accountingOracleAddress, accounting: accountingAddress, + lido: await lido.getAddress(), }); const lazyOracle = await deployMockLazyOracle(); const oracleReportSanityChecker = await deployOracleReportSanityCheckerForAccounting( locatorAddr, - accountingOracleAddress, accountingAddress, admin, ); @@ -94,6 +108,7 @@ export async function deployAccountingOracleSetup( accounting, stakingRouter, withdrawalQueue, + lido, locatorAddr, oracle, consensus, @@ -135,28 +150,27 @@ export async function initAccountingOracle({ return initTx; } -async function deployOracleReportSanityCheckerForAccounting( - lidoLocator: string, - accountingOracle: string, - accounting: string, - admin: string, -) { - const exitedValidatorsPerDayLimit = 55; - const appearedValidatorsPerDayLimit = 100; +async function 
deployOracleReportSanityCheckerForAccounting(lidoLocator: string, accounting: string, admin: string) { + const exitedEthAmountPerDayLimit = 65_535n; + const appearedEthAmountPerDayLimit = 65_535n; return await ethers.getContractFactory("OracleReportSanityChecker").then((f) => - f.deploy(lidoLocator, accountingOracle, accounting, admin, { - exitedValidatorsPerDayLimit, - appearedValidatorsPerDayLimit, + f.deploy(lidoLocator, accounting, admin, { + exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit, annualBalanceIncreaseBPLimit: 0n, simulatedShareRateDeviationBPLimit: 0n, - maxValidatorExitRequestsPerReport: 32n * 12n, + maxBalanceExitRequestedPerReportInEth: 65_535n, // Max uint16 (65,535 ETH) + maxEffectiveBalanceWeightWCType01: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01, + maxEffectiveBalanceWeightWCType02: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02, maxItemsPerExtraDataTransaction: 15n, maxNodeOperatorsPerExtraDataItem: 16n, requestTimestampMargin: 0n, - maxPositiveTokenRebase: 0n, - initialSlashingAmountPWei: 0n, - inactivityPenaltiesAmountPWei: 0n, + maxPositiveTokenRebase: 1n, + maxCLBalanceDecreaseBP: 360n, clBalanceOraclesErrorUpperBPLimit: 0n, + consolidationEthAmountPerDayLimit: 0n, + exitedValidatorEthAmountLimit: 1n, + externalPendingBalanceCapEth: 0n, }), ); } diff --git a/test/deploy/dao.ts b/test/deploy/dao.ts index 70e18dc012..c5cfebfe8c 100644 --- a/test/deploy/dao.ts +++ b/test/deploy/dao.ts @@ -62,7 +62,9 @@ export async function addAragonApp({ dao, name, impl, rootAccount }: CreateAddAp export async function deployLidoDao({ rootAccount, initialized, locatorConfig = {} }: DeployLidoDaoArgs) { const { dao, acl } = await createAragonDao(rootAccount); - const impl = await ethers.deployContract("Lido", rootAccount); + const impl = await ethers.deployContract("Lido", { + signer: rootAccount, + }); const lidoProxyAddress = await addAragonApp({ dao, @@ -85,7 +87,9 @@ export async function deployLidoDao({ rootAccount, initialized, locatorConfig = export 
async function deployLidoDaoForNor({ rootAccount, initialized, locatorConfig = {} }: DeployLidoDaoArgs) { const { dao, acl } = await createAragonDao(rootAccount); - const impl = await ethers.deployContract("Lido__HarnessForDistributeReward", rootAccount); + const impl = await ethers.deployContract("Lido__HarnessForDistributeReward", { + signer: rootAccount, + }); const lidoProxyAddress = await addAragonApp({ dao, diff --git a/test/deploy/index.ts b/test/deploy/index.ts index 5b35dfceb8..85ab73b69e 100644 --- a/test/deploy/index.ts +++ b/test/deploy/index.ts @@ -6,3 +6,4 @@ export * from "./hashConsensus"; export * from "./withdrawalQueue"; export * from "./validatorExitBusOracle"; export * from "./vaults"; +export * from "./stakingRouter"; diff --git a/test/deploy/locator.ts b/test/deploy/locator.ts index d5fcadd5ee..9b152fb8f9 100644 --- a/test/deploy/locator.ts +++ b/test/deploy/locator.ts @@ -29,6 +29,7 @@ async function deployDummyLocator(config?: Partial, de oracleDaemonConfig: certainAddress("dummy-locator:oracleDaemonConfig"), validatorExitDelayVerifier: certainAddress("dummy-locator:validatorExitDelayVerifier"), triggerableWithdrawalsGateway: certainAddress("dummy-locator:triggerableWithdrawalsGateway"), + consolidationGateway: certainAddress("dummy-locator:consolidationGateway"), accounting: certainAddress("dummy-locator:accounting"), predepositGuarantee: certainAddress("dummy-locator:predepositGuarantee"), wstETH: certainAddress("dummy-locator:wstETH"), @@ -36,6 +37,7 @@ async function deployDummyLocator(config?: Partial, de vaultFactory: certainAddress("dummy-locator:vaultFactory"), operatorGrid: certainAddress("dummy-locator:operatorGrid"), lazyOracle: certainAddress("dummy-locator:lazyOracle"), + topUpGateway: certainAddress("dummy-locator:topUpGateway"), ...config, }); @@ -111,6 +113,7 @@ async function getLocatorConfig(locatorAddress: string): Promise[]; const configPromises = addresses.map((name) => locator[name]()); diff --git 
a/test/deploy/stakingRouter.ts b/test/deploy/stakingRouter.ts new file mode 100644 index 0000000000..0f241b2754 --- /dev/null +++ b/test/deploy/stakingRouter.ts @@ -0,0 +1,80 @@ +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + BeaconChainDepositor, + DepositContract__MockForBeaconChainDepositor, + Lido__MockForStakingRouter, + LidoLocator, + StakingRouter__Harness, +} from "typechain-types"; + +import { MAX_EFFECTIVE_BALANCE_WC_TYPE_01, MAX_EFFECTIVE_BALANCE_WC_TYPE_02, proxify } from "lib"; + +import { deployLidoLocator } from "test/deploy"; + +export interface DeployStakingRouterSigners { + deployer: HardhatEthersSigner; + admin: HardhatEthersSigner; + user?: HardhatEthersSigner; +} + +export interface DeployStakingRouterParams { + depositContract?: DepositContract__MockForBeaconChainDepositor; + lido?: Lido__MockForStakingRouter; + lidoLocator?: LidoLocator; + maxEBType1?: bigint; + maxEBType2?: bigint; +} + +export async function deployStakingRouter( + { deployer, admin, user }: DeployStakingRouterSigners, + { + depositContract, + lido, + lidoLocator, + maxEBType1 = MAX_EFFECTIVE_BALANCE_WC_TYPE_01, + maxEBType2 = MAX_EFFECTIVE_BALANCE_WC_TYPE_02, + }: DeployStakingRouterParams = {}, +): Promise<{ + depositContract: DepositContract__MockForBeaconChainDepositor; + stakingRouter: StakingRouter__Harness; + impl: StakingRouter__Harness; + beaconChainDepositor: BeaconChainDepositor; +}> { + if (!depositContract) { + depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor"); + } + + if (!lido) { + lido = await ethers.deployContract("Lido__MockForStakingRouter", deployer); + } + + if (!lidoLocator) { + lidoLocator = await deployLidoLocator({ lido }); + } + + const beaconChainDepositor = await ethers.deployContract("BeaconChainDepositor", deployer); + const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); + const srLib = 
await ethers.deployContract("SRLib", { + signer: deployer, + libraries: { + ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), + }, + }); + const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { + signer: deployer, + libraries: { + ["contracts/0.8.25/lib/BeaconChainDepositor.sol:BeaconChainDepositor"]: await beaconChainDepositor.getAddress(), + ["contracts/0.8.25/sr/SRLib.sol:SRLib"]: await srLib.getAddress(), + }, + }); + + const impl = await stakingRouterFactory + .connect(deployer) + .deploy(depositContract, lido, lidoLocator, maxEBType1, maxEBType2); + const [stakingRouter] = await proxify({ impl, admin, caller: user }); + + return { stakingRouter, depositContract, impl, beaconChainDepositor }; +} diff --git a/test/deploy/validatorExitBusOracle.ts b/test/deploy/validatorExitBusOracle.ts index dda2368bbb..fe35316c8a 100644 --- a/test/deploy/validatorExitBusOracle.ts +++ b/test/deploy/validatorExitBusOracle.ts @@ -1,7 +1,12 @@ import { expect } from "chai"; import { ethers } from "hardhat"; -import { HashConsensus__Harness, ReportProcessor__Mock, ValidatorsExitBusOracle } from "typechain-types"; +import { + HashConsensus__Harness, + ReportProcessor__Mock, + StakingModule__MockForKeyVerification, + ValidatorsExitBusOracle, +} from "typechain-types"; import { EPOCHS_PER_FRAME, @@ -16,6 +21,11 @@ import { deployHashConsensus } from "./hashConsensus"; import { deployLidoLocator, updateLidoLocatorImplementation } from "./locator"; export const DATA_FORMAT_LIST = 1; +export const DATA_FORMAT_LIST_WITH_KEY_INDEX = 2; + +// MaxEB weights (in ETH) +export const MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01 = 32n; // 32 ETH for WC 0x01 validators +export const MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02 = 2048n; // 2048 ETH for 0x02 validators async function deployMockAccountingOracle(secondsPerSlot = SECONDS_PER_SLOT, genesisTime = GENESIS_TIME) { const lido = await 
ethers.deployContract("Accounting__MockForAccountingOracle"); @@ -27,26 +37,25 @@ async function deployMockAccountingOracle(secondsPerSlot = SECONDS_PER_SLOT, gen return { ao, lido }; } -async function deployOracleReportSanityCheckerForExitBus( - lidoLocator: string, - accountingOracle: string, - accounting: string, - admin: string, -) { +async function deployOracleReportSanityCheckerForExitBus(lidoLocator: string, accounting: string, admin: string) { return await ethers.getContractFactory("OracleReportSanityChecker").then((f) => - f.deploy(lidoLocator, accountingOracle, accounting, admin, { - exitedValidatorsPerDayLimit: 0n, - appearedValidatorsPerDayLimit: 0n, + f.deploy(lidoLocator, accounting, admin, { + exitedEthAmountPerDayLimit: 0n, + appearedEthAmountPerDayLimit: 0n, annualBalanceIncreaseBPLimit: 0n, simulatedShareRateDeviationBPLimit: 0n, - maxValidatorExitRequestsPerReport: 2000, + maxBalanceExitRequestedPerReportInEth: 65_535n, // Max uint16 (65,535 ETH) + maxEffectiveBalanceWeightWCType01: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01, + maxEffectiveBalanceWeightWCType02: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02, maxItemsPerExtraDataTransaction: 0n, maxNodeOperatorsPerExtraDataItem: 0n, requestTimestampMargin: 0n, - maxPositiveTokenRebase: 0n, - initialSlashingAmountPWei: 0n, - inactivityPenaltiesAmountPWei: 0n, + maxPositiveTokenRebase: 1n, + maxCLBalanceDecreaseBP: 360n, clBalanceOraclesErrorUpperBPLimit: 0n, + consolidationEthAmountPerDayLimit: 0n, + exitedValidatorEthAmountLimit: 1n, + externalPendingBalanceCapEth: 0n, }), ); } @@ -68,6 +77,48 @@ export async function deployVEBO( const locator = await deployLidoLocator(); const locatorAddr = await locator.getAddress(); + // Deploy mock StakingRouter with default module configurations + const stakingRouter = await ethers.deployContract("StakingRouter__MockForValidatorsExitBus"); + const stakingRouterAddr = await stakingRouter.getAddress(); + + // Configure default modules: + // Module 1: Legacy (0x01) - 32 
ETH validators + await stakingRouter.setStakingModuleWithdrawalCredentialsType(1, 0x01); + // Modules 2, 3, 4, 5, 7: MaxEB (0x02) - 2048 ETH validators + await stakingRouter.setStakingModuleWithdrawalCredentialsType(2, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(3, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(4, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(5, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(7, 0x02); + // Modules 100, 101: Used in tests - configure as Legacy (0x01) + await stakingRouter.setStakingModuleWithdrawalCredentialsType(100, 0x01); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(101, 0x01); + + // Deploy universal mock modules for key verification (Format 2 testing) + // These mocks return requested keys and work for both legacy and new interfaces + const mockModule1 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule2 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule3 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule4 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule5 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule7 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + + await stakingRouter.setStakingModuleAddress(1, await mockModule1.getAddress()); + await stakingRouter.setStakingModuleAddress(2, await mockModule2.getAddress()); + await stakingRouter.setStakingModuleAddress(3, await mockModule3.getAddress()); + await stakingRouter.setStakingModuleAddress(4, await mockModule4.getAddress()); + await stakingRouter.setStakingModuleAddress(5, await mockModule5.getAddress()); + await stakingRouter.setStakingModuleAddress(7, await mockModule7.getAddress()); + + await 
updateLidoLocatorImplementation(locatorAddr, { + stakingRouter: stakingRouterAddr, + }); + + // Deploy mock NodeOperatorsRegistry + // In permissive mode (default), it returns empty keys which causes ValidatorsExitBus + // to skip validation. Tests can explicitly configure keys if needed. + const nodeOperatorsRegistry = await ethers.deployContract("NodeOperatorsRegistry__Mock"); + const oracle = await ethers.deployContract("ValidatorsExitBus__Harness", [secondsPerSlot, genesisTime, locatorAddr]); const { consensus } = await deployHashConsensus(admin, { @@ -91,7 +142,6 @@ export async function deployVEBO( const oracleReportSanityChecker = await deployOracleReportSanityCheckerForExitBus( locatorAddr, - accountingOracleAddress, accountingAddress, admin, ); @@ -111,9 +161,61 @@ export async function deployVEBO( consensus, oracleReportSanityChecker, triggerableWithdrawalsGateway, + nodeOperatorsRegistry, + stakingRouter, + mockModules: { + module1: mockModule1, + module2: mockModule2, + module3: mockModule3, + module4: mockModule4, + module5: mockModule5, + module7: mockModule7, + }, }; } +// Derive the same 48-byte pubkey as StakingModule__MockForKeyVerification fallback: +// pubkey = keccak(nodeOpId, keyIndex) || first 16 bytes of keccak(nodeOpId, keyIndex, 1) +export function makeMockPubkey(nodeOpId: number | bigint, keyIndex: number | bigint): string { + const hash1 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["uint256", "uint256"], [nodeOpId, keyIndex]), + ); + const hash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["uint256", "uint256", "uint256"], [nodeOpId, keyIndex, 1]), + ); + return ("0x" + hash1.slice(2) + hash2.slice(2)).slice(0, 2 + 96); +} + +// Seed StakingModule__MockForKeyVerification instances with signing keys matching requests +export async function seedMockModuleSigningKeys( + mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: 
StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }, + requests: { moduleId: number; nodeOpId: number; keyIndex?: number; valIndex: number; valPubkey?: string }[], +) { + const modMap: Record = { + 1: mockModules.module1, + 2: mockModules.module2, + 3: mockModules.module3, + 4: mockModules.module4, + 5: mockModules.module5, + 7: mockModules.module7, + }; + + for (const r of requests) { + const mod = modMap[r.moduleId]; + if (!mod || !mod.setSigningKey) continue; + const keyIdx = r.keyIndex ?? r.valIndex; + const pubkey = r.valPubkey ?? makeMockPubkey(r.nodeOpId, keyIdx); + await mod.setSigningKey(r.nodeOpId, keyIdx, pubkey); + } +} + interface VEBOConfig { admin: string; oracle: ValidatorsExitBusOracle; @@ -123,8 +225,8 @@ interface VEBOConfig { lastProcessingRefSlot?: number; resumeAfterDeploy?: boolean; maxRequestsPerBatch?: number; - maxExitRequestsLimit?: number; - exitsPerFrame?: number; + maxExitBalanceEth?: bigint; + balancePerFrameEth?: bigint; frameDurationInSec?: number; } @@ -137,8 +239,8 @@ export async function initVEBO({ lastProcessingRefSlot = 0, resumeAfterDeploy = false, maxRequestsPerBatch = 600, - maxExitRequestsLimit = 13000, - exitsPerFrame = 1, + maxExitBalanceEth = 13_000n, // 13,000 ETH + balancePerFrameEth = 32n, // 32 ETH (1 legacy validator per frame) frameDurationInSec = 48, }: VEBOConfig) { const initTx = await oracle.initialize( @@ -147,8 +249,8 @@ export async function initVEBO({ consensusVersion, lastProcessingRefSlot, maxRequestsPerBatch, - maxExitRequestsLimit, - exitsPerFrame, + maxExitBalanceEth, + balancePerFrameEth, frameDurationInSec, ); diff --git a/test/integration/consolidation/consolidation-gas.integration.ts b/test/integration/consolidation/consolidation-gas.integration.ts new file mode 100644 index 0000000000..c6f1d9146e --- /dev/null +++ 
b/test/integration/consolidation/consolidation-gas.integration.ts @@ -0,0 +1,290 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway, ConsolidationMigrator, NodeOperatorsRegistry } from "typechain-types"; + +import { addressToWC, certainAddress } from "lib"; +import { LocalMerkleTree, prepareLocalMerkleTree } from "lib/pdg"; +import { getProtocolContext, ProtocolContext } from "lib/protocol"; +import { + depositAndReportValidators, + norSdvtAddNodeOperator, + norSdvtAddOperatorKeys, + norSdvtSetOperatorStakingLimit, + report, +} from "lib/protocol/helpers"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; +import { LoadedContract } from "lib/protocol/types"; + +import { Snapshot } from "test/suite"; + +/** + * Gas measurement integration test for consolidation (full stack, no mocks). + * Uses: ConsolidationMigrator → ConsolidationBus → ConsolidationGateway → WithdrawalVault + * + * Results for batch of 5 x 63 requests: + * ┌──────────────────────────┬─────────────┐ + * │ Operation │ Gas │ + * ├──────────────────────────┼─────────────┤ + * │ submitConsolidationBatch │ 7,941,893 │ + * │ executeConsolidation │ 6,463,147 │ + * │ Total │ 14,405,040 │ + * │ Per request │ 45,730 │ + * └──────────────────────────┴─────────────┘ + */ +describe("Integration: Consolidation gas measurement (full stack via Migrator)", () => { + let ctx: ProtocolContext; + let nor: LoadedContract; + let consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway; + let consolidationMigrator: ConsolidationMigrator; + + let submitter: HardhatEthersSigner; + let executor: HardhatEthersSigner; + + const MAX_BLOCK_GAS = 16_000_000n; + const NUM_GROUPS = 5; + const REQUESTS_PER_GROUP = 63; + const TOTAL_REQUESTS = NUM_GROUPS * REQUESTS_PER_GROUP; // 315 + const TOTAL_SOURCE_KEYS = BigInt(TOTAL_REQUESTS); // 
315 + const TOTAL_TARGET_KEYS = BigInt(NUM_GROUPS); // 5 + + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + + let sourceOperatorId: bigint; + let targetOperatorId: bigint; + + // Source pubkeys grouped: 5 groups × 63 pubkeys + let sourcePubkeysGroups: string[][]; + // Target pubkeys: 5 + let targetPubkeys: string[]; + + // Key index groups for submitConsolidationBatch + let consolidationIndexGroups: { sourceKeyIndices: bigint[]; targetKeyIndex: bigint }[]; + + let originalState: string; + + before(async function () { + ctx = await getProtocolContext(); + + originalState = await Snapshot.take(); + + // ToDo: adapt tests for non-scratch contexts (forking/upgrade). + // This suite assumes both source and target modules resolve to NOR (module 1), + // which is only true on scratch deploys. In forking/upgrade mode the migrator's + // targetModuleId points at CMv2, so the NOR-based fixtures here would mismatch. + if (!ctx.isScratch) { + // Post-migration alignment: report() with no explicit per-module balances + // produces a self-consistent first report. 
+ await report(ctx); + + this.skip(); + } + + [, submitter, executor] = await ethers.getSigners(); + + nor = ctx.contracts.nor; + consolidationBus = ctx.contracts.consolidationBus; + consolidationGateway = ctx.contracts.consolidationGateway; + consolidationMigrator = ctx.contracts.consolidationMigrator; + + const agentSigner = await ctx.getSigner("agent"); + + // ========================================= + // Deposit all existing depositable validators first to clear them + // ========================================= + const { stakingRouter } = ctx.contracts; + const existingDepositable = await stakingRouter.getStakingModuleMaxDepositsCount( + NOR_MODULE_ID, + await ctx.contracts.lido.getDepositableEther(), + ); + if (existingDepositable > 0n) { + const DEPOSIT_BATCH = 50n; + for (let deposited = 0n; deposited < existingDepositable; deposited += DEPOSIT_BATCH) { + const batch = deposited + DEPOSIT_BATCH > existingDepositable ? existingDepositable - deposited : DEPOSIT_BATCH; + await depositAndReportValidators(ctx, NOR_MODULE_ID, batch); + } + } + + // ========================================= + // Setup source operator with deposited keys + // ========================================= + sourceOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "gas_test_source_operator", + rewardAddress: certainAddress("gas:source:reward"), + }); + + // Add keys in batches to avoid exceeding block gas limit + const KEYS_BATCH = 100n; + for (let added = 0n; added < TOTAL_SOURCE_KEYS; added += KEYS_BATCH) { + const batch = added + KEYS_BATCH > TOTAL_SOURCE_KEYS ? 
TOTAL_SOURCE_KEYS - added : KEYS_BATCH; + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: sourceOperatorId, + keysToAdd: batch, + }); + } + + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: sourceOperatorId, + limit: TOTAL_SOURCE_KEYS, + }); + + // Deposit source keys in batches + const DEPOSIT_BATCH = 50n; + for (let deposited = 0n; deposited < TOTAL_SOURCE_KEYS; deposited += DEPOSIT_BATCH) { + const batch = deposited + DEPOSIT_BATCH > TOTAL_SOURCE_KEYS ? TOTAL_SOURCE_KEYS - deposited : DEPOSIT_BATCH; + await depositAndReportValidators(ctx, NOR_MODULE_ID, batch); + } + + // ========================================= + // Setup target operator with deposited keys + // ========================================= + targetOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "gas_test_target_operator", + rewardAddress: certainAddress("gas:target:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: targetOperatorId, + keysToAdd: TOTAL_TARGET_KEYS, + }); + + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: targetOperatorId, + limit: TOTAL_TARGET_KEYS, + }); + + await depositAndReportValidators(ctx, NOR_MODULE_ID, TOTAL_TARGET_KEYS); + + // ========================================= + // Retrieve pubkeys from NOR + // ========================================= + sourcePubkeysGroups = []; + consolidationIndexGroups = []; + for (let g = 0; g < NUM_GROUPS; g++) { + const group: string[] = []; + const indices: bigint[] = []; + for (let r = 0; r < REQUESTS_PER_GROUP; r++) { + const keyIndex = g * REQUESTS_PER_GROUP + r; + const key = await nor.getSigningKey(sourceOperatorId, keyIndex); + expect(key.used).to.be.true; + group.push(key.key); + indices.push(BigInt(keyIndex)); + } + sourcePubkeysGroups.push(group); + consolidationIndexGroups.push({ sourceKeyIndices: indices, targetKeyIndex: BigInt(g) }); + } + + targetPubkeys = []; + for (let t = 0; t < NUM_GROUPS; t++) { + const key = await 
nor.getSigningKey(targetOperatorId, t); + expect(key.used).to.be.true; + targetPubkeys.push(key.key); + } + + // ========================================= + // Setup roles and limits + // ========================================= + + // Allow pair in ConsolidationMigrator + const ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + const DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + await consolidationMigrator.connect(agentSigner).grantRole(ALLOW_PAIR_ROLE, agentSigner.address); + await consolidationMigrator.connect(agentSigner).grantRole(DISALLOW_PAIR_ROLE, agentSigner.address); + await consolidationMigrator.connect(agentSigner).allowPair(sourceOperatorId, targetOperatorId, submitter.address); + + // Increase ConsolidationBus batch size to accommodate 315 requests in 5 groups + const MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + await consolidationBus.connect(agentSigner).grantRole(MANAGE_ROLE, agentSigner.address); + await consolidationBus.connect(agentSigner).setBatchSize(TOTAL_REQUESTS); + + // Set rate limit high enough for all requests + const EXIT_LIMIT_MANAGER_ROLE = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await ( + await consolidationGateway.connect(agentSigner).grantRole(EXIT_LIMIT_MANAGER_ROLE, agentSigner.address) + ).wait(); + await ( + await consolidationGateway.connect(agentSigner).setConsolidationRequestLimit(TOTAL_REQUESTS, TOTAL_REQUESTS, 1) + ).wait(); + + // Advance time by 1 second so the rate limit replenishes to maxLimit + await ethers.provider.send("evm_increaseTime", [1]); + await ethers.provider.send("evm_mine", []); + }); + + after(async () => await Snapshot.restore(originalState)); + + it(`should execute batch of ${NUM_GROUPS} x ${REQUESTS_PER_GROUP} (${TOTAL_REQUESTS}) requests within gas limit`, async () => { + // Build merkle tree witnesses for target pubkeys + const merkleTree: LocalMerkleTree = await prepareLocalMerkleTree(); + + const validatorIndices: number[] = []; 
+ const withdrawalCredentials = addressToWC(await ctx.contracts.withdrawalVault.getAddress(), 2); + for (const pubkey of targetPubkeys) { + const { validatorIndex } = await merkleTree.addValidator({ + pubkey, + withdrawalCredentials, + effectiveBalance: 32_000_000_000n, + slashed: false, + activationEligibilityEpoch: 0, + activationEpoch: 0, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }); + validatorIndices.push(validatorIndex); + } + + const { childBlockTimestamp, beaconBlockHeader } = await merkleTree.commitChangesToBeaconRoot(); + + const targetWitnesses = await Promise.all( + targetPubkeys.map(async (pubkey, i) => ({ + proof: await merkleTree.buildProof(validatorIndices[i], beaconBlockHeader), + pubkey, + validatorIndex: validatorIndices[i], + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + })), + ); + + // Submit batch via ConsolidationMigrator → ConsolidationBus + const submitTx = await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, consolidationIndexGroups); + const submitReceipt = await submitTx.wait(); + + // Get fee from real WithdrawalVault + const { withdrawalVault } = ctx.contracts; + const fee = await withdrawalVault.getConsolidationRequestFee(); + const totalFee = fee * BigInt(TOTAL_REQUESTS); + + // Execute batch through full stack - build ConsolidationWitnessGroup array + const consolidationWitnessGroups = sourcePubkeysGroups.map((sourcePubkeys, i) => ({ + sourcePubkeys, + targetWitness: targetWitnesses[i], + })); + + const executeTx = await consolidationBus.connect(executor).executeConsolidation(consolidationWitnessGroups, { + value: totalFee, + }); + const executeReceipt = await executeTx.wait(); + + // Gas assertions + expect(submitReceipt!.gasUsed).to.be.lessThan(MAX_BLOCK_GAS); + expect(executeReceipt!.gasUsed).to.be.lessThan(MAX_BLOCK_GAS); + + // Log gas usage + const submitGas = 
submitReceipt!.gasUsed; + const execGas = executeReceipt!.gasUsed; + const totalGas = submitGas + execGas; + const perRequest = totalGas / BigInt(TOTAL_REQUESTS); + + console.log(`\n Gas usage for ${NUM_GROUPS} x ${REQUESTS_PER_GROUP} (${TOTAL_REQUESTS}) requests:`); + console.log(` submitConsolidationBatch: ${Number(submitGas).toLocaleString()}`); + console.log(` executeConsolidation: ${Number(execGas).toLocaleString()}`); + console.log(` Total: ${Number(totalGas).toLocaleString()}`); + console.log(` Per request: ${Number(perRequest).toLocaleString()}`); + }); +}); diff --git a/test/integration/consolidation/consolidation-migration.integration.ts b/test/integration/consolidation/consolidation-migration.integration.ts new file mode 100644 index 0000000000..9fde9dc809 --- /dev/null +++ b/test/integration/consolidation/consolidation-migration.integration.ts @@ -0,0 +1,945 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway, ConsolidationMigrator, NodeOperatorsRegistry } from "typechain-types"; + +import { addressToWC, certainAddress, findEventsWithInterfaces } from "lib"; +import { LocalMerkleTree, prepareLocalMerkleTree } from "lib/pdg"; +import { getProtocolContext, ProtocolContext } from "lib/protocol"; +import { + depositAndReportValidators, + norSdvtAddNodeOperator, + norSdvtAddOperatorKeys, + norSdvtSetOperatorStakingLimit, + report, +} from "lib/protocol/helpers"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; +import { LoadedContract } from "lib/protocol/types"; + +import { Snapshot } from "test/suite"; + +const fakeWitnessForTarget = (pubkey: string) => ({ + proof: [], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, +}); + +/** + * Integration test for the full consolidation migration flow using real NOR modules. + * + * The flow tested: + * 1. 
ConsolidationMigrator validates source/target keys and submits to ConsolidationBus + * 2. ConsolidationBus stores the batch for later execution + * 3. Executor calls executeConsolidation on ConsolidationBus + * 4. ConsolidationBus forwards to ConsolidationGateway + * 5. ConsolidationGateway forwards to WithdrawalVault + * 6. WithdrawalVault processes EIP-7251 consolidation requests + */ +describe("Integration: Consolidation Migration Flow (Real NOR)", () => { + let ctx: ProtocolContext; + let nor: LoadedContract; + let consolidationGateway: ConsolidationGateway; + let consolidationBus: ConsolidationBus; + let consolidationMigrator: ConsolidationMigrator; + + let executor: HardhatEthersSigner; + let submitter: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + // Operator IDs will be assigned during setup + let sourceOperatorId: bigint; + let targetOperatorId: bigint; + + // Pubkeys will be retrieved from real NOR + let SOURCE_PUBKEY_1: string; + let SOURCE_PUBKEY_2: string; + let TARGET_PUBKEY_1: string; + let TARGET_PUBKEY_2: string; + + let merkleTree: LocalMerkleTree; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let targetWitness1: any; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let targetWitness2: any; + + let globalSnapshot: string; + let testSnapshot: string; + + before(async function () { + ctx = await getProtocolContext(); + + globalSnapshot = await Snapshot.take(); + + // ToDo: adapt tests for non-scratch contexts (forking/upgrade). + // This suite assumes both source and target modules resolve to NOR (module 1), + // which is only true on scratch deploys. In forking/upgrade mode the migrator's + // targetModuleId points at CMv2, so the NOR-based fixtures here would mismatch. + if (!ctx.isScratch) { + // Post-migration alignment: report() with no explicit per-module balances + // produces a self-consistent first report. 
+ await report(ctx); + + this.skip(); + } + + [, executor, submitter, stranger] = await ethers.getSigners(); + + // Get real contracts from protocol context + nor = ctx.contracts.nor; + consolidationGateway = ctx.contracts.consolidationGateway; + consolidationBus = ctx.contracts.consolidationBus; + consolidationMigrator = ctx.contracts.consolidationMigrator; + + const agentSigner = await ctx.getSigner("agent"); + + // ========================================= + // Setup source operator with deposited keys + // ========================================= + + // Create source operator + sourceOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_source_operator", + rewardAddress: certainAddress("consolidation:source:reward"), + }); + + // Add signing keys to source operator + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: sourceOperatorId, + keysToAdd: 5n, + }); + + // Set staking limit to vet the keys + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: sourceOperatorId, + limit: 5n, + }); + + // Deposit validators to make keys "used" + await depositAndReportValidators(ctx, NOR_MODULE_ID, 2n); + + // ========================================= + // Setup target operator with deposited keys (active validators) + // Per EIP-7251, consolidation can only happen TO active validators + // ========================================= + + // Create target operator + targetOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_target_operator", + rewardAddress: certainAddress("consolidation:target:reward"), + }); + + // Add signing keys to target operator + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: targetOperatorId, + keysToAdd: 5n, + }); + + // Set staking limit to vet the keys + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: targetOperatorId, + limit: 5n, + }); + + // Deposit validators to make target keys "used" (active validators) + await depositAndReportValidators(ctx, NOR_MODULE_ID, 
2n); + + // ========================================= + // Retrieve pubkeys from real NOR + // ========================================= + + // Get source pubkeys (these are deposited/used) + const sourceKey1 = await nor.getSigningKey(sourceOperatorId, 0); + const sourceKey2 = await nor.getSigningKey(sourceOperatorId, 1); + SOURCE_PUBKEY_1 = sourceKey1.key; + SOURCE_PUBKEY_2 = sourceKey2.key; + + // Verify source keys are used (deposited) + expect(sourceKey1.used).to.be.true; + expect(sourceKey2.used).to.be.true; + + // Get target pubkeys (these are deposited - active validators) + const targetKey1 = await nor.getSigningKey(targetOperatorId, 0); + const targetKey2 = await nor.getSigningKey(targetOperatorId, 1); + TARGET_PUBKEY_1 = targetKey1.key; + TARGET_PUBKEY_2 = targetKey2.key; + + // Verify target keys ARE used (deposited - active validators) + expect(targetKey1.used).to.be.true; + expect(targetKey2.used).to.be.true; + + // ========================================= + // Setup CL proof merkle tree for target witnesses + // ========================================= + merkleTree = await prepareLocalMerkleTree(); + + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + const withdrawalVaultAddress = await ctx.contracts.withdrawalVault.getAddress(); + const withdrawalCredentials = addressToWC(withdrawalVaultAddress, 2); + const makeValidatorContainer = (pubkey: string) => ({ + pubkey, + withdrawalCredentials, + effectiveBalance: 32_000_000_000n, + slashed: false, + activationEligibilityEpoch: 0, + activationEpoch: 0, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }); + + const { validatorIndex: vi1 } = await merkleTree.addValidator(makeValidatorContainer(TARGET_PUBKEY_1)); + const { validatorIndex: vi2 } = await merkleTree.addValidator(makeValidatorContainer(TARGET_PUBKEY_2)); + const { childBlockTimestamp, beaconBlockHeader } = await merkleTree.commitChangesToBeaconRoot(); + + const buildWitness = async (pubkey: string, validatorIndex: number) => ({ 
+ proof: await merkleTree.buildProof(validatorIndex, beaconBlockHeader), + pubkey, + validatorIndex, + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + }); + + targetWitness1 = await buildWitness(TARGET_PUBKEY_1, vi1); + targetWitness2 = await buildWitness(TARGET_PUBKEY_2, vi2); + + // ========================================= + // Setup roles + // ========================================= + + // Grant MANAGE_ROLE on ConsolidationBus to agent (for batch management tests) + const MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + const REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + await consolidationBus.connect(agentSigner).grantRole(MANAGE_ROLE, agentSigner.address); + await consolidationBus.connect(agentSigner).grantRole(REMOVE_ROLE, agentSigner.address); + + // Grant ALLOW_PAIR_ROLE and DISALLOW_PAIR_ROLE on ConsolidationMigrator to agent + const ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + const DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + await consolidationMigrator.connect(agentSigner).grantRole(ALLOW_PAIR_ROLE, agentSigner.address); + await consolidationMigrator.connect(agentSigner).grantRole(DISALLOW_PAIR_ROLE, agentSigner.address); + + // Allow the consolidation pair with submitter + await consolidationMigrator.connect(agentSigner).allowPair(sourceOperatorId, targetOperatorId, submitter.address); + }); + + after(async () => await Snapshot.restore(globalSnapshot)); + + beforeEach(async () => { + testSnapshot = await Snapshot.take(); + }); + + afterEach(async () => await Snapshot.restore(testSnapshot)); + + context("Full consolidation flow with real NOR", () => { + it("Should successfully complete the full consolidation flow with single validator", async () => { + const { withdrawalVault } = ctx.contracts; + + // Single validator consolidation + const groups = [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]; + + await consolidationMigrator + 
.connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, groups); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + const tx = await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + const receipt = await tx.wait(); + const consolidationEvents = findEventsWithInterfaces(receipt!, "ConsolidationRequestAdded", [ + withdrawalVault.interface, + ]); + expect(consolidationEvents?.length).to.equal(1); + }); + + it("Should successfully complete the full consolidation flow with multiple validators", async () => { + const { withdrawalVault } = ctx.contracts; + + // Step 1: Operator submits consolidation batch via ConsolidationMigrator + const groups = [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [1n], targetKeyIndex: 1n }, + ]; + + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, groups), + ) + .to.emit(consolidationMigrator, "ConsolidationSubmitted") + .withArgs( + sourceOperatorId, + targetOperatorId, + groups.map((g) => [g.sourceKeyIndices, g.targetKeyIndex]), + ); + + // Step 2: Verify batch is stored in ConsolidationBus + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [ + [ + { sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }, + { sourcePubkeys: [SOURCE_PUBKEY_2], targetPubkey: TARGET_PUBKEY_2 }, + ], + ], + ), + ); + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal( + await consolidationMigrator.getAddress(), + ); + + // Step 3: Executor calls executeConsolidation + const fee = await withdrawalVault.getConsolidationRequestFee(); + const totalFee = fee * BigInt(groups.length); + + const initialLimit = (await consolidationGateway.getConsolidationRequestLimitFullInfo()) + 
.currentConsolidationRequestsLimit; + + const tx = await consolidationBus.connect(executor).executeConsolidation( + [ + { sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }, + { sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness2 }, + ], + { + value: totalFee, + }, + ); + + // Step 4: Verify batch is removed from storage after execution + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal(ethers.ZeroAddress); + + // Step 5: Verify ConsolidationGateway rate limit was consumed + const finalLimit = (await consolidationGateway.getConsolidationRequestLimitFullInfo()) + .currentConsolidationRequestsLimit; + expect(finalLimit).to.equal(initialLimit - BigInt(groups.length)); + + // Step 6: Verify consolidation requests reached WithdrawalVault + const receipt = await tx.wait(); + expect(receipt).not.to.be.null; + + const consolidationEvents = findEventsWithInterfaces(receipt!, "ConsolidationRequestAdded", [ + withdrawalVault.interface, + ]); + expect(consolidationEvents?.length).to.equal(groups.length); + }); + + it("Should revert submitConsolidationBatch if caller is not the designated submitter", async () => { + await expect( + consolidationMigrator + .connect(stranger) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(stranger.address, sourceOperatorId, targetOperatorId); + }); + + it("Should revert submitConsolidationBatch if pair is not allowed (no submitter set)", async () => { + const unknownTargetOpId = 999n; + + // When pair is not allowed, there's no submitter set (address(0)) + // So caller will fail authorization check first + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, unknownTargetOpId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + 
.to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, sourceOperatorId, unknownTargetOpId); + }); + + it("Should revert executeConsolidation if batch not found", async () => { + const fakePubkey = "0x" + "ff".repeat(48); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [fakePubkey], targetWitness: fakeWitnessForTarget(fakePubkey) }], { + value: 1n, + }), + ).to.be.revertedWithCustomError(consolidationBus, "BatchNotFound"); + }); + + it("Should revert executeConsolidation if insufficient fee", async () => { + // Submit batch first + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Try to execute with insufficient fee (0) + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: 0n, + }, + ), + ).to.be.reverted; // The actual error comes from WithdrawalVault + }); + + it("Should revert executeConsolidation if batch already executed", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute first time + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + // Try to execute again + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: fee, + }, + ), + ).to.be.revertedWithCustomError(consolidationBus, 
"BatchNotFound"); + }); + }); + + context("Batch management", () => { + it("Should allow manager to remove a pending batch", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.not.equal(ethers.ZeroAddress); + + // Manager removes the batch + await consolidationBus.connect(agentSigner).removeBatches([batchHash]); + + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal(ethers.ZeroAddress); + }); + }); + + context("Allowlist management", () => { + it("Should allow disallowing a pair after submission", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Submit a batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Disallow the pair + await consolidationMigrator.connect(agentSigner).disallowPair(sourceOperatorId, targetOperatorId); + + // Verify new submissions are blocked (submitter is cleared, so NotAuthorized is thrown) + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [1n], targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, sourceOperatorId, targetOperatorId); + + // But existing batch can still be executed + const { withdrawalVault } = ctx.contracts; + const fee = await withdrawalVault.getConsolidationRequestFee(); + + 
await expect( + consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }), + ).to.not.be.reverted; + }); + + it("Should allow one source operator to consolidate to multiple targets", async () => { + const { withdrawalVault } = ctx.contracts; + const agentSigner = await ctx.getSigner("agent"); + + // Set up a second target operator with deposited validators + const targetOperatorId2 = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_target_operator_2", + rewardAddress: certainAddress("consolidation:target2:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: targetOperatorId2, + keysToAdd: 2n, + }); + + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: targetOperatorId2, + limit: 2n, + }); + + // Deposit validators to make target2 keys active + await depositAndReportValidators(ctx, NOR_MODULE_ID, 1n); + + const targetKey3 = await nor.getSigningKey(targetOperatorId2, 0); + const TARGET_PUBKEY_3 = targetKey3.key; + + // Build valid CL proof witness for TARGET_PUBKEY_3 + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + const { validatorIndex: vi3 } = await merkleTree.addValidator({ + pubkey: TARGET_PUBKEY_3, + withdrawalCredentials: addressToWC(await ctx.contracts.withdrawalVault.getAddress(), 2), + effectiveBalance: 32_000_000_000n, + slashed: false, + activationEligibilityEpoch: 0, + activationEpoch: 0, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }); + const { childBlockTimestamp: cbt3, beaconBlockHeader: bbh3 } = await merkleTree.commitChangesToBeaconRoot(); + const targetWitness3 = { + proof: await merkleTree.buildProof(vi3, bbh3), + pubkey: TARGET_PUBKEY_3, + validatorIndex: vi3, + childBlockTimestamp: cbt3, + slot: bbh3.slot, + proposerIndex: bbh3.proposerIndex, + }; + + // Allow second pair with the same submitter + await consolidationMigrator + .connect(agentSigner) + .allowPair(sourceOperatorId, 
targetOperatorId2, submitter.address); + + // Submit batch to first target + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Submit batch to second target + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId2, [ + { sourceKeyIndices: [1n], targetKeyIndex: 0n }, + ]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute both batches + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness3 }], { + value: fee, + }); + }); + }); + + context("Key validation with real NOR", () => { + it("Should revert submitConsolidationBatch if source key is NOT used (not deposited)", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Create a new source operator with keys that are NOT deposited + const unusedSourceOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_unused_source", + rewardAddress: certainAddress("consolidation:unused:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: unusedSourceOperatorId, + keysToAdd: 2n, + }); + + // Set staking limit but DO NOT deposit - keys remain unused + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: unusedSourceOperatorId, + limit: 2n, + }); + + // Allow the pair + await consolidationMigrator + .connect(agentSigner) + .allowPair(unusedSourceOperatorId, targetOperatorId, submitter.address); + + // Try to consolidate from unused key - should fail + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(unusedSourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], 
targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(NOR_MODULE_ID, unusedSourceOperatorId, 0n); + }); + + it("Should revert submitConsolidationBatch if target key is NOT deposited (not active validator)", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Create a new target operator with keys that are NOT deposited + const undepositedTargetOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_undeposited_target", + rewardAddress: certainAddress("consolidation:undeposited:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: undepositedTargetOperatorId, + keysToAdd: 2n, + }); + + // Set staking limit but DO NOT deposit - keys remain undeposited (not active) + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: undepositedTargetOperatorId, + limit: 2n, + }); + + // Verify target keys are NOT used (not deposited) + const targetKey = await nor.getSigningKey(undepositedTargetOperatorId, 0); + expect(targetKey.used).to.be.false; + + // Allow the pair + await consolidationMigrator + .connect(agentSigner) + .allowPair(sourceOperatorId, undepositedTargetOperatorId, submitter.address); + + // Try to consolidate to undeposited target key - should fail + // Per EIP-7251, consolidation can only happen TO active (deposited) validators + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, undepositedTargetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(NOR_MODULE_ID, undepositedTargetOperatorId, 0n); + }); + }); + + context("ConsolidationGateway integration", () => { + it("Should revert executeConsolidation when ConsolidationGateway is paused", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit batch first + await consolidationMigrator + 
.connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Grant PAUSE_ROLE to agent and pause the gateway + const agentSigner = await ctx.getSigner("agent"); + const PAUSE_ROLE = await consolidationGateway.PAUSE_ROLE(); + await consolidationGateway.connect(agentSigner).grantRole(PAUSE_ROLE, agentSigner.address); + await consolidationGateway.connect(agentSigner).pauseFor(3600); // 1 hour + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Try to execute - should revert because gateway is paused + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: fee, + }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "ResumedExpected"); + }); + + it("Should revert executeConsolidation when rate limit is exhausted", async () => { + const { withdrawalVault } = ctx.contracts; + + // Grant EXIT_LIMIT_MANAGER_ROLE to agent and set a small limit + const agentSigner = await ctx.getSigner("agent"); + const EXIT_LIMIT_MANAGER_ROLE = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await consolidationGateway.connect(agentSigner).grantRole(EXIT_LIMIT_MANAGER_ROLE, agentSigner.address); + await consolidationGateway.connect(agentSigner).setConsolidationRequestLimit(1, 1, 86400); + + // Submit first batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute first batch - this should consume the limit + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + // Submit second batch + await consolidationMigrator + .connect(submitter) + 
.submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [1n], targetKeyIndex: 1n }]); + + // Execute second batch - should fail due to rate limit + await expect( + consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness2 }], { + value: fee, + }), + ).to.be.revertedWithCustomError(consolidationGateway, "ConsolidationRequestsLimitExceeded"); + }); + + it("Should refund excess ETH to executor", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + const excessFee = fee * 10n; // Send 10x the required fee + + const executorBalanceBefore = await ethers.provider.getBalance(executor.address); + + const tx = await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: excessFee, + }); + + const receipt = await tx.wait(); + const gasUsed = receipt!.gasUsed * receipt!.gasPrice; + + const executorBalanceAfter = await ethers.provider.getBalance(executor.address); + + // Executor should only pay fee + gas, not excessFee + // Balance after = Balance before - fee - gas + const expectedBalance = executorBalanceBefore - fee - gasUsed; + expect(executorBalanceAfter).to.equal(expectedBalance); + }); + }); + + context("Batch management extended", () => { + it("Should execute multiple batches sequentially", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit first batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Submit second batch + await consolidationMigrator + .connect(submitter) + 
.submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [1n], targetKeyIndex: 1n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute first batch + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + // Verify first batch is executed + const batchHash1 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + expect((await consolidationBus.getBatchInfo(batchHash1)).publisher).to.equal(ethers.ZeroAddress); + + // Execute second batch + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness2 }], { + value: fee, + }); + + // Verify second batch is executed + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_2], targetPubkey: TARGET_PUBKEY_2 }]], + ), + ); + expect((await consolidationBus.getBatchInfo(batchHash2)).publisher).to.equal(ethers.ZeroAddress); + }); + + it("Should revert executeConsolidation if batch was removed", async () => { + const { withdrawalVault } = ctx.contracts; + const agentSigner = await ctx.getSigner("agent"); + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + + // Remove batch + await consolidationBus.connect(agentSigner).removeBatches([batchHash]); + + const fee = 
await withdrawalVault.getConsolidationRequestFee(); + + // Try to execute removed batch + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: fee, + }, + ), + ).to.be.revertedWithCustomError(consolidationBus, "BatchNotFound"); + }); + + it("Should revert addConsolidationRequests if too many groups", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Set maxGroupsInBatch to 1 + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + + // Try to submit batch with 2 groups (exceeds maxGroupsInBatch of 1) + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [1n], targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "TooManyGroups") + .withArgs(2, 1); + }); + + it("Should revert addConsolidationRequests if batch size exceeds limit", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Set batchSize to 1 (single group with 2 sources will exceed it) + // Must reduce maxGroupsInBatch first, since batchSize must be >= maxGroupsInBatch + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + await consolidationBus.connect(agentSigner).setBatchSize(1); + + // Try to submit 1 group with 2 source keys (total count 2 exceeds batchSize of 1) + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n, 1n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(2, 1); + }); + + it("Should revert addConsolidationRequests if batch already pending (duplicate submission)", async () => { + // Submit batch first time + await consolidationMigrator + .connect(submitter) 
+ .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + + // Try to submit the same batch again + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchAlreadyPending") + .withArgs(batchHash); + }); + }); + + context("Input validation", () => { + it("Should revert submitConsolidationBatch with EmptyBatch if groups array is empty", async () => { + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, []), + ).to.be.revertedWithCustomError(consolidationBus, "EmptyBatch"); + }); + + it("Should revert submitConsolidationBatch with EmptyGroup if a source group is empty", async () => { + // Second group has empty sourceKeyIndices — ConsolidationBus catches this after migrator passes it through + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [], targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "EmptyGroup") + .withArgs(1); + }); + + it("Should revert submitConsolidationBatch with TooManyGroups if groups exceed maxGroupsInBatch", async () => { + const agentSigner = await ctx.getSigner("agent"); + + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [1n], 
targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "TooManyGroups") + .withArgs(2, 1); + }); + + it("Should revert submitConsolidationBatch with BatchTooLarge if total keys exceed batchSize", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Reduce limits so a single group with 2 source keys exceeds the batch size + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + await consolidationBus.connect(agentSigner).setBatchSize(1); + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n, 1n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(2, 1); + }); + }); +}); diff --git a/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts b/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts index f550e3d31b..488bc2f0d3 100644 --- a/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts +++ b/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts @@ -18,7 +18,7 @@ import { RewardDistributionState, setAnnualBalanceIncreaseLimit, } from "lib"; -import { getProtocolContext, ProtocolContext, withCSM } from "lib/protocol"; +import { getProtocolContext, ProtocolContext, seedProtocolPendingBaseline, withCSM } from "lib/protocol"; import { reportWithoutExtraData } from "lib/protocol/helpers/accounting"; import { norSdvtEnsureOperators } from "lib/protocol/helpers/nor-sdvt"; import { removeStakingLimit, setModuleStakeShareLimit } from "lib/protocol/helpers/staking"; @@ -28,6 +28,7 @@ import { MAX_BASIS_POINTS, Snapshot } from "test/suite"; const MIN_KEYS_PER_OPERATOR = 5n; const MIN_OPERATORS_COUNT = 30n; +const MAIN_REPORT_EFFECTIVE_CL_REWARD = ether("1"); class ListKeyMapHelper { private map: Map = new Map(); @@ -237,13 +238,29 @@ describe("Integration: 
AccountingOracle extra data full items", () => { ); } + // This suite also relies on the reward-bearing main report to enter + // TransferredToModule before extra-data finalization. Snapshot protocol + // pending first so the original reward-bearing path remains reachable. + await seedProtocolPendingBaseline(ctx, SDVT_MODULE_ID); + + // Keep the original 1 ETH reward-bearing main report, but give the pending-backed + // safety cap enough elapsed time after snapshotting the pending baseline. + await advanceChainTime(15n * 24n * 60n * 60n); + const { submitter, extraDataChunks } = await reportWithoutExtraData( ctx, numExitedValidatorsByStakingModule, modulesWithExited, extraData, + { + // Snapshot protocol pending into the previous report first, then run the original + // reward-bearing main report so this suite still exercises + // TransferredToModule -> ReadyForDistribution -> Distributed. + effectiveClDiff: MAIN_REPORT_EFFECTIVE_CL_REWARD, + }, ); + // Make the main-report transition explicit before extra data finalization moves modules to ReadyForDistribution. 
await assertModulesRewardDistributionState(RewardDistributionState.TransferredToModule); for (let i = 0; i < extraDataChunks.length; i++) { diff --git a/test/integration/core/accounting-oracle-extra-data.integration.ts b/test/integration/core/accounting-oracle-extra-data.integration.ts index 23b1c721b4..4b0c752a1f 100644 --- a/test/integration/core/accounting-oracle-extra-data.integration.ts +++ b/test/integration/core/accounting-oracle-extra-data.integration.ts @@ -6,7 +6,13 @@ import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; import { advanceChainTime, ether, findEventsWithInterfaces, hexToBytes, RewardDistributionState } from "lib"; import { EXTRA_DATA_FORMAT_LIST, KeyType, prepareExtraData, setAnnualBalanceIncreaseLimit } from "lib/oracle"; -import { getProtocolContext, OracleReportParams, ProtocolContext, report } from "lib/protocol"; +import { + getProtocolContext, + OracleReportParams, + ProtocolContext, + report, + seedProtocolPendingBaseline, +} from "lib/protocol"; import { reportWithoutExtraData, waitNextAvailableReportTime } from "lib/protocol/helpers/accounting"; import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; @@ -14,6 +20,7 @@ import { MAX_BASIS_POINTS, Snapshot } from "test/suite"; const MODULE_ID = NOR_MODULE_ID; const NUM_NEWLY_EXITED_VALIDATORS = 1n; +const MAIN_REPORT_EFFECTIVE_CL_REWARD = ether("1"); const MAINNET_NOR_ADDRESS = "0x55032650b14df07b85bf18a3a3ec8e0af2e028d5".toLowerCase(); describe("Integration: AccountingOracle extra data", () => { @@ -38,6 +45,14 @@ describe("Integration: AccountingOracle extra data", () => { return nodeOperator.totalExitedValidators; } + { + const { lido } = ctx.contracts; + const reserveTarget = await lido.getDepositsReserveTarget(); + if (reserveTarget > 0n) { + const agent = await ctx.getSigner("agent"); + await lido.connect(agent).setDepositsReserveTarget(0n); + } + } { // Prepare exited keys extra data for reusing in tests const { oracleReportSanityChecker } = 
ctx.contracts; @@ -112,7 +127,21 @@ describe("Integration: AccountingOracle extra data", () => { // Add total exited validators for both entries const totalNewExited = NUM_NEWLY_EXITED_VALIDATORS + 1n; // First operator has 1, second has 1 - return await reportWithoutExtraData(ctx, [totalExitedValidators + totalNewExited], [NOR_MODULE_ID], extraData); + // The main report in this suite must stay reward-bearing because it drives the + // TransferredToModule -> ReadyForDistribution state machine. Snapshot protocol + // pending first so the original 1 ETH main report still reaches that phase path. + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); + + // Keep the original 1 ETH reward-bearing main report, but give the pending-backed + // safety cap enough elapsed time after snapshotting the pending baseline. + await advanceChainTime(15n * 24n * 60n * 60n); + + return await reportWithoutExtraData(ctx, [totalExitedValidators + totalNewExited], [NOR_MODULE_ID], extraData, { + // Snapshot protocol pending into the previous report first, then run the original + // reward-bearing main report so this suite still exercises + // TransferredToModule -> ReadyForDistribution. + effectiveClDiff: MAIN_REPORT_EFFECTIVE_CL_REWARD, + }); } it("should accept report with multiple keys per node operator (single chunk)", async () => { @@ -170,6 +199,8 @@ describe("Integration: AccountingOracle extra data", () => { const { accountingOracle } = ctx.contracts; const { submitter, extraDataChunks } = await submitMainReport(); + // Make the main-report transition explicit before extra data starts changing module state further. 
+ await assertModulesRewardDistributionState(RewardDistributionState.TransferredToModule); // Submit first chunk of extra data await accountingOracle.connect(submitter).submitReportExtraDataList(hexToBytes(extraDataChunks[0])); @@ -196,6 +227,8 @@ describe("Integration: AccountingOracle extra data", () => { const { accountingOracle } = ctx.contracts; const { submitter, extraDataChunks } = await submitMainReport(); + // Make the main-report transition explicit before extra data starts changing module state further. + await assertModulesRewardDistributionState(RewardDistributionState.TransferredToModule); // Submit first chunk of extra data await accountingOracle.connect(submitter).submitReportExtraDataList(hexToBytes(extraDataChunks[0])); diff --git a/test/integration/core/accounting-oracle-module-balances.integration.ts b/test/integration/core/accounting-oracle-module-balances.integration.ts new file mode 100644 index 0000000000..21953b24a1 --- /dev/null +++ b/test/integration/core/accounting-oracle-module-balances.integration.ts @@ -0,0 +1,364 @@ +import { expect } from "chai"; +import { getBigInt } from "ethers"; + +import { ether, ONE_GWEI } from "lib"; +import { + buildModuleAccountingReportParams, + depositValidatorsWithoutReport, + getNextReportContext, + getProtocolContext, + ProtocolContext, + report, + submitReportDataWithConsensus, + submitReportDataWithConsensusAndEmptyExtraData, + updateOracleReportLimits, +} from "lib/protocol"; + +import { Snapshot } from "test/suite"; + +const ONE_DAY = 24n * 60n * 60n; +const ONE_VALIDATOR_BALANCE_ETH = 32n; +const ONE_VALIDATOR_BALANCE = ether("32"); +const ONE_ETH = ether("1"); +const MAX_BASIS_POINTS = 10_000n; +const SECONDS_PER_YEAR = 365n * ONE_DAY; +const sumBigints = (values: bigint[]) => values.reduce((sum, value) => sum + value, 0n); + +describe("Integration: AccountingOracle module balances sanity", () => { + let ctx: ProtocolContext; + + let snapshot: string; + let originalState: string; + + before(async 
() => { + ctx = await getProtocolContext(); + snapshot = await Snapshot.take(); + + await submitModuleBalancesSanityBaseline(); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + after(async () => await Snapshot.restore(snapshot)); + + const getCurrentModuleReportState = async ({ + validatorsDeltaGweiByModule = new Map(), + }: { + validatorsDeltaGweiByModule?: Map; + } = {}) => { + const { stakingModuleIdsWithUpdatedBalance, validatorBalancesGweiByStakingModule } = + await buildModuleAccountingReportParams(ctx, { validatorsDeltaGweiByModule }); + const moduleIndexById = new Map( + stakingModuleIdsWithUpdatedBalance.map((moduleId, index) => [moduleId, index] as const), + ); + + return { stakingModuleIdsWithUpdatedBalance, validatorBalancesGweiByStakingModule, moduleIndexById }; + }; + + const withUpdatedModuleBalances = ( + currentValidatorBalancesGweiByStakingModule: bigint[], + moduleIndexById: Map, + balancesDeltaGweiByModule: Array<[bigint, bigint]>, + ) => { + const updatedValidatorBalancesGweiByStakingModule = [...currentValidatorBalancesGweiByStakingModule]; + + for (const [moduleId, balanceDeltaGwei] of balancesDeltaGweiByModule) { + const index = moduleIndexById.get(moduleId); + if (index === undefined) { + throw new Error(`Missing staking module ${moduleId} in router order`); + } + + updatedValidatorBalancesGweiByStakingModule[index] += balanceDeltaGwei; + } + + return updatedValidatorBalancesGweiByStakingModule; + }; + + const buildReportData = async ({ + clDiff, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + clPendingBalanceGwei, + }: { + clDiff: bigint; + stakingModuleIdsWithUpdatedBalance: bigint[]; + validatorBalancesGweiByStakingModule: bigint[]; + clPendingBalanceGwei: bigint; + }) => { + const { data } = await report(ctx, { + clDiff: clDiff + clPendingBalanceGwei * ONE_GWEI, //simulate full total increase + 
clPendingBalanceGwei: 0n, + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + waitNextReportTime: true, + }); + return { + ...data, + // extract pending balance from simulated total clBalance + clValidatorsBalanceGwei: BigInt(data.clValidatorsBalanceGwei) - clPendingBalanceGwei, + clPendingBalanceGwei, + }; + }; + + const submitModuleBalancesSanityBaseline = async () => { + const { data } = await report(ctx, { + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + }); + + await submitReportDataWithConsensusAndEmptyExtraData(ctx, data); + }; + + it("should accept a report that moves one module's pending balance into validators", async () => { + const { lido } = ctx.contracts; + + const validatorsDeltaGweiByModule = await depositValidatorsWithoutReport(ctx, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const norPendingBalanceBeforeGwei = balanceStatsBeforeReport.depositedSinceLastReport / ONE_GWEI; + const totalPendingBalanceBeforeGwei = norPendingBalanceBeforeGwei; + const totalValidatorsBalanceBeforeGwei = sumBigints(moduleReportState.validatorBalancesGweiByStakingModule); + + expect(balanceStatsBeforeReport.depositedSinceLastReport).to.equal(ONE_VALIDATOR_BALANCE); + + const pendingConsumedGwei = norPendingBalanceBeforeGwei; + expect(pendingConsumedGwei).to.be.gt(0n); + + const reportedValidatorsBalancesGwei = withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [...validatorsDeltaGweiByModule].reduce>((acc, [moduleId, delta]) => { + if (delta > 0n) { + expect(delta).to.equal(pendingConsumedGwei); + acc.push([moduleId, delta]); + } + return acc; + }, []), + ); + const validatorsBalanceAfterGwei = sumBigints(reportedValidatorsBalancesGwei); + const pendingBalanceAfterGwei = 0n; + + 
expect(validatorsBalanceAfterGwei).to.equal(totalValidatorsBalanceBeforeGwei + pendingConsumedGwei); + expect(pendingBalanceAfterGwei).to.equal(totalPendingBalanceBeforeGwei - pendingConsumedGwei); + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: reportedValidatorsBalancesGwei, + clPendingBalanceGwei: 0n, + }); + + await expect(submitReportDataWithConsensus(ctx, data)).to.not.be.reverted; + }); + + it("should reject a report whose module validators balances do not add up to the reported CL validators total", async () => { + const { lido, oracleReportSanityChecker } = ctx.contracts; + + await depositValidatorsWithoutReport(ctx, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const norPendingBalanceBeforeGwei = balanceStatsBeforeReport.depositedSinceLastReport / ONE_GWEI; + + expect(balanceStatsBeforeReport.depositedSinceLastReport).to.equal(ONE_VALIDATOR_BALANCE); + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: moduleReportState.validatorBalancesGweiByStakingModule, + clPendingBalanceGwei: norPendingBalanceBeforeGwei, + }); + const inconsistentData = { + ...data, + clValidatorsBalanceGwei: getBigInt(data.clValidatorsBalanceGwei) + 1n, + }; + + await expect(submitReportDataWithConsensus(ctx, inconsistentData)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "InconsistentValidatorsBalanceByModule", + ); + }); + + it("should reject a report that consumes more pending across modules than the global appeared limit allows", async () => { + const { oracleReportSanityChecker } = ctx.contracts; + const { 
reportTimeElapsed } = await getNextReportContext(ctx); + const perModuleAppearedLimitEthPerDay = + (ONE_VALIDATOR_BALANCE_ETH * ONE_DAY + reportTimeElapsed - 1n) / reportTimeElapsed; + + await updateOracleReportLimits(ctx, { + appearedEthAmountPerDayLimit: perModuleAppearedLimitEthPerDay, + consolidationEthAmountPerDayLimit: 0n, + }); + + const validatorsDeltaGweiByModule = await depositValidatorsWithoutReport(ctx, 2n); + const balanceStatsBeforeReport = await ctx.contracts.lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [...validatorsDeltaGweiByModule].reduce>((acc, [moduleId, delta]) => { + if (delta > 0n) { + acc.push([moduleId, delta]); + } + return acc; + }, []), + ), + clPendingBalanceGwei: 0n, + }); + + await expect(submitReportDataWithConsensus(ctx, data)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "IncorrectTotalActivatedBalance", + ); + }); + + it("should reject a report when positive module validators growth exceeds the module increase limit", async () => { + const { lido, oracleReportSanityChecker } = ctx.contracts; + const moduleGrowthExcessGwei = ONE_ETH / ONE_GWEI; + + const validatorsDeltaGweiByModule = await depositValidatorsWithoutReport(ctx, 2n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const { reportTimeElapsed } = await getNextReportContext(ctx); + const totalPendingBalanceBeforeWei = balanceStatsBeforeReport.depositedSinceLastReport; + const totalPendingBalanceBeforeGwei = totalPendingBalanceBeforeWei / ONE_GWEI; + const appearedLimitEthPerDay = + ((totalPendingBalanceBeforeWei / ONE_ETH) * ONE_DAY + 
reportTimeElapsed - 1n) / reportTimeElapsed; + + await updateOracleReportLimits(ctx, { + annualBalanceIncreaseBPLimit: 0n, + appearedEthAmountPerDayLimit: appearedLimitEthPerDay, + consolidationEthAmountPerDayLimit: 0n, + }); + + const moduleReportState = await getCurrentModuleReportState(); + const totalValidatorsBalanceBeforeGwei = sumBigints(moduleReportState.validatorBalancesGweiByStakingModule); + expect(totalPendingBalanceBeforeWei).to.equal(2n * ONE_VALIDATOR_BALANCE); + + const maxDeltaEntry = [...validatorsDeltaGweiByModule.entries()].reduce<[bigint, bigint] | undefined>( + (best, entry) => { + const [, delta] = entry; + return best === undefined || delta > best[1] ? entry : best; + }, + undefined, + ); + expect(maxDeltaEntry, "no module with positive validator delta found").to.not.equal(undefined); + const [grownModuleId] = maxDeltaEntry!; + + const donorModuleEntry = moduleReportState.stakingModuleIdsWithUpdatedBalance + .map((moduleId, index) => { + const balanceGwei = moduleReportState.validatorBalancesGweiByStakingModule[index]; + return [moduleId, balanceGwei] as const; + }) + .find(([moduleId, balanceGwei]) => { + return moduleId !== grownModuleId && balanceGwei > moduleGrowthExcessGwei; + }); + + expect( + donorModuleEntry, + "no other module has enough validators balance to offset moduleGrowthExcessGwei", + ).to.not.equal(undefined); + const [donorModuleId] = donorModuleEntry!; + + const reportedValidatorsBalancesGwei = withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [ + [grownModuleId, totalPendingBalanceBeforeGwei + moduleGrowthExcessGwei], + [donorModuleId, -moduleGrowthExcessGwei], + ], + ); + const validatorsBalanceAfterGwei = sumBigints(reportedValidatorsBalancesGwei); + + expect(validatorsBalanceAfterGwei).to.equal(totalValidatorsBalanceBeforeGwei + totalPendingBalanceBeforeGwei); + + const data = await buildReportData({ + clDiff: totalPendingBalanceBeforeWei, + 
stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: reportedValidatorsBalancesGwei, + clPendingBalanceGwei: 0n, + }); + + await expect(submitReportDataWithConsensus(ctx, data)) + .to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectTotalModuleValidatorsBalanceIncrease") + .withArgs(totalPendingBalanceBeforeWei, totalPendingBalanceBeforeWei + ONE_ETH); + }); + + it("should reject a report that grows module validators without consuming matching pending balance", async () => { + const { lido, oracleReportSanityChecker } = ctx.contracts; + + await updateOracleReportLimits(ctx, { annualBalanceIncreaseBPLimit: 1n }); + + const validatorsDeltaGweiByModule = await depositValidatorsWithoutReport(ctx, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const totalPendingBalanceBeforeGwei = balanceStatsBeforeReport.depositedSinceLastReport / ONE_GWEI; + const totalValidatorsBalanceBeforeGwei = sumBigints(moduleReportState.validatorBalancesGweiByStakingModule); + + expect(balanceStatsBeforeReport.clValidatorsBalanceAtLastReport / ONE_GWEI).to.equal( + totalValidatorsBalanceBeforeGwei, + ); + expect(balanceStatsBeforeReport.clValidatorsBalanceAtLastReport).to.be.gt(0n); + expect(balanceStatsBeforeReport.depositedSinceLastReport).to.equal(ONE_VALIDATOR_BALANCE); + expect(totalPendingBalanceBeforeGwei).to.be.gt(0n); + + const { reportTimeElapsed } = await getNextReportContext(ctx); + const { annualBalanceIncreaseBPLimit } = await oracleReportSanityChecker.getOracleReportLimits(); + const allowedValidatorsGrowthGwei = + (totalValidatorsBalanceBeforeGwei * annualBalanceIncreaseBPLimit * reportTimeElapsed) / + (SECONDS_PER_YEAR * MAX_BASIS_POINTS); + const excessiveValidatorsGrowthGwei = allowedValidatorsGrowthGwei + 1n; + const excessiveValidatorsGrowthWei = excessiveValidatorsGrowthGwei * ONE_GWEI; + + 
const reportedValidatorsBalancesGwei = withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [...validatorsDeltaGweiByModule].reduce>((acc, [moduleId, delta]) => { + if (delta > 0n) { + acc.push([moduleId, excessiveValidatorsGrowthGwei]); + } + return acc; + }, []), + ); + + const validatorsBalanceAfterGwei = sumBigints(reportedValidatorsBalancesGwei); + expect(validatorsBalanceAfterGwei).to.equal(totalValidatorsBalanceBeforeGwei + excessiveValidatorsGrowthGwei); + + const data = await buildReportData({ + clDiff: excessiveValidatorsGrowthWei, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: reportedValidatorsBalancesGwei, + clPendingBalanceGwei: totalPendingBalanceBeforeGwei, + }); + + const totalCLBalanceBeforeWei = + balanceStatsBeforeReport.clValidatorsBalanceAtLastReport + + balanceStatsBeforeReport.clPendingBalanceAtLastReport + + balanceStatsBeforeReport.depositedSinceLastReport; + const totalCLGrowthCapWei = + (totalCLBalanceBeforeWei * annualBalanceIncreaseBPLimit * reportTimeElapsed) / + (SECONDS_PER_YEAR * MAX_BASIS_POINTS); + + expect(totalCLGrowthCapWei).to.be.gte( + excessiveValidatorsGrowthWei, + "test precondition failed: total CL annual cap must stay above the crafted validator-only growth", + ); + + await expect(submitReportDataWithConsensus(ctx, data)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "IncorrectTotalCLBalanceIncrease", + ); + }); +}); diff --git a/test/integration/core/accounting.integration.ts b/test/integration/core/accounting.integration.ts index fa870bd28b..888d979858 100644 --- a/test/integration/core/accounting.integration.ts +++ b/test/integration/core/accounting.integration.ts @@ -4,9 +4,17 @@ import { ethers } from "hardhat"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { ether, impersonate, ONE_GWEI, updateBalance } from 
"lib"; +import { advanceChainTime, ether, impersonate, ONE_GWEI, updateBalance } from "lib"; import { LIMITER_PRECISION_BASE } from "lib/constants"; -import { getProtocolContext, getReportTimeElapsed, ProtocolContext, removeStakingLimit, report } from "lib/protocol"; +import { + getProtocolContext, + getReportTimeElapsed, + ProtocolContext, + removeStakingLimit, + report, + seedProtocolPendingBaseline, +} from "lib/protocol"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; import { Snapshot } from "test/suite"; import { MAX_BASIS_POINTS, ONE_DAY, SHARE_RATE_PRECISION } from "test/suite/constants"; @@ -98,7 +106,7 @@ describe("Integration: Accounting", () => { } async function readState() { - const { lido, accountingOracle, elRewardsVault, withdrawalVault, burner } = ctx.contracts; + const { lido, accountingOracle, elRewardsVault, withdrawalVault, burner, withdrawalQueue } = ctx.contracts; const lastProcessingRefSlot = await accountingOracle.getLastProcessingRefSlot(); const totalELRewardsCollected = await lido.getTotalELRewardsCollected(); @@ -108,6 +116,12 @@ describe("Integration: Accounting", () => { const elRewardsVaultBalance = await ethers.provider.getBalance(elRewardsVault); const withdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault); const burnerShares = await lido.sharesOf(burner); + const bufferedEther = await lido.getBufferedEther(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const depositableEther = await lido.getDepositableEther(); + const unfinalizedStETH = await withdrawalQueue.unfinalizedStETH(); return { lastProcessingRefSlot, @@ -118,6 +132,12 @@ describe("Integration: Accounting", () => { elRewardsVaultBalance, withdrawalVaultBalance, burnerShares, + bufferedEther, + depositsReserveTarget, + depositsReserve, + withdrawalsReserve, + depositableEther, + 
unfinalizedStETH, }; } @@ -134,6 +154,12 @@ describe("Integration: Accounting", () => { elRewardsVaultBalance, withdrawalVaultBalance, burnerShares, + bufferedEther, + depositsReserveTarget, + depositsReserve, + withdrawalsReserve, + depositableEther, + unfinalizedStETH, } = await readState(); expect(lastProcessingRefSlot).to.be.greaterThan( @@ -166,27 +192,59 @@ describe("Integration: Accounting", () => { beforeState.internalShares + (expectedDelta.internalShares ?? 0n), "Internal shares mismatch", ); + + expect(depositsReserveTarget).to.equal( + beforeState.depositsReserveTarget, + "Deposits reserve target should not change during report processing", + ); + const expectedDepositsReserve = bufferedEther < depositsReserveTarget ? bufferedEther : depositsReserveTarget; + expect(depositsReserve).to.equal( + expectedDepositsReserve, + "Deposits reserve should be synced to min(buffered ether, deposits reserve target)", + ); + expect(depositsReserve).to.be.lte(depositsReserveTarget, "Deposits reserve should not exceed target"); + expect(depositsReserve).to.be.lte(bufferedEther, "Deposits reserve should not exceed buffered ether"); + expect(depositableEther).to.equal( + bufferedEther - withdrawalsReserve, + "Depositable should equal buffered minus withdrawals reserve", + ); + expect(withdrawalsReserve).to.be.lte(unfinalizedStETH, "Withdrawals reserve should not exceed demand"); + expect(withdrawalsReserve).to.be.lte(bufferedEther, "Withdrawals reserve should not exceed buffered ether"); } async function expectTransferFeesEvents( reportTxReceipt: ContractTransactionReceipt, noRewards: boolean = false, ): Promise { - const { stakingRouter } = ctx.contracts; - - const stakingModulesCount = await stakingRouter.getStakingModulesCount(); - - const numberOfCSMModules = (await stakingRouter.getStakingModules()).filter( - (module) => module.name === "Community Staking", - ).length; + const { stakingRouter, csm, cmv2 } = ctx.contracts; const { amountOfETHLocked } = 
getWithdrawalParamsFromEvent(reportTxReceipt); const hasWithdrawals = amountOfETHLocked !== 0n; const transferSharesEvents = ctx.getEvents(reportTxReceipt, "TransferShares"); - const expectedRewardsDistributionEventsCount = noRewards - ? 0n - : BigInt(stakingModulesCount) + BigInt(numberOfCSMModules) + 2n; + let expectedRewardsDistributionEventsCount = 0n; + + if (!noRewards) { + expectedRewardsDistributionEventsCount = BigInt(await stakingRouter.getStakingModulesCount()) + 2n; // +1 initial mint, +1 for the treasury + if (csm !== undefined) { + if ((await stakingRouter.getModuleValidatorsBalance(ctx.modules.csm!.id)) > 0) { + // +1 for the CSM internal transfer + expectedRewardsDistributionEventsCount += 1n; + } else { + // no reward transfer to modules with 0 validators balance + expectedRewardsDistributionEventsCount -= 1n; + } + } + if (cmv2 !== undefined) { + if ((await stakingRouter.getModuleValidatorsBalance(ctx.modules.cmv2!.id)) > 0) { + // +1 for the CSM internal transfer + expectedRewardsDistributionEventsCount += 1n; + } else { + // no reward transfer to modules with 0 validators balance + expectedRewardsDistributionEventsCount -= 1n; + } + } + } const expectedWithdrawalsTransferEventCount = hasWithdrawals ? 
1n : 0n; expect(transferSharesEvents.length).to.equal( expectedWithdrawalsTransferEventCount + expectedRewardsDistributionEventsCount, @@ -227,7 +285,7 @@ describe("Integration: Accounting", () => { reportBurner: false, skipWithdrawals: true, }), - ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectCLBalanceIncrease(uint256)"); + ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectTotalCLBalanceIncrease"); }); it("Should account correctly with no CL rebase", async () => { @@ -251,6 +309,86 @@ describe("Integration: Accounting", () => { expect(sharesRateBefore).to.be.lessThanOrEqual(sharesRateAfter); }); + it("Should account correctly with non-zero deposits and withdrawals reserves", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + const agent = await ctx.getSigner("agent"); + + await lido.connect(agent).setDepositsReserveTarget(ether("10")); + await lido.connect(agent).submit(ZeroAddress, { value: ether("90") }); + await lido.connect(agent).approve(withdrawalQueue, ether("5")); + await withdrawalQueue.connect(agent).requestWithdrawals([ether("5")], agent.address); + await report(ctx, { + clDiff: 0n, + excludeVaultsBalances: true, + reportBurner: false, + skipWithdrawals: true, + dryRun: false, + }); + + const beforeState = await readState(); + expect(beforeState.depositsReserveTarget).to.equal(ether("10")); + expect(beforeState.depositsReserve).to.equal(ether("10")); + expect(beforeState.withdrawalsReserve).to.be.gt(0n); + const expectedWithdrawalsReserve = + beforeState.unfinalizedStETH < beforeState.bufferedEther - beforeState.depositsReserve + ? beforeState.unfinalizedStETH + : beforeState.bufferedEther - beforeState.depositsReserve; + expect(beforeState.withdrawalsReserve).to.equal(expectedWithdrawalsReserve); + expect(beforeState.depositableEther).to.equal(beforeState.bufferedEther - beforeState.withdrawalsReserve); + + // Deferred target increase must not change effective reserves before report processing. 
+ const increasedTarget = beforeState.bufferedEther + ether("1000"); + await lido.connect(agent).setDepositsReserveTarget(increasedTarget); + expect(await lido.getDepositsReserve()).to.equal(beforeState.depositsReserve); + expect(await lido.getWithdrawalsReserve()).to.equal(beforeState.withdrawalsReserve); + const beforeStateAfterTargetUpdate = await readState(); + expect(beforeStateAfterTargetUpdate.depositsReserveTarget).to.equal(increasedTarget); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + const dryRunParams = { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: false, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: true, + } as const; + + const dryRunBefore = await report(ctx, dryRunParams); + expect(dryRunBefore.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches in dry-run report", + ); + const [lockBefore] = await withdrawalQueue.prefinalize( + dryRunBefore.data.withdrawalFinalizationBatches, + dryRunBefore.data.simulatedShareRate, + ); + expect(lockBefore).to.be.lte(beforeStateAfterTargetUpdate.withdrawalsReserve); + + const { reportTx } = await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false }); + const reportTxReceipt = (await reportTx!.wait())!; + const { amountOfETHLocked, sharesBurntAmount } = getWithdrawalParamsFromEvent(reportTxReceipt); + + await expectStateChanges(beforeStateAfterTargetUpdate, { + totalELRewardsCollected: 0n, + internalEther: amountOfETHLocked * -1n, + internalShares: sharesBurntAmount * -1n, + lidoBalance: amountOfETHLocked * -1n, + }); + + const afterState = await readState(); + expect(afterState.depositsReserveTarget).to.equal(increasedTarget); + 
expect(afterState.depositsReserve).to.equal(afterState.bufferedEther); + expect(afterState.withdrawalsReserve).to.equal(0n); + expect(afterState.depositableEther).to.equal(afterState.bufferedEther); + }); + it("Should account correctly with negative CL rebase", async () => { const CL_REBASE_AMOUNT = ether("-100"); @@ -282,27 +420,26 @@ describe("Integration: Accounting", () => { it("Should account correctly with positive CL rebase close to the limits", async () => { const { lido, oracleReportSanityChecker } = ctx.contracts; + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); + const { annualBalanceIncreaseBPLimit } = await oracleReportSanityChecker.getOracleReportLimits(); - const { beaconBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); const { timeElapsed } = await getReportTimeElapsed(ctx); - // To calculate the rebase amount close to the annual increase limit - // we use (ONE_DAY + 1n) to slightly underperform for the daily limit - // This ensures we're testing a scenario very close to, but not exceeding, the annual limit - const time = timeElapsed + 1n; - let rebaseAmount = (beaconBalance * annualBalanceIncreaseBPLimit * time) / (365n * ONE_DAY) / MAX_BASIS_POINTS; + // `report()` submits the raw post-vs-pre CL delta. In this seeded scenario the + // pending baseline is activated inside the same report, so the raw boundary is + // the safety-cap component computed from the post-activation validators base. 
+ let rebaseAmount = + ((clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport) * annualBalanceIncreaseBPLimit * timeElapsed) / + (365n * ONE_DAY) / + MAX_BASIS_POINTS; rebaseAmount = roundToGwei(rebaseAmount); - // At this point, rebaseAmount represents a positive CL rebase that is - // just slightly below the maximum allowed daily increase, testing the system's - // behavior near its operational limits const beforeState = await readState(); // Report - const params = { clDiff: rebaseAmount, excludeVaultsBalances: true }; - - const { reportTx } = (await report(ctx, params)) as { + const { reportTx } = (await report(ctx, { clDiff: rebaseAmount, excludeVaultsBalances: true })) as { reportTx: TransactionResponse; extraDataTx: TransactionResponse; }; @@ -575,7 +712,10 @@ describe("Integration: Accounting", () => { await expectStateChanges(stateBefore, { internalShares: -1n * sharesBurntAmount, - burnerShares: -1n * sharesLimit, + // On Hoodi, this report can finalize withdrawal requests at the same time. + // WQ shares first arrive in Burner, and smoothing may leave part of them for + // the next report; sharesLimit itself is checked separately from withdrawal burn below. 
+ burnerShares: sharesToBurn - sharesBurntAmount, internalEther: -1n * amountOfETHLocked, lidoBalance: -1n * amountOfETHLocked, }); diff --git a/test/integration/core/burn-shares.integration.ts b/test/integration/core/burn-shares.integration.ts index 1960e547c6..8714131d76 100644 --- a/test/integration/core/burn-shares.integration.ts +++ b/test/integration/core/burn-shares.integration.ts @@ -66,11 +66,11 @@ describe("Scenario: Burn Shares", () => { const accountingSigner = await impersonate(accounting.address, ether("1")); await burner.connect(accountingSigner).requestBurnSharesForCover(stranger, sharesToBurn); - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const clBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; await handleOracleReport(ctx, { - beaconValidators, - clBalance: beaconBalance, + clBalance, sharesRequestedToBurn: sharesToBurn, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, diff --git a/test/integration/core/deposits-reserve.integration.ts b/test/integration/core/deposits-reserve.integration.ts new file mode 100644 index 0000000000..84d482e903 --- /dev/null +++ b/test/integration/core/deposits-reserve.integration.ts @@ -0,0 +1,404 @@ +import { expect } from "chai"; +import { ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { advanceChainTime, ether, impersonate, updateBalance } from "lib"; +import { + depositValidatorsWithoutReport, + finalizeWQViaSubmit, + getProtocolContext, + ProtocolContext, + report, + setStakingLimit, +} from "lib/protocol"; + +import { Snapshot, ZERO_HASH } from "test/suite"; + +describe("Integration: Deposits reserve", () => { + let ctx: ProtocolContext; + let snapshot: string; + let testSnapshot: string; + + let reserveManager: HardhatEthersSigner; + let 
holder: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + before(async () => { + ctx = await getProtocolContext(); + snapshot = await Snapshot.take(); + + [holder, stranger] = await ethers.getSigners(); + reserveManager = holder; + + await setStakingLimit(ctx, ether("200000"), ether("20")); + await finalizeWQViaSubmit(ctx); + + const { acl, lido } = ctx.contracts; + const agent = await ctx.getSigner("agent"); + const role = await lido.BUFFER_RESERVE_MANAGER_ROLE(); + const hasRole = await acl["hasPermission(address,address,bytes32)"](reserveManager.address, lido.address, role); + if (!hasRole) { + // Grant reserve management permission once for the non-agent actor used in ACL tests. + await acl.connect(agent).grantPermission(reserveManager.address, lido.address, role); + } + }); + + beforeEach(async () => { + testSnapshot = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(testSnapshot); + }); + + after(async () => { + await Snapshot.restore(snapshot); + }); + + it("Authorizes reserve target updates via BUFFER_RESERVE_MANAGER_ROLE only", async () => { + const { lido } = ctx.contracts; + + await expect(lido.connect(stranger).setDepositsReserveTarget(ether("1"))).to.be.revertedWith("APP_AUTH_FAILED"); + await expect(lido.connect(reserveManager).setDepositsReserveTarget(ether("1"))) + .to.emit(lido, "DepositsReserveTargetSet") + .withArgs(ether("1")); + }); + + it("Applies target decrease immediately and defers target increase until report sync", async () => { + const { lido } = ctx.contracts; + + const targetBefore = await lido.getDepositsReserveTarget(); + const reserveBeforeIncrease = await lido.getDepositsReserve(); + await lido.connect(holder).submit(ZeroAddress, { value: ether("100") }); + + const increasedTarget = targetBefore + ether("2"); + // Increase is stored in target immediately but reserve value is synchronized on report. 
+ await lido.connect(reserveManager).setDepositsReserveTarget(increasedTarget); + expect(await lido.getDepositsReserveTarget()).to.equal(increasedTarget); + + expect(await lido.getDepositsReserve()).to.equal(reserveBeforeIncrease); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserve()).to.equal(increasedTarget); + + const increasedAgain = increasedTarget + ether("10"); + await lido.connect(reserveManager).setDepositsReserveTarget(increasedAgain); + expect(await lido.getDepositsReserveTarget()).to.equal(increasedAgain); + expect(await lido.getDepositsReserve()).to.equal(increasedTarget); + + const decreasedTarget = increasedTarget - ether("1"); + // Decrease is applied immediately to avoid reducing withdrawals budget unexpectedly. + await lido.connect(reserveManager).setDepositsReserveTarget(decreasedTarget); + expect(await lido.getDepositsReserve()).to.equal(decreasedTarget); + }); + + it("Releases deposits reserve when target is set to zero and preserves reserve/depositable invariants", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + + const requestAmount = ether("5"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("100") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + await lido.connect(reserveManager).setDepositsReserveTarget(ether("40")); + // First set a non-zero effective deposits reserve, then verify explicit reset to zero. 
+ await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserve()).to.equal(ether("40")); + + await lido.connect(reserveManager).setDepositsReserveTarget(0n); + expect(await lido.getDepositsReserve()).to.equal(0n); + + const buffered = await lido.getBufferedEther(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const unfinalized = await withdrawalQueue.unfinalizedStETH(); + + const expectedWithdrawalsReserve = buffered < unfinalized ? buffered : unfinalized; + // With deposits reserve released, withdrawals reserve is bounded only by buffered and unfinalized demand. + expect(withdrawalsReserve).to.equal(expectedWithdrawalsReserve); + expect(await lido.getDepositableEther()).to.equal(buffered - expectedWithdrawalsReserve); + }); + + it("Reaches increased target on the next report after deferred increase", async () => { + const { lido } = ctx.contracts; + + await lido.connect(holder).submit(ZeroAddress, { value: ether("100") }); + await lido.connect(reserveManager).setDepositsReserveTarget(ether("40")); + // First report materializes initial target in effective reserve. + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserve()).to.equal(ether("40")); + + await lido.connect(reserveManager).setDepositsReserveTarget(ether("20")); + expect(await lido.getDepositsReserve()).to.equal(ether("20")); + + await lido.connect(reserveManager).setDepositsReserveTarget(ether("40")); + expect(await lido.getDepositsReserve()).to.equal(ether("20")); + + // Second report applies deferred increase back to the new target. 
+ await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + + expect(await lido.getDepositsReserveTarget()).to.equal(ether("40")); + expect(await lido.getDepositsReserve()).to.equal(ether("40")); + }); + + it("Computes finalization budget from withdrawal-available buffer, excluding deposits reserve", async () => { + const { lido, withdrawalQueue, locator } = ctx.contracts; + + const requestAmount = ether("1"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + const withdrawalsReserveBeforeProtection = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveBeforeProtection).to.be.gt(0n); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const buffered = await lido.getBufferedEther(); + // Set target above buffered ether so synced deposits reserve consumes the full buffer first. + await lido.connect(reserveManager).setDepositsReserveTarget(buffered + ether("1000")); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + + const elRewardsVaultAddress = await locator.elRewardsVault(); + const extraEthBudget = ether("5"); + await updateBalance(elRewardsVaultAddress, extraEthBudget); + + // The report is built for fixed refSlot, so deposits after refSlot must not increase its finalization budget. 
+ await lido.connect(holder).submit(ZeroAddress, { value: ether("3") }); + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Freeze report inputs at refSlot and evaluate finalization budget from dry-run output. + const { data } = await report(ctx, { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: true, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: false, + }); + + expect(data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches for tooling budget check", + ); + const [ethToLock] = await withdrawalQueue.prefinalize(data.withdrawalFinalizationBatches, data.simulatedShareRate); + + expect(ethToLock).to.be.lte(extraEthBudget); + }); + + it("Keeps fixed-refSlot finalization batches stable after late deposits", async () => { + const { lido, withdrawalQueue, locator } = ctx.contracts; + + const requestAmount = ether("1"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const depositsReserveBefore = await lido.getDepositsReserve(); + const depositsTargetBefore = await lido.getDepositsReserveTarget(); + + const elRewardsVaultAddress = await locator.elRewardsVault(); + await updateBalance(elRewardsVaultAddress, ether("3")); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Build dry-run report with explicit refSlot to make batches deterministic. 
+ const dryRunParams = { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: true, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: false, + } as const; + + const before = await report(ctx, dryRunParams); + expect(before.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches before late deposit", + ); + const [beforeLock] = await withdrawalQueue.prefinalize( + before.data.withdrawalFinalizationBatches, + before.data.simulatedShareRate, + ); + + // Late deposit after refSlot should not affect withdrawals finalization result. + await lido.connect(holder).submit(ZeroAddress, { value: ether("7") }); + expect(await lido.getDepositsReserveTarget()).to.equal(depositsTargetBefore); + expect(await lido.getDepositsReserve()).to.be.gte(depositsReserveBefore); + + const after = await report(ctx, dryRunParams); + expect(after.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches after late deposit", + ); + const [afterLock] = await withdrawalQueue.prefinalize( + after.data.withdrawalFinalizationBatches, + after.data.simulatedShareRate, + ); + + expect(afterLock).to.equal(beforeLock); + // Batches and ETH lock must stay unchanged: post-refSlot deposits must not affect finalization inputs. 
+ expect(after.data.withdrawalFinalizationBatches).to.deep.equal(before.data.withdrawalFinalizationBatches); + }); + + it("Keeps withdrawals finalization budget stable after reserve target increase post-refSlot", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + + const requestAmount = ether("20"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositsReserveBefore = await lido.getDepositsReserve(); + expect(withdrawalsReserveBefore).to.be.gt(0n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Build dry-run data at fixed refSlot, then change target and re-run with the same refSlot. + const dryRunParams = { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: false, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: true, + } as const; + + const before = await report(ctx, dryRunParams); + expect(before.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches before reserve target increase", + ); + const [beforeLock] = await withdrawalQueue.prefinalize( + before.data.withdrawalFinalizationBatches, + before.data.simulatedShareRate, + ); + + // Target increase after refSlot is deferred and must not affect current withdrawals finalization budget. 
+ await lido.connect(reserveManager).setDepositsReserveTarget(ether("120")); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + expect(await lido.getDepositsReserve()).to.equal(depositsReserveBefore); + + const after = await report(ctx, dryRunParams); + expect(after.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches after reserve target increase", + ); + const [afterLock] = await withdrawalQueue.prefinalize( + after.data.withdrawalFinalizationBatches, + after.data.simulatedShareRate, + ); + + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + expect(afterLock).to.be.lte(withdrawalsReserveBefore); + expect(beforeLock).to.be.lte(withdrawalsReserveBefore); + }); + + it("Does not reduce withdrawals reserve when CL deposits consume depositable ether", async () => { + const { lido, withdrawalQueue, stakingRouter, depositSecurityModule } = ctx.contracts; + + const requestAmount = ether("10"); + await lido.connect(reserveManager).submit(ZeroAddress, { value: ether("3200") }); + await lido.connect(reserveManager).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(reserveManager).requestWithdrawals([requestAmount], reserveManager.address); + + const bufferedBefore = await lido.getBufferedEther(); + const depositsReserveBefore = await lido.getDepositsReserve(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositableBefore = await lido.getDepositableEther(); + expect(withdrawalsReserveBefore).to.be.gt(0n); + expect(depositableBefore).to.equal(bufferedBefore - withdrawalsReserveBefore); + + const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); + // Spend depositable ether through CL deposit path. 
+ const depositTx = await stakingRouter.connect(dsmSigner).deposit(1n, ZERO_HASH); + await depositTx.wait(); + + const bufferedAfter = await lido.getBufferedEther(); + const depositsReserveAfter = await lido.getDepositsReserve(); + const withdrawalsReserveAfter = await lido.getWithdrawalsReserve(); + const depositableAfter = await lido.getDepositableEther(); + const consumed = bufferedBefore - bufferedAfter; + + expect(consumed).to.be.gt(0n, "Expected non-zero buffered ether consumption during CL deposit"); + // CL deposit consumes only depositable ether; withdrawals reserve must remain unchanged. + expect(depositsReserveAfter).to.be.lte(depositsReserveBefore); + expect(withdrawalsReserveAfter).to.equal(withdrawalsReserveBefore); + expect(depositableAfter).to.equal(depositableBefore - consumed); + expect(depositableAfter).to.equal(bufferedAfter - withdrawalsReserveAfter); + }); + + it("Keeps fixed-refSlot finalization budget bounded after spending depositable ether post-refSlot", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + + const requestAmount = ether("20"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const depositsReserveBefore = await lido.getDepositsReserve(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveBefore).to.be.gt(0n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Fix refSlot first, then spend depositable ether to emulate post-refSlot CL deposits. 
+ const reportParams = { + refSlot, + waitNextReportTime: false, + clDiff: 0n, + reportElVault: false, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: true, + } as const; + + const before = await report(ctx, { ...reportParams, dryRun: true }); + expect(before.data.withdrawalFinalizationBatches.length).to.be.gt(0); + const [lockBefore] = await withdrawalQueue.prefinalize( + before.data.withdrawalFinalizationBatches, + before.data.simulatedShareRate, + ); + + const bufferedBeforeSpend = await lido.getBufferedEther(); + await depositValidatorsWithoutReport(ctx, 1n); + const bufferedAfterSpend = await lido.getBufferedEther(); + expect(bufferedAfterSpend).to.be.lt(bufferedBeforeSpend); + + const depositsReserveAfterSpend = await lido.getDepositsReserve(); + const withdrawalsReserveAfterSpend = await lido.getWithdrawalsReserve(); + expect(depositsReserveAfterSpend).to.be.lte(depositsReserveBefore); + expect(withdrawalsReserveAfterSpend).to.equal(withdrawalsReserveBefore); + + const after = await report(ctx, { ...reportParams, dryRun: true }); + expect(after.data.withdrawalFinalizationBatches.length).to.be.gt(0); + const [lockAfter] = await withdrawalQueue.prefinalize( + after.data.withdrawalFinalizationBatches, + after.data.simulatedShareRate, + ); + expect(lockAfter).to.be.gt(0n); + // Finalization lock remains bounded by precomputed withdrawals reserve from fixed refSlot. 
+ expect(lockAfter).to.be.lte(withdrawalsReserveBefore); + expect(lockBefore).to.be.lte(withdrawalsReserveBefore); + }); +}); diff --git a/test/integration/core/happy-path.integration.ts b/test/integration/core/happy-path.integration.ts index 436edc679f..71c0c2d3ce 100644 --- a/test/integration/core/happy-path.integration.ts +++ b/test/integration/core/happy-path.integration.ts @@ -4,8 +4,9 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { advanceChainTime, batch, ether, impersonate, log, updateBalance } from "lib"; +import { advanceChainTime, batch, ether, impersonate, log, ONE_GWEI, updateBalance } from "lib"; import { + buildModuleAccountingReportParams, finalizeWQViaElVault, getProtocolContext, norSdvtEnsureOperators, @@ -14,9 +15,10 @@ import { removeStakingLimit, report, setStakingLimit, + submitReportDataWithConsensusAndEmptyExtraData, } from "lib/protocol"; -import { bailOnFailure, MAX_DEPOSIT, Snapshot, ZERO_HASH } from "test/suite"; +import { bailOnFailure, Snapshot, ZERO_HASH } from "test/suite"; import { LogDescriptionExtended } from "../../../lib/protocol/types"; @@ -32,6 +34,8 @@ describe("Scenario: Protocol Happy Path", () => { let uncountedStETHShares: bigint; let amountWithRewards: bigint; let depositCount: bigint; + let finalizedWithdrawalAmount: bigint; + let norPendingDepositsGwei: bigint; before(async () => { ctx = await getProtocolContext(); @@ -201,30 +205,41 @@ describe("Scenario: Protocol Happy Path", () => { it("Should deposit to staking modules", async () => { const { lido, withdrawalQueue, stakingRouter, depositSecurityModule } = ctx.contracts; + const agent = await ctx.getSigner("agent"); await lido.connect(stEthHolder).submit(ZeroAddress, { value: ether("3200") }); + await lido.connect(agent).setDepositsReserveTarget(ether("128")); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); const 
withdrawalsUnfinalizedStETH = await withdrawalQueue.unfinalizedStETH(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); const depositableEther = await lido.getDepositableEther(); const bufferedEtherBeforeDeposit = await lido.getBufferedEther(); - const expectedDepositableEther = bufferedEtherBeforeDeposit - withdrawalsUnfinalizedStETH; + const expectedDepositableEther = bufferedEtherBeforeDeposit - withdrawalsReserve; + expect(depositsReserveTarget).to.equal(ether("128"), "Deposits reserve target"); + expect(depositsReserve).to.equal(ether("128"), "Deposits reserve"); expect(depositableEther).to.equal(expectedDepositableEther, "Depositable ether"); + expect(withdrawalsReserve).to.be.lte(withdrawalsUnfinalizedStETH, "Withdrawals reserve should not exceed demand"); log.debug("Depositable ether", { "Buffered ether": ethers.formatEther(bufferedEtherBeforeDeposit), "Withdrawals unfinalized stETH": ethers.formatEther(withdrawalsUnfinalizedStETH), + "Withdrawals reserve": ethers.formatEther(withdrawalsReserve), "Depositable ether": ethers.formatEther(depositableEther), }); const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); const stakingModules = (await stakingRouter.getStakingModules()).filter((m) => m.id === 1n); depositCount = 0n; + norPendingDepositsGwei = 0n; let expectedBufferedEtherAfterDeposit = bufferedEtherBeforeDeposit; for (const module of stakingModules) { - const depositTx = await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, module.id, ZERO_HASH); + const depositTx = await stakingRouter.connect(dsmSigner).deposit(module.id, ZERO_HASH); const depositReceipt = (await depositTx.wait()) as ContractTransactionReceipt; const unbufferedEvent = ctx.getEvents(depositReceipt, "Unbuffered")[0]; const unbufferedAmount = unbufferedEvent?.args[0] || 0n; @@ -237,6 +252,7 @@ describe("Scenario: 
Protocol Happy Path", () => { }); depositCount += deposits; + norPendingDepositsGwei += unbufferedAmount / ONE_GWEI; expectedBufferedEtherAfterDeposit -= unbufferedAmount; } @@ -252,7 +268,7 @@ describe("Scenario: Protocol Happy Path", () => { }); it("Should rebase correctly", async () => { - const { lido, withdrawalQueue, locator, burner, nor, sdvt, stakingRouter, csm, accounting } = ctx.contracts; + const { lido, withdrawalQueue, locator, burner, nor, sdvt, stakingRouter, csm, cmv2, accounting } = ctx.contracts; const treasuryAddress = await locator.treasury(); const strangerBalancesBeforeRebase = await getBalances(stranger); @@ -295,9 +311,27 @@ describe("Scenario: Protocol Happy Path", () => { const treasuryBalanceBeforeRebase = await lido.sharesOf(treasuryAddress); - // 0.001 – to simulate rewards + const { depositedSinceLastReport } = await lido.getBalanceStats(); + + // Deposit() moved ETH into protocol pending, but the new sanity path takes its + // baseline from the previous Lido report snapshot rather than router-only state. + // Submit a neutral report first so the next reward-bearing report stays on the + // original "deposits activated + tiny positive CL reward" happy path. 
+ const { data: pendingBaselineData } = await report(ctx, { + clDiff: depositedSinceLastReport, + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + ...(await buildModuleAccountingReportParams(ctx)), + }); + await submitReportDataWithConsensusAndEmptyExtraData(ctx, { + ...pendingBaselineData, + clValidatorsBalanceGwei: BigInt(pendingBaselineData.clValidatorsBalanceGwei) - norPendingDepositsGwei, + clPendingBalanceGwei: norPendingDepositsGwei, + }); + const reportData: Partial = { - clDiff: ether("32") * depositCount + ether("0.001"), + clDiff: ether("0.001"), clAppearedValidators: depositCount, }; @@ -323,20 +357,38 @@ describe("Scenario: Protocol Happy Path", () => { const transferSharesEvents = ctx.getEvents(reportTxReceipt, "TransferShares"); let toBurnerTransfer, toNorTransfer, toSdvtTransfer: LogDescriptionExtended | undefined; - let numExpectedTransferEvents = Number(await stakingRouter.getStakingModulesCount()) + 2; // +1 for the treasury + let numExpectedTransferEvents = Number(await stakingRouter.getStakingModulesCount()) + 2; // +1 initial mint, +1 for the treasury if (wereWithdrawalsFinalized) { numExpectedTransferEvents += 1; // +1 for the burner transfer [toBurnerTransfer, , toNorTransfer, toSdvtTransfer] = transferEvents; } else { [, toNorTransfer, toSdvtTransfer] = transferEvents; } - const toTreasuryTransfer = transferEvents[numExpectedTransferEvents - 1]; - const toTreasuryTransferShares = transferSharesEvents[numExpectedTransferEvents - 1]; + + let toTreasuryTransferIdx = numExpectedTransferEvents - 1; if (csm !== undefined) { - // +1 for the CSM internal transfer - numExpectedTransferEvents += 1; + if ((await stakingRouter.getModuleValidatorsBalance(ctx.modules.csm!.id)) > 0) { + // +1 for the CSM internal transfer + numExpectedTransferEvents += 1; + toTreasuryTransferIdx -= 1; + } else { + // no reward transfer to modules with 0 validators balance + numExpectedTransferEvents -= 1; + } + } + if (cmv2 !== undefined) { + if 
((await stakingRouter.getModuleValidatorsBalance(ctx.modules.cmv2!.id)) > 0) { + // +1 for the CSM internal transfer + numExpectedTransferEvents += 1; + toTreasuryTransferIdx -= 1; + } else { + // no reward transfer to modules with 0 validators balance + numExpectedTransferEvents -= 1; + } } + const toTreasuryTransfer = transferEvents[toTreasuryTransferIdx]; + const toTreasuryTransferShares = transferSharesEvents[toTreasuryTransferIdx]; expect(transferEvents.length).to.equal(numExpectedTransferEvents, "Transfer events count"); @@ -549,7 +601,7 @@ describe("Scenario: Protocol Happy Path", () => { const lockedEtherAmountBeforeFinalization = await withdrawalQueue.getLockedEtherAmount(); - const reportParams = { clDiff: ether("0.0005") }; // simulate some rewards + const reportParams = { clDiff: 0n }; const { reportTx } = (await report(ctx, reportParams)) as { reportTx: TransactionResponse }; const reportTxReceipt = (await reportTx.wait()) as ContractTransactionReceipt; @@ -557,24 +609,27 @@ describe("Scenario: Protocol Happy Path", () => { const requestId = await withdrawalQueue.getLastRequestId(); const lockedEtherAmountAfterFinalization = await withdrawalQueue.getLockedEtherAmount(); - const expectedLockedEtherAmountAfterFinalization = lockedEtherAmountAfterFinalization - amountWithRewards; + const withdrawalFinalizedEvent = ctx.getEvents(reportTxReceipt, "WithdrawalsFinalized")[0]; + finalizedWithdrawalAmount = withdrawalFinalizedEvent.args.amountOfETHLocked; log.debug("Locked ether amount", { "Before finalization": ethers.formatEther(lockedEtherAmountBeforeFinalization), "After finalization": ethers.formatEther(lockedEtherAmountAfterFinalization), - "Amount with rewards": ethers.formatEther(amountWithRewards), + "Finalized amount": ethers.formatEther(finalizedWithdrawalAmount), }); expect(lockedEtherAmountBeforeFinalization).to.equal( - expectedLockedEtherAmountAfterFinalization, + lockedEtherAmountAfterFinalization - finalizedWithdrawalAmount, "Locked ether 
amount after finalization", ); - - const withdrawalFinalizedEvent = ctx.getEvents(reportTxReceipt, "WithdrawalsFinalized")[0]; + expect(amountWithRewards - finalizedWithdrawalAmount).to.be.lte( + 2n, + "Finalized amount should differ from requested amount by at most the documented dust", + ); expect(withdrawalFinalizedEvent?.args.toObject()).to.deep.include( { - amountOfETHLocked: amountWithRewards, + amountOfETHLocked: finalizedWithdrawalAmount, from: requestId, to: requestId, }, @@ -608,7 +663,7 @@ describe("Scenario: Protocol Happy Path", () => { const balanceBeforeClaim = await getBalances(stranger); expect(status.isFinalized).to.be.true; - expect(claimableEtherBeforeClaim).to.equal(amountWithRewards, "Claimable ether before claim"); + expect(claimableEtherBeforeClaim).to.equal(finalizedWithdrawalAmount, "Claimable ether before claim"); const claimTx = await withdrawalQueue.connect(stranger).claimWithdrawals([requestId], hints); const claimTxReceipt = (await claimTx.wait()) as ContractTransactionReceipt; @@ -621,7 +676,7 @@ describe("Scenario: Protocol Happy Path", () => { requestId, owner: stranger.address, receiver: stranger.address, - amountOfETH: amountWithRewards, + amountOfETH: finalizedWithdrawalAmount, }, "WithdrawalClaimed event", ); @@ -640,7 +695,7 @@ describe("Scenario: Protocol Happy Path", () => { const balanceAfterClaim = await getBalances(stranger); expect(balanceAfterClaim.ETH).to.equal( - balanceBeforeClaim.ETH + amountWithRewards - spentGas, + balanceBeforeClaim.ETH + finalizedWithdrawalAmount - spentGas, "ETH balance after claim", ); @@ -649,11 +704,11 @@ describe("Scenario: Protocol Happy Path", () => { log.debug("Locked ether amount", { "Before withdrawal": ethers.formatEther(lockedEtherAmountBeforeWithdrawal), "After claim": ethers.formatEther(lockedEtherAmountAfterClaim), - "Amount with rewards": ethers.formatEther(amountWithRewards), + "Finalized amount": ethers.formatEther(finalizedWithdrawalAmount), }); 
expect(lockedEtherAmountAfterClaim).to.equal( - lockedEtherAmountBeforeWithdrawal - amountWithRewards, + lockedEtherAmountBeforeWithdrawal - finalizedWithdrawalAmount, "Locked ether amount after claim", ); diff --git a/test/integration/core/lido-storage.integration.ts b/test/integration/core/lido-storage.integration.ts index 9d58e556d1..2275f8d1b6 100644 --- a/test/integration/core/lido-storage.integration.ts +++ b/test/integration/core/lido-storage.integration.ts @@ -45,3 +45,37 @@ describe("Integration: Lido storage slots after V3", () => { } }); }); + +describe("Integration: Lido storage slots after V4 (SRv3)", () => { + let ctx: ProtocolContext; + let snapshot: string; + + let stEthHolder: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + before(async () => { + ctx = await getProtocolContext(); + + [stEthHolder, stranger] = await ethers.getSigners(); + await updateBalance(stranger.address, ether("100000000")); + await updateBalance(stEthHolder.address, ether("100000000")); + + snapshot = await Snapshot.take(); + }); + + after(async () => await Snapshot.restore(snapshot)); + + it("Should have old storage slots zeroed in V4", async () => { + const lido = ctx.contracts.lido; + + const oldStorageSlots = { + CL_BALANCE_AND_CL_VALIDATORS_POSITION: streccak("lido.Lido.clBalanceAndClValidators"), + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION: streccak("lido.Lido.bufferedEtherAndDepositedValidators"), + }; + + for (const [key, value] of Object.entries(oldStorageSlots)) { + const storageValue = await ethers.provider.getStorage(lido, value); + expect(storageValue).to.equal(0n, `${key} storage slot at ${value} is not empty`); + } + }); +}); diff --git a/test/integration/core/negative-rebase.integration.ts b/test/integration/core/negative-rebase.integration.ts index ccd991d072..e43dc906d7 100644 --- a/test/integration/core/negative-rebase.integration.ts +++ b/test/integration/core/negative-rebase.integration.ts @@ -4,8 +4,14 @@ import { ethers } from 
"hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { ether } from "lib"; -import { getProtocolContext, ProtocolContext, report } from "lib/protocol"; +import { ether, impersonate } from "lib"; +import { + getDepositedSinceLastReport, + getProtocolContext, + ProtocolContext, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, +} from "lib/protocol"; import { Snapshot } from "test/suite"; @@ -55,65 +61,155 @@ describe("Integration: Negative rebase", () => { return exited; }; + const ensureAtLeastOneStoredReport = async () => { + const reportDataCount = await ctx.contracts.oracleReportSanityChecker.getReportDataCount(); + if (reportDataCount === 0n) { + await reportWithEffectiveClDiff(ctx, 0n, { + skipWithdrawals: true, + excludeVaultsBalances: true, + }); + } + }; + it("Should store correctly exited validators count", async () => { const { locator, oracleReportSanityChecker } = ctx.contracts; - expect((await locator.oracleReportSanityChecker()) == oracleReportSanityChecker.address); + expect(await locator.oracleReportSanityChecker()).to.equal(oracleReportSanityChecker.address); const currentExited = await exitedValidatorsCount(); const reportExitedValidators = currentExited.get(1n) ?? 0n; - - // On upgrade OracleReportSanityChecker is new and not provisioned thus has no reports - if ((await oracleReportSanityChecker.getReportDataCount()) === 0n) { - await report(ctx, { - clDiff: ether("0"), - skipWithdrawals: true, - clAppearedValidators: 0n, - }); - } - - await report(ctx, { - clDiff: ether("0"), + await ensureAtLeastOneStoredReport(); + const reportDataCountBefore = await oracleReportSanityChecker.getReportDataCount(); + + // On Hoodi after the SRv3 migration, Lido has pending deposits. + // `report(ctx, { clDiff: 0 })` means raw postCL - preCL = 0, which looks + // to the sanity checker like a CL decrease by the amount of those deposits. 
+ // This report must be effective-neutral relative to principal CL balance. + await reportWithEffectiveClDiff(ctx, 0n, { skipWithdrawals: true, clAppearedValidators: 0n, + reportElVault: false, stakingModuleIdsWithNewlyExitedValidators: [1n], numExitedValidatorsByStakingModule: [reportExitedValidators + 2n], }); - const count = await oracleReportSanityChecker.getReportDataCount(); - expect(count).to.be.greaterThanOrEqual(2); - - const lastReportData = await oracleReportSanityChecker.reportData(count - 1n); - const beforeLastReportData = await oracleReportSanityChecker.reportData(count - 2n); + const reportDataCountAfter = await oracleReportSanityChecker.getReportDataCount(); + expect(reportDataCountAfter).to.equal(reportDataCountBefore + 1n); - const lastExitedTotal = Array.from(currentExited.values()).reduce((acc, val) => acc + val, 0n); + const updatedExited = await exitedValidatorsCount(); + const updatedExitedForModule = updatedExited.get(1n) ?? 0n; + const totalExitedBefore = Array.from(currentExited.values()).reduce((acc, val) => acc + val, 0n); + const totalExitedAfter = Array.from(updatedExited.values()).reduce((acc, val) => acc + val, 0n); - expect(lastReportData.totalExitedValidators).to.be.equal(lastExitedTotal + 2n); - expect(beforeLastReportData.totalExitedValidators).to.be.equal(lastExitedTotal); + expect(updatedExitedForModule).to.be.equal(reportExitedValidators + 2n); + expect(totalExitedAfter).to.be.equal(totalExitedBefore + 2n); }); - // 56 weeks of negative rebases is too much for the test and it breaks with the SocketError: other side closed - it.skip("Should store correctly many negative rebases", async () => { + it("Should store correctly many negative rebases", async () => { const { locator, oracleReportSanityChecker } = ctx.contracts; - expect((await locator.oracleReportSanityChecker()) == oracleReportSanityChecker.address); + expect(await locator.oracleReportSanityChecker()).to.equal(oracleReportSanityChecker.address); + + // After 
migration, the sanity checker stores the current withdrawal vault balance as baseline. + // The reset report must not report the withdrawal vault as 0, otherwise `_getCLWithdrawals` + // fails before the negative rebase check. + await resetCLBalanceDecreaseWindow(ctx, { + excludeVaultsBalances: false, + reportElVault: false, + }); + await ensureAtLeastOneStoredReport(); + + const REPORTS_REPEATED = 10; + const CL_DIFF_PER_REPORT = -1000000000n; // effective -1 gwei per report relative to principal CL balance + let reportDataCount = await oracleReportSanityChecker.getReportDataCount(); + expect(reportDataCount).to.be.gt(0n); + let previousCLBalance = (await oracleReportSanityChecker.reportData(reportDataCount - 1n)).clBalance; - const REPORTS_REPEATED = 56; - const SINGLE_REPORT_DECREASE = -1000000000n; for (let i = 0; i < REPORTS_REPEATED; i++) { - await report(ctx, { - clDiff: SINGLE_REPORT_DECREASE * BigInt(i + 1), + const depositedSinceLastReport = await getDepositedSinceLastReport(ctx); + + await reportWithEffectiveClDiff(ctx, CL_DIFF_PER_REPORT, { skipWithdrawals: true, - reportWithdrawalsVault: false, reportElVault: false, }); + + reportDataCount += 1n; + const reportCountAfter = await oracleReportSanityChecker.getReportDataCount(); + expect(reportCountAfter).to.equal(reportDataCount); + + const lastReportData = await oracleReportSanityChecker.reportData(reportDataCount - 1n); + const expectedCurrentCLBalance = previousCLBalance + depositedSinceLastReport + CL_DIFF_PER_REPORT; + + expect(lastReportData.clBalance).to.equal(expectedCurrentCLBalance); + expect(lastReportData.clBalance).to.be.lt(previousCLBalance + depositedSinceLastReport); + previousCLBalance = lastReportData.clBalance; + } + }); + + // Tests the sliding window CL decrease check by calling checkAccountingOracleReport + // directly with zero deposits/withdrawals (so adjustedBase == raw baseline balance). 
+ it("Should revert with IncorrectCLBalanceDecrease on gradual negative rebases", async () => { + const { oracleReportSanityChecker, accounting, withdrawalVault } = ctx.contracts; + + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const withdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault); + + const reportDataCount = await oracleReportSanityChecker.getReportDataCount(); + let currentBalance = + reportDataCount === 0n + ? ether("1000000") + : (await oracleReportSanityChecker.reportData(reportDataCount - 1n)).clBalance; + + // This direct call bypasses helper report(), so it must pass the reported withdrawal vault balance itself. + // On Hoodi after migration, the sanity checker baseline is non-zero; passing 0 here masks + // the intended IncorrectCLBalanceDecrease check with a withdrawal vault balance error. + const reportFromAccounting = (preBalance: bigint, postBalance: bigint) => + oracleReportSanityChecker + .connect(accountingSigner) + .checkAccountingOracleReport( + 24n * 60n * 60n, + preBalance, + 0n, + postBalance, + 0n, + withdrawalVaultBalance, + 0n, + 0n, + 0n, + 0n, + ); + + // REPORTS_WINDOW in contract is 36 (private constant, no getter). + // Fill window + 1 neutral data points to fully control the baseline. + const REPORTS_WINDOW = 36; + for (let i = 0; i < REPORTS_WINDOW + 1; ++i) { + await reportFromAccounting(currentBalance, currentBalance); } - const count = await oracleReportSanityChecker.getReportDataCount(); - expect(count).to.be.greaterThanOrEqual(REPORTS_REPEATED + 1); - for (let i = count - 1n, j = REPORTS_REPEATED - 1; i >= 0 && j >= 0; --i, --j) { - const reportData = await oracleReportSanityChecker.reportData(i); - expect(reportData.negativeCLRebaseWei).to.be.equal(-1n * SINGLE_REPORT_DECREASE * BigInt(j + 1)); + // Derive the number of 1% decreases that fit under the limit from the actual config. 
+ const limits = await oracleReportSanityChecker.getOracleReportLimits(); + const maxDecreaseBP = limits.maxCLBalanceDecreaseBP; + const DECREASE_PER_REPORT_BP = 100n; // 1% + + let passingReports = 0; + let cumulativeBalanceBP = 10_000n; + while (true) { + const next = cumulativeBalanceBP - (cumulativeBalanceBP * DECREASE_PER_REPORT_BP) / 10_000n; + if (10_000n - next > maxDecreaseBP) break; + cumulativeBalanceBP = next; + passingReports++; } + + for (let i = 0; i < passingReports; ++i) { + const decreasedBalance = currentBalance - (currentBalance * DECREASE_PER_REPORT_BP) / 10_000n; + await reportFromAccounting(currentBalance, decreasedBalance); + currentBalance = decreasedBalance; + } + + const nextDecreasedBalance = currentBalance - (currentBalance * DECREASE_PER_REPORT_BP) / 10_000n; + await expect(reportFromAccounting(currentBalance, nextDecreasedBalance)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "IncorrectCLBalanceDecrease", + ); }); }); diff --git a/test/integration/core/second-opinion.integration.ts b/test/integration/core/second-opinion.integration.ts index 919ef4a0aa..ccb698f81c 100644 --- a/test/integration/core/second-opinion.integration.ts +++ b/test/integration/core/second-opinion.integration.ts @@ -3,18 +3,20 @@ import { ethers } from "hardhat"; import { SecondOpinionOracle__Mock } from "typechain-types"; -import { ether, impersonate, log, ONE_GWEI } from "lib"; -import { getProtocolContext, ProtocolContext, report } from "lib/protocol"; +import { ether, log, ONE_GWEI } from "lib"; +import { + depositValidatorsWithoutReport, + getProtocolContext, + ProtocolContext, + report, + resetCLBalanceDecreaseWindow, +} from "lib/protocol"; import { bailOnFailure, Snapshot } from "test/suite"; const AMOUNT = ether("100"); -const MAX_DEPOSIT = 150n; -const CURATED_MODULE_ID = 1n; const INITIAL_REPORTED_BALANCE = ether("32") * 3n; // 32 ETH * 3 validators -const ZERO_HASH = new Uint8Array(32).fill(0); - // Diff amount is 10% of total supply 
function getDiffAmount(totalSupply: bigint): bigint { return (totalSupply / 10n / ONE_GWEI) * ONE_GWEI; @@ -34,7 +36,7 @@ describe("Integration: Second opinion", () => { snapshot = await Snapshot.take(); - const { lido, depositSecurityModule, oracleReportSanityChecker } = ctx.contracts; + const { lido, oracleReportSanityChecker } = ctx.contracts; const { chainId } = await ethers.provider.getNetwork(); // Sepolia-specific initialization @@ -51,8 +53,9 @@ describe("Integration: Second opinion", () => { await bepoliaToken.connect(bepiloaSigner).transfer(adapterAddr, BEPOLIA_TO_TRANSFER); } - const dsmSigner = await impersonate(depositSecurityModule.address, AMOUNT); - await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, CURATED_MODULE_ID, ZERO_HASH); + // On Hoodi after SRv3 allocation, a raw router deposit into NOR can return `ZeroDeposits()` + // unless the test first prepares Lido buffered ETH and module deposit limits. + await depositValidatorsWithoutReport(ctx, 1n); secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock", []); const soAddress = await secondOpinion.getAddress(); @@ -62,19 +65,26 @@ describe("Integration: Second opinion", () => { .connect(agentSigner) .grantRole(await oracleReportSanityChecker.SECOND_OPINION_MANAGER_ROLE(), agentSigner.address); - let { beaconBalance } = await lido.getBeaconStat(); + let balanceStats = await lido.getBalanceStats(); + let clBalance = balanceStats.clValidatorsBalanceAtLastReport + balanceStats.clPendingBalanceAtLastReport; // Report initial balances if TVL is zero - if (beaconBalance === 0n) { + if (clBalance === 0n) { await report(ctx, { clDiff: INITIAL_REPORTED_BALANCE, clAppearedValidators: 3n, excludeVaultsBalances: true, }); - beaconBalance = (await lido.getBeaconStat()).beaconBalance; + balanceStats = await lido.getBalanceStats(); + clBalance = balanceStats.clValidatorsBalanceAtLastReport + balanceStats.clPendingBalanceAtLastReport; } - totalSupply = beaconBalance; - await 
oracleReportSanityChecker.connect(agentSigner).setSecondOpinionOracleAndCLBalanceUpperMargin(soAddress, 74n); + + // Normalize CL decrease window and consume pending deposits to make + // second-opinion checks deterministic across different scratch states. + await resetCLBalanceDecreaseWindow(ctx); + + balanceStats = await lido.getBalanceStats(); + totalSupply = balanceStats.clValidatorsBalanceAtLastReport + balanceStats.clPendingBalanceAtLastReport; }); beforeEach(bailOnFailure); diff --git a/test/integration/core/withdrawal-edge-cases.integration.ts b/test/integration/core/withdrawal-edge-cases.integration.ts index 7c83e1106b..399470a50f 100644 --- a/test/integration/core/withdrawal-edge-cases.integration.ts +++ b/test/integration/core/withdrawal-edge-cases.integration.ts @@ -1,4 +1,5 @@ import { expect } from "chai"; +import { ZeroAddress } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; @@ -6,8 +7,19 @@ import { setBalance, time } from "@nomicfoundation/hardhat-network-helpers"; import { Lido, WithdrawalQueueERC721 } from "typechain-types"; -import { ether, findEventsWithInterfaces } from "lib"; -import { finalizeWQViaSubmit, getProtocolContext, ProtocolContext, report } from "lib/protocol"; +import { certainAddress, ether, findEventsWithInterfaces, impersonate, toGwei } from "lib"; +import { + buildModuleAccountingReportParams, + depositValidatorsWithoutReport, + finalizeWQViaSubmit, + getProtocolContext, + ProtocolContext, + report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, +} from "lib/protocol"; +import { adjustReportModuleBalances } from "lib/protocol/helpers/accounting"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; import { Snapshot } from "test/suite"; @@ -19,6 +31,60 @@ describe("Integration: Withdrawal edge cases", () => { let holder: HardhatEthersSigner; let lido: Lido; let wq: WithdrawalQueueERC721; + const 
DEPOSITS_RESERVE_TARGET = ether("25"); + + const assertBufferAllocationInvariants = async () => { + const buffered = await lido.getBufferedEther(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const depositable = await lido.getDepositableEther(); + const unfinalized = await wq.unfinalizedStETH(); + + expect(depositsReserveTarget).to.equal(DEPOSITS_RESERVE_TARGET, "Deposits reserve target mismatch"); + expect(depositsReserve).to.be.lte(buffered, "Deposits reserve should not exceed buffered ether"); + expect(depositsReserve).to.be.lte(depositsReserveTarget, "Deposits reserve should not exceed target"); + expect(depositable).to.equal(buffered - withdrawalsReserve, "Depositable should equal buffered minus reserve"); + expect(withdrawalsReserve).to.be.lte(unfinalized, "Reserve should not exceed unfinalized withdrawals demand"); + expect(withdrawalsReserve).to.be.lte(buffered, "Reserve should not exceed buffered ether"); + }; + + const reportWithEffectiveClDiffUsingCurrentModuleBalances = async ( + effectiveClDiff: bigint, + skipWithdrawals = false, + ) => { + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport, depositedSinceLastReport } = + await ctx.contracts.lido.getBalanceStats(); + const postCLBalanceWei = + clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport + depositedSinceLastReport + effectiveClDiff; + + await reportWithEffectiveClDiff(ctx, effectiveClDiff, { + excludeVaultsBalances: true, + skipWithdrawals, + ...adjustReportModuleBalances(await buildModuleAccountingReportParams(ctx), toGwei(postCLBalanceWei)), + }); + }; + + const activateDepositedValidators = async (depositsCount: bigint) => { + await depositValidatorsWithoutReport(ctx, depositsCount); + + const { lido: lidoContract } = ctx.contracts; + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport, 
depositedSinceLastReport } = + await lidoContract.getBalanceStats(); + + const validatorsDeltaGweiByModule = new Map([[NOR_MODULE_ID, toGwei(depositedSinceLastReport)]]); + const postCLBalanceWei = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport + depositedSinceLastReport; + + await report(ctx, { + clDiff: depositedSinceLastReport, + excludeVaultsBalances: true, + skipWithdrawals: true, + ...adjustReportModuleBalances( + await buildModuleAccountingReportParams(ctx, { validatorsDeltaGweiByModule }), + toGwei(postCLBalanceWei), + ), + }); + }; before(async () => { ctx = await getProtocolContext(); @@ -31,6 +97,9 @@ describe("Integration: Withdrawal edge cases", () => { await setBalance(holder.address, ether("1000000")); await finalizeWQViaSubmit(ctx); + + const agent = await ctx.getSigner("agent"); + await lido.connect(agent).setDepositsReserveTarget(DEPOSITS_RESERVE_TARGET); }); after(async () => await Snapshot.restore(snapshot)); @@ -39,6 +108,8 @@ describe("Integration: Withdrawal edge cases", () => { beforeEach(async () => (originalState = await Snapshot.take())); afterEach(async () => await Snapshot.restore(originalState)); it("Should handle bunker mode with multiple batches", async () => { + await resetCLBalanceDecreaseWindow(ctx); + const amount = ether("100"); const withdrawalAmount = ether("10"); @@ -46,12 +117,14 @@ describe("Integration: Withdrawal edge cases", () => { await lido.connect(holder).approve(wq.target, amount); await lido.connect(holder).submit(ethers.ZeroAddress, { value: amount }); + await assertBufferAllocationInvariants(); + + await activateDepositedValidators(1n); const stethInitialBalance = await lido.balanceOf(holder.address); - // reportBurner: false — pre-existing Burner cover/non-cover shares on the fork would - // burn during the report and produce a positive rebase that masks the clDiff we set. 
- await report(ctx, { clDiff: ether("-1"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-1")); + await assertBufferAllocationInvariants(); const stethFirstNegativeReportBalance = await lido.balanceOf(holder.address); @@ -64,7 +137,8 @@ describe("Integration: Withdrawal edge cases", () => { const [firstRequestEvent] = findEventsWithInterfaces(firstRequestReceipt!, "WithdrawalRequested", [wq.interface]); const firstRequestId = firstRequestEvent!.args.requestId; - await report(ctx, { clDiff: ether("-0.1"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-0.1")); + await assertBufferAllocationInvariants(); const stethSecondNegativeReportBalance = await lido.balanceOf(holder.address); @@ -87,7 +161,8 @@ describe("Integration: Withdrawal edge cases", () => { expect(firstStatus.amountOfStETH).to.equal(secondStatus.amountOfStETH); expect(firstStatus.amountOfShares).to.be.lt(secondStatus.amountOfShares); - await report(ctx, { clDiff: ether("0.0001"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.0001")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.false; @@ -98,7 +173,6 @@ describe("Integration: Withdrawal edge cases", () => { const lastCheckpointIndex = await wq.getLastCheckpointIndex(); const hints = await wq.findCheckpointHints([...requestIds], 1, lastCheckpointIndex); - const claimTx = await wq.connect(holder).claimWithdrawals([...requestIds], [...hints]); const claimReceipt = await claimTx.wait(); @@ -115,14 +189,20 @@ describe("Integration: Withdrawal edge cases", () => { afterEach(async () => await Snapshot.restore(originalState)); it("should handle missed oracle report", async () => { + const whale = await impersonate(certainAddress("provision:eth:whale"), ether("1000000")); + await 
lido.connect(whale).submit(ZeroAddress, { value: DEPOSITS_RESERVE_TARGET + ether("5") }); + const amount = ether("100"); expect(await lido.balanceOf(holder.address)).to.equal(0); // Submit initial stETH deposit await lido.connect(holder).submit(ethers.ZeroAddress, { value: amount }); + await assertBufferAllocationInvariants(); - await report(ctx, { clDiff: ether("0.001"), excludeVaultsBalances: true, reportBurner: false }); + await activateDepositedValidators(3n); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.001")); + await assertBufferAllocationInvariants(); // Create withdrawal request await lido.connect(holder).approve(wq.target, amount); @@ -143,7 +223,8 @@ describe("Integration: Withdrawal edge cases", () => { expect(status.isFinalized).to.be.false; // Submit next report to finalize request - await report(ctx, { clDiff: ether("0.001"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.001")); + await assertBufferAllocationInvariants(); // Verify request finalized const [finalizedStatus] = await wq.getWithdrawalStatus([...requestIds]); @@ -170,15 +251,20 @@ describe("Integration: Withdrawal edge cases", () => { after(async () => await Snapshot.restore(originalState)); it("should handle first rebase correctly", async () => { + await resetCLBalanceDecreaseWindow(ctx); + const amount = ether("100"); expect(await lido.balanceOf(holder.address)).to.equal(0); await lido.connect(holder).approve(wq.target, amount); await lido.connect(holder).submit(ethers.ZeroAddress, { value: amount }); + await assertBufferAllocationInvariants(); // First rebase - positive - await report(ctx, { clDiff: ether("0.001"), excludeVaultsBalances: true, reportBurner: false }); + await activateDepositedValidators(1n); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.0000001")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.false; // 
Create first withdrawal request @@ -190,7 +276,8 @@ describe("Integration: Withdrawal edge cases", () => { it("should handle second (negative) rebase correctly", async () => { // Second rebase - negative - await report(ctx, { clDiff: ether("-0.1"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-0.1")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.true; // Verify first request finalized @@ -208,7 +295,8 @@ describe("Integration: Withdrawal edge cases", () => { it("should handle third (negative) rebase correctly", async () => { // Third rebase - negative - await report(ctx, { clDiff: ether("-0.1"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-0.1")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.true; // Create third withdrawal request @@ -220,7 +308,8 @@ describe("Integration: Withdrawal edge cases", () => { it("should handle fourth (positive) rebase correctly", async () => { // Fourth rebase - positive - await report(ctx, { clDiff: ether("0.0000001"), excludeVaultsBalances: true, reportBurner: false }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.0000001")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.false; // Verify all requests finalized @@ -239,9 +328,13 @@ describe("Integration: Withdrawal edge cases", () => { // Verify claimed amounts const claimEvents = findEventsWithInterfaces(claimReceipt!, "WithdrawalClaimed", [wq.interface]); - expect(claimEvents![0].args.amountOfETH).to.be.lt(withdrawalAmount); - expect(claimEvents![1].args.amountOfETH).to.be.lt(withdrawalAmount); - expect(claimEvents![2].args.amountOfETH).to.equal(withdrawalAmount); + const firstClaimed = claimEvents![0].args.amountOfETH; + const secondClaimed = claimEvents![1].args.amountOfETH; + const 
thirdClaimed = claimEvents![2].args.amountOfETH; + + expect(firstClaimed).to.be.lt(withdrawalAmount); + expect(secondClaimed).to.be.lt(withdrawalAmount); + expect(thirdClaimed).to.equal(withdrawalAmount); }); }); }); diff --git a/test/integration/core/withdrawal-happy-path.integration.ts b/test/integration/core/withdrawal-happy-path.integration.ts index 94cb5fed9b..467282b63f 100644 --- a/test/integration/core/withdrawal-happy-path.integration.ts +++ b/test/integration/core/withdrawal-happy-path.integration.ts @@ -42,6 +42,10 @@ describe("Integration: Withdrawal happy path", () => { .grantPermission(agentSigner.address, lido.address, await lido.STAKING_CONTROL_ROLE()); await lido.connect(agentSigner).removeStakingLimit(); await lido.connect(holder).submit(ethers.ZeroAddress, { value: ether("10000") }); + await lido.connect(agentSigner).setDepositsReserveTarget(ether("100")); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserveTarget()).to.equal(ether("100")); + expect(await lido.getDepositsReserve()).to.equal(ether("100")); expect(await lido.balanceOf(holder.address)).to.be.gte(REQUESTS_SUM); // Get initial state @@ -53,6 +57,18 @@ describe("Integration: Withdrawal happy path", () => { const lastFinalizedRequestId = await wq.getLastFinalizedRequestId(); const lastCheckpointIndexBefore = await wq.getLastCheckpointIndex(); const unfinalizedSteth = await wq.unfinalizedStETH(); + const bufferedEtherBeforeRequest = await lido.getBufferedEther(); + const withdrawalsReserveBeforeRequest = await lido.getWithdrawalsReserve(); + const depositableBeforeRequest = await lido.getDepositableEther(); + + expect(depositableBeforeRequest).to.equal( + bufferedEtherBeforeRequest - withdrawalsReserveBeforeRequest, + "Depositable should equal buffered minus withdrawals reserve", + ); + expect(withdrawalsReserveBeforeRequest).to.be.lte( + unfinalizedSteth, + "Withdrawals reserve should not 
exceed unfinalized demand", + ); const preReportRequestShares = await lido.getSharesByPooledEth(REQUEST_AMOUNT); @@ -115,6 +131,14 @@ describe("Integration: Withdrawal happy path", () => { reportTx = (await report(ctx, { clDiff: ether("0.00000000000001") })).reportTx; } + const bufferedEtherAfterReport = await lido.getBufferedEther(); + const withdrawalsReserveAfterReport = await lido.getWithdrawalsReserve(); + const depositableAfterReport = await lido.getDepositableEther(); + expect(depositableAfterReport).to.equal( + bufferedEtherAfterReport - withdrawalsReserveAfterReport, + "Depositable should stay consistent after report processing", + ); + const [parsedFinalizedEvent] = findEventsWithInterfaces(reportReceipt!, "WithdrawalsFinalized", [wq.interface]); expect(parsedFinalizedEvent?.args.from).to.equal(lastFinalizedRequestId + 1n); expect(parsedFinalizedEvent?.args.to).to.equal(REQUESTS_COUNT + lastRequestId); diff --git a/test/integration/topup/topup-gas.integration.ts b/test/integration/topup/topup-gas.integration.ts new file mode 100644 index 0000000000..c5df31907b --- /dev/null +++ b/test/integration/topup/topup-gas.integration.ts @@ -0,0 +1,197 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { SSZValidatorsMerkleTree, StakingModuleV2__MockForStakingRouter, TopUpGateway } from "typechain-types"; + +import { WithdrawalCredentialsType } from "lib"; +import { addressToWC, generateBeaconHeader, generateValidator, setBeaconBlockRoot, Validator } from "lib/pdg"; +import { getProtocolContext, ProtocolContext } from "lib/protocol"; +import { prepareLocalMerkleTree } from "lib/top-ups"; + +import { Snapshot } from "test/suite"; + +/** + * Gas measurement integration test for TopUpGateway.topUp(). + * + * Uses a mock V2 staking module (WC 0x02) added to the real StakingRouter. + * Merkle proofs are built locally via SSZValidatorsMerkleTree. 
+ * Validators effectiveBalance = targetBalanceGwei → topUpLimits = 0, no depositable ether needed. + * + * To find the maximum batch size, change NUM_VALIDATORS and rerun. + */ +describe("Integration: TopUpGateway gas measurement", () => { + let ctx: ProtocolContext; + let topUpGateway: TopUpGateway; + let mockModuleV2: StakingModuleV2__MockForStakingRouter; + let moduleId: bigint; + + let caller: HardhatEthersSigner; + + const MAX_BLOCK_GAS = 16_000_000n; + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + const SLOT = 3200; // epoch = 100 + + // *** Change this value to find the maximum batch size *** + const NUM_VALIDATORS = 100; + + let targetBalanceGwei: bigint; + + // Tree state + let sszMerkleTree: SSZValidatorsMerkleTree; + let firstValidatorLeafIndex: bigint; + + // Pre-built data + let validators: Validator[]; + let allValidatorIndices: number[]; + let allProofValidators: string[][]; + let childBlockTimestamp: number; + let beaconBlockHeader: ReturnType; + + let originalState: string; + + before(async () => { + ctx = await getProtocolContext(); + originalState = await Snapshot.take(); + + [, caller] = await ethers.getSigners(); + const [deployer] = await ethers.getSigners(); + + const { stakingRouter } = ctx.contracts; + + // ========================================= + // Get TopUpGateway from LidoLocator + // ========================================= + const topUpGatewayAddress = await ctx.contracts.locator.topUpGateway(); + topUpGateway = await ethers.getContractAt("TopUpGateway", topUpGatewayAddress); + + targetBalanceGwei = BigInt(await topUpGateway.getTargetBalanceGwei()); + + // ========================================= + // Deploy mock V2 module and add to StakingRouter + // ========================================= + mockModuleV2 = await ethers.deployContract("StakingModuleV2__MockForStakingRouter"); + + const agentSigner = await ctx.getSigner("agent"); + + const STAKING_MODULE_MANAGE_ROLE = await stakingRouter.STAKING_MODULE_MANAGE_ROLE(); + await 
stakingRouter.connect(agentSigner).grantRole(STAKING_MODULE_MANAGE_ROLE, agentSigner.address); + + const modulesCountBefore = await stakingRouter.getStakingModulesCount(); + moduleId = modulesCountBefore + 1n; + + await stakingRouter.connect(agentSigner).addStakingModule("MockV2TopUp", await mockModuleV2.getAddress(), { + stakeShareLimit: 10000, + priorityExitShareThreshold: 10000, + stakingModuleFee: 500, + treasuryFee: 500, + maxDepositsPerBlock: 150, + minDepositBlockDistance: 25, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCountBefore + 1n); + + // ========================================= + // Grant roles on TopUpGateway + // ========================================= + const TOP_UP_ROLE = await topUpGateway.TOP_UP_ROLE(); + const MANAGE_LIMITS_ROLE = await topUpGateway.MANAGE_LIMITS_ROLE(); + + await topUpGateway.connect(agentSigner).grantRole(TOP_UP_ROLE, caller.address); + await topUpGateway.connect(agentSigner).grantRole(MANAGE_LIMITS_ROLE, deployer.address); + await topUpGateway.connect(deployer).setMaxValidatorsPerTopUp(NUM_VALIDATORS); + + // ========================================= + // Build SSZValidatorsMerkleTree with NUM_VALIDATORS + // ========================================= + const localTree = await prepareLocalMerkleTree(); + sszMerkleTree = localTree.stateTree; + firstValidatorLeafIndex = localTree.firstValidatorLeafIndex; + + const withdrawalCredentials = addressToWC(await ctx.contracts.withdrawalVault.getAddress(), 2); + + validators = []; + allValidatorIndices = []; + + for (let i = 0; i < NUM_VALIDATORS; i++) { + const v = generateValidator(withdrawalCredentials); + + v.container.effectiveBalance = targetBalanceGwei; // → topUpLimit = 0 + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 2n; // < epoch(SLOT=3200) = 100 + v.container.exitEpoch = FAR_FUTURE_EPOCH; + 
v.container.withdrawableEpoch = FAR_FUTURE_EPOCH; + + await sszMerkleTree.addValidatorsLeaf(v.container); + validators.push(v); + + const leafCount = await sszMerkleTree.leafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + allValidatorIndices.push(validatorIndex); + } + + // Commit state root to EIP-4788 + const stateRoot = await sszMerkleTree.getStateRoot(); + beaconBlockHeader = generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(beaconBlockHeader); + childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + // Build all proofs: validator[i] → state_root → beacon_block_root + allProofValidators = await Promise.all( + allValidatorIndices.map(async (vi) => { + const validatorProof = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(vi)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(beaconBlockHeader); + return [...validatorProof, ...headerMerkle.proof]; + }), + ); + }); + + after(async () => await Snapshot.restore(originalState)); + + it(`should measure gas for topUp with ${NUM_VALIDATORS} validators`, async () => { + await ethers.provider.send("evm_increaseTime", [1]); + await ethers.provider.send("evm_mine", []); + + const topUpData = { + moduleId, + keyIndices: allValidatorIndices.map((_, i) => BigInt(i)), + operatorIds: allValidatorIndices.map(() => 0n), + validatorIndices: allValidatorIndices.map((vi) => BigInt(vi)), + beaconRootData: { + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + }, + validatorWitness: validators.map((v, i) => ({ + proofValidator: allProofValidators[i], + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + exitEpoch: v.container.exitEpoch, + withdrawableEpoch: 
v.container.withdrawableEpoch, + })), + pendingBalanceGwei: allValidatorIndices.map(() => 0n), + }; + + const tx = await topUpGateway.connect(caller).topUp(topUpData); + const receipt = await tx.wait(); + + const gasUsed = receipt!.gasUsed; + const fitsInBlock = gasUsed < MAX_BLOCK_GAS; + const perValidator = gasUsed / BigInt(NUM_VALIDATORS); + + console.log(`\n TopUpGateway.topUp() with ${NUM_VALIDATORS} validators:`); + console.log(` Gas used: ${Number(gasUsed).toLocaleString()}`); + console.log(` Per validator: ${Number(perValidator).toLocaleString()}`); + console.log( + ` Fits in block: ${fitsInBlock ? "YES" : "NO"} (limit: ${Number(MAX_BLOCK_GAS).toLocaleString()})`, + ); + + expect(gasUsed).to.be.greaterThan(0n); + }); +}); diff --git a/test/integration/validators-exit-bus-submit-and-trigger-exits.ts b/test/integration/validators-exit-bus-submit-and-trigger-exits.ts index 2c107987ec..8d272714ff 100644 --- a/test/integration/validators-exit-bus-submit-and-trigger-exits.ts +++ b/test/integration/validators-exit-bus-submit-and-trigger-exits.ts @@ -45,7 +45,6 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { let refundRecipient: HardhatEthersSigner; const dataFormat = 1; - const exitRequestsLength = 5; const validatorsExitRequests: ExitRequest[] = [ { moduleId: 1, nodeOpId: 10, valIndex: 100, valPubkey: "0x" + "11".repeat(48) }, @@ -120,7 +119,8 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { it("should submit hash and data if veb is resumed", async () => { // Configure exit requests limits - const MAX_LIMIT = 100; + // Set a high enough balance limit (in ETH) to cover test requests + const MAX_LIMIT = 1_000_000; await veb.connect(agent).setExitRequestLimit(MAX_LIMIT, 1, 48); // Resume the contract await veb.connect(resumer).resume(); @@ -130,6 +130,7 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { .to.emit(veb, "RequestsHashSubmitted") .withArgs(exitRequestsHash); + const 
limitBefore = await veb.getExitRequestLimitFullInfo(); const tx = await veb.submitExitRequestsData(exitRequest); const receipt = await tx.wait(); const block = await receipt?.getBlock(); @@ -148,7 +149,8 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { // check limit const exitLimitInfo = await veb.getExitRequestLimitFullInfo(); const currentExitRequestsLimit = exitLimitInfo[4]; - expect(currentExitRequestsLimit).to.equal(MAX_LIMIT - exitRequestsLength); + // Limit should decrease after processing + expect(currentExitRequestsLimit).to.be.lessThan(limitBefore[4]); }); it("should trigger exits", async () => { diff --git a/test/integration/vaults/bad-debt.integration.ts b/test/integration/vaults/bad-debt.integration.ts index 56bb8047fc..faf7cd4343 100644 --- a/test/integration/vaults/bad-debt.integration.ts +++ b/test/integration/vaults/bad-debt.integration.ts @@ -707,11 +707,12 @@ describe("Integration: Vault with bad debt", () => { expect(await vaultHub.badDebtToInternalize()).to.be.equal(badDebtShares, "Bad debt to internalize is the same"); // simulate the report at the refSlot (like the Oracle would do) - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const clBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; const simulationAtRefSlot = await simulateReport(ctx, { refSlot: nextRefSlot, - beaconValidators, - clBalance: beaconBalance, + clValidatorsBalance: clBalance, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, }); @@ -723,8 +724,8 @@ describe("Integration: Vault with bad debt", () => { expect( await simulateReport(ctx, { refSlot: (await hashConsensus.getCurrentFrame()).refSlot, - beaconValidators, - clBalance: beaconBalance, + clValidatorsBalance: clBalance, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, }), diff --git 
a/test/integration/vaults/roles/accounting.roles.integration.ts b/test/integration/vaults/roles/accounting.roles.integration.ts index 3590749265..4bb743fcf6 100644 --- a/test/integration/vaults/roles/accounting.roles.integration.ts +++ b/test/integration/vaults/roles/accounting.roles.integration.ts @@ -45,8 +45,8 @@ describe("Integration: Accounting Roles and Access Control", () => { const report = { timestamp: 0n, timeElapsed: 0n, - clValidators: 0n, - clBalance: 0n, + clValidatorsBalance: 0n, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, sharesRequestedToBurn: 0n, diff --git a/test/integration/vaults/sanity-checker-bad-debt.integration.ts b/test/integration/vaults/sanity-checker-bad-debt.integration.ts index c1c1b93254..2e68d0620f 100644 --- a/test/integration/vaults/sanity-checker-bad-debt.integration.ts +++ b/test/integration/vaults/sanity-checker-bad-debt.integration.ts @@ -5,18 +5,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { advanceChainTime, ether, impersonate, LIMITER_PRECISION_BASE } from "lib"; +import { ether, impersonate, LIMITER_PRECISION_BASE, ONE_GWEI } from "lib"; import { + getNextReportContext, getProtocolContext, ProtocolContext, queueBadDebtInternalization, removeStakingLimit, report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, + seedProtocolPendingBaseline, setupLidoForVaults, setupVaultWithBadDebt, upDefaultTierShareLimit, - waitNextAvailableReportTime, } from "lib/protocol"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; import { Snapshot } from "test/suite"; import { SHARE_RATE_PRECISION } from "test/suite/constants"; @@ -268,17 +272,17 @@ describe("Integration: Sanity checker with bad debt internalization", () => { describe("CL balance decrease check with bad debt internalization", () => { it("Small CL balance 
decrease", async () => { + await resetCLBalanceDecreaseWindow(ctx, { waitNextReportTime: true }); + const stateBefore = await captureState(); // Queue bad debt internalization - const { stakingVault, badDebtShares } = await setupVaultWithBadDebt(ctx, owner, nodeOperator); - await queueBadDebtInternalization(ctx, stakingVault, badDebtShares); + await setupVaultWithBadDebt(ctx, owner, nodeOperator); // Small negative CL diff (within allowed limits) const smallDecrease = ether("-1"); - await report(ctx, { - clDiff: smallDecrease, + await reportWithEffectiveClDiff(ctx, smallDecrease, { excludeVaultsBalances: true, skipWithdrawals: true, // Burner state on the fork can hold pending cover/non-cover shares; burning them @@ -297,29 +301,19 @@ describe("Integration: Sanity checker with bad debt internalization", () => { // Bad debt internalization does not affect calculation of dynamic slashing limit // so the report with max allowed CL decrease should still pass with bad debt internalization - const { oracleReportSanityChecker, lido, stakingRouter } = ctx.contracts; + const { oracleReportSanityChecker, lido } = ctx.contracts; - // Time travel to 54 days to invalidate all current penalties and get max slashing limits - const DAYS_54_IN_SECONDS = 54n * 24n * 60n * 60n; - await advanceChainTime(DAYS_54_IN_SECONDS); + // Submit a neutral report to establish the current CL balance baseline await report(ctx); // Get current protocol state to calculate dynamic slashing limit - const { beaconValidators } = await lido.getBeaconStat(); - const moduleDigests = await stakingRouter.getAllStakingModuleDigests(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const preCLBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; const limits = await oracleReportSanityChecker.getOracleReportLimits(); + const maxAllowedNegativeRebase = (preCLBalance * limits.maxCLBalanceDecreaseBP) / 10_000n; - const 
exitedValidators = moduleDigests.reduce((total, { summary }) => total + summary.totalExitedValidators, 0n); - const activeValidators = beaconValidators - exitedValidators; - - // maxAllowedCLRebaseNegativeSum = initialSlashingAmountPWei * 1e15 * validators + inactivityPenaltiesAmountPWei * 1e15 * validators - const ONE_PWEI = 10n ** 15n; - const maxAllowedNegativeRebase = - limits.initialSlashingAmountPWei * ONE_PWEI * activeValidators + - limits.inactivityPenaltiesAmountPWei * ONE_PWEI * activeValidators; - - // CL decrease exactly at limit minus 1 wei should pass - const clSlashing = -(maxAllowedNegativeRebase - 1n); + // Oracle reports CL balances in gwei, so keep the reported decrease below the limit after gwei rounding. + const clSlashing = -(maxAllowedNegativeRebase - ONE_GWEI); const { stakingVault, badDebtShares } = await setupVaultWithBadDebt(ctx, owner, nodeOperator); await queueBadDebtInternalization(ctx, stakingVault, badDebtShares); @@ -349,56 +343,52 @@ describe("Integration: Sanity checker with bad debt internalization", () => { describe("Annual balance increase check with bad debt internalization", () => { it("CL balance increase over limit reverts, bad debt does not compensate", async () => { - // Bad debt internalization does not affect CL balance increase check - // so even with bad debt queued, the report exceeding limit should revert + // Bad debt internalization does not affect positive CL growth checks, + // so even with bad debt queued, a report exceeding the activated-pending + // plus validators-based safety cap should revert. 
+ + const { oracleReportSanityChecker, lido } = ctx.contracts; - const { oracleReportSanityChecker, lido, accountingOracle, hashConsensus } = ctx.contracts; + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); const { stakingVault, badDebtShares } = await setupVaultWithBadDebt(ctx, owner, nodeOperator); await queueBadDebtInternalization(ctx, stakingVault, badDebtShares); - await waitNextAvailableReportTime(ctx); // Get current protocol state - const { beaconBalance: preCLBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); const { annualBalanceIncreaseBPLimit } = await oracleReportSanityChecker.getOracleReportLimits(); - const { secondsPerSlot } = await hashConsensus.getChainConfig(); - const { currentFrameRefSlot } = await accountingOracle.getProcessingState(); - const lastRefSlot = await accountingOracle.getLastProcessingRefSlot(); - const slotElapsed = currentFrameRefSlot - lastRefSlot; - - expect(slotElapsed).to.be.gt(0n, "Some slots should have elapsed since last report"); - - // Calculate time elapsed for one frame - const timeElapsed = slotElapsed * secondsPerSlot; - - // Calculate balance increase that exceeds the limit - // The check is: (365 days * 10000 * balanceIncrease / preCLBalance) / timeElapsed > limit - // Solving : balanceIncrease > ((limit + 1) * preCLBalance * timeElapsed - 1) / (365 days * 10000) + const { reportTimeElapsed } = await getNextReportContext(ctx); const SECONDS_PER_YEAR = 365n * 24n * 60n * 60n; const MAX_BASIS_POINTS = 10000n; const maxBalanceIncrease = - ((annualBalanceIncreaseBPLimit + 1n) * preCLBalance * timeElapsed - 1n) / (SECONDS_PER_YEAR * MAX_BASIS_POINTS); + ((annualBalanceIncreaseBPLimit * + (clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport) * + reportTimeElapsed) / + (SECONDS_PER_YEAR * MAX_BASIS_POINTS) / + ONE_GWEI) * + ONE_GWEI; const stateBefore = await captureState(); 
expect(stateBefore.badDebtToInternalize).to.equal(badDebtShares, "Bad debt should be queued"); - // Report should revert - CL increase exceeds the limit - // Bad debt being queued does NOT compensate for the excess + // `report()` consumes the seeded pending baseline inside the same report, so the + // raw CL delta under test is the safety-cap component computed from the + // post-activation validators base. + // Bad debt still must not compensate an over-limit report. + expect(clPendingBalanceAtLastReport).to.be.gt(0n, "test precondition failed: pending baseline must be non-zero"); await expect( report(ctx, { - clDiff: maxBalanceIncrease + 10n ** 9n, // + 1 gwei to exceed limit + clDiff: maxBalanceIncrease + ONE_GWEI, excludeVaultsBalances: true, skipWithdrawals: true, - waitNextReportTime: false, }), - ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectCLBalanceIncrease"); + ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectTotalCLBalanceIncrease"); - // Now report exactly at the limit. 
Should pass despite bad debt internalization + // Report exactly at the limit should pass despite bad debt internalization await report(ctx, { clDiff: maxBalanceIncrease, excludeVaultsBalances: true, skipWithdrawals: true, - waitNextReportTime: false, }); const stateAfter = await captureState(); diff --git a/test/integration/vaults/scenario/happy-path.integration.ts b/test/integration/vaults/scenario/happy-path.integration.ts index 85e39d69ed..6e5977967e 100644 --- a/test/integration/vaults/scenario/happy-path.integration.ts +++ b/test/integration/vaults/scenario/happy-path.integration.ts @@ -28,6 +28,7 @@ import { ProtocolContext, report, reportVaultDataWithProof, + reportWithEffectiveClDiff, setupLidoForVaults, } from "lib/protocol"; @@ -89,13 +90,15 @@ describe("Scenario: Staking Vaults Happy Path", () => { beforeEach(bailOnFailure); async function calculateReportParams() { - const { beaconBalance } = await ctx.contracts.lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = + await ctx.contracts.lido.getBalanceStats(); + const clBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; const { timeElapsed } = await getReportTimeElapsed(ctx); log.debug("Report time elapsed", { timeElapsed }); const gross = (TARGET_APR * TOTAL_BASIS_POINTS) / (TOTAL_BASIS_POINTS - PROTOCOL_FEE); // take into account 10% Lido fee - const elapsedProtocolReward = (beaconBalance * gross * timeElapsed) / TOTAL_BASIS_POINTS / ONE_YEAR; + const elapsedProtocolReward = (clBalance * gross * timeElapsed) / TOTAL_BASIS_POINTS / ONE_YEAR; const elapsedVaultReward = (VAULT_DEPOSIT * gross * timeElapsed) / TOTAL_BASIS_POINTS / ONE_YEAR; log.debug("Report values", { @@ -320,12 +323,9 @@ describe("Scenario: Staking Vaults Happy Path", () => { const { elapsedProtocolReward, elapsedVaultReward } = await calculateReportParams(); const vaultValue = await addRewards(elapsedVaultReward); - const params = { - clDiff: elapsedProtocolReward, + 
await reportWithEffectiveClDiff(ctx, elapsedProtocolReward, { excludeVaultsBalances: true, - } as OracleReportParams; - - await report(ctx, params); + }); expect(await vaultHub.liabilityShares(stakingVaultAddress)).to.be.equal(stakingVaultMaxMintingShares); @@ -376,14 +376,16 @@ describe("Scenario: Staking Vaults Happy Path", () => { await lido.connect(owner).approve(dashboard, await lido.getPooledEthByShares(stakingVaultMaxMintingShares)); await dashboard.connect(owner).burnShares(stakingVaultMaxMintingShares); - const { elapsedProtocolReward, elapsedVaultReward } = await calculateReportParams(); + const { elapsedVaultReward } = await calculateReportParams(); const vaultValue = await addRewards(elapsedVaultReward / 2n); // Half the vault rewards value after validator exit const params = { - clDiff: elapsedProtocolReward, excludeVaultsBalances: true, } as OracleReportParams; + // This test is about burn -> zero liability shares on the next vault report, not + // about a protocol CL reward. Keep the follow-up report neutral so we don't add + // an unrelated pending-backed APR setup to a burn-flow assertion. 
await report(ctx, params); await reportVaultDataWithProof(ctx, stakingVault, { totalValue: vaultValue }); diff --git a/test/integration/vaults/withdrawals-bad-debt.integration.ts b/test/integration/vaults/withdrawals-bad-debt.integration.ts index 15b8e7a195..9d64f13152 100644 --- a/test/integration/vaults/withdrawals-bad-debt.integration.ts +++ b/test/integration/vaults/withdrawals-bad-debt.integration.ts @@ -29,6 +29,7 @@ describe("Integration: Withdrawals finalization with bad debt internalization", let owner: HardhatEthersSigner; let nodeOperator: HardhatEthersSigner; let stranger: HardhatEthersSigner; + const DEPOSITS_RESERVE_TARGET = ether("100"); // Helper to capture protocol state const captureState = async () => { @@ -41,6 +42,11 @@ describe("Integration: Withdrawals finalization with bad debt internalization", const internalEther = totalPooledEther - externalEther; const internalShares = totalShares - externalShares; const unfinalizedSTETH = await withdrawalQueue.unfinalizedStETH(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const bufferedEther = await lido.getBufferedEther(); + const depositableEther = await lido.getDepositableEther(); const unfinalizedRequestNumber = await withdrawalQueue.unfinalizedRequestNumber(); const lastFinalizedRequestId = await withdrawalQueue.getLastFinalizedRequestId(); const badDebtToInternalize = await vaultHub.badDebtToInternalize(); @@ -61,7 +67,12 @@ describe("Integration: Withdrawals finalization with bad debt internalization", elRewardsVaultBalance, withdrawalVaultBalance, withdrawalQueueBalance, + depositsReserveTarget, + depositsReserve, unfinalizedSTETH, + withdrawalsReserve, + bufferedEther, + depositableEther, unfinalizedRequestNumber, lastFinalizedRequestId, shareRate: totalShares > 0n ? 
(totalPooledEther * SHARE_RATE_PRECISION) / totalShares : 0n, @@ -213,6 +224,26 @@ describe("Integration: Withdrawals finalization with bad debt internalization", const finalizedEvent = events[0]; const stateAfter = await captureState(); + expect(stateAfter.depositableEther).to.equal( + stateAfter.bufferedEther - stateAfter.withdrawalsReserve, + "Depositable should equal buffered minus withdrawals reserve after report", + ); + expect(stateAfter.depositsReserveTarget).to.equal( + DEPOSITS_RESERVE_TARGET, + "Deposits reserve target mismatch after report", + ); + expect(stateAfter.depositsReserve).to.be.lte( + stateAfter.depositsReserveTarget, + "Deposits reserve should not exceed target after report", + ); + + const [, , amountOfETHLocked] = finalizedEvent.args; + const availableEthForFinalization = + stateBefore.withdrawalVaultBalance + stateBefore.elRewardsVaultBalance + stateBefore.withdrawalsReserve; + expect(amountOfETHLocked).to.be.lte( + availableEthForFinalization, + "Finalization should be bounded by vault balances plus withdrawals reserve", + ); return { reportTx, finalizedEvent, stateBefore, stateAfter }; }; @@ -244,6 +275,8 @@ describe("Integration: Withdrawals finalization with bad debt internalization", .connect(agent) .grantRole(await oracleReportSanityChecker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), agent); await oracleReportSanityChecker.connect(agent).setMaxPositiveTokenRebase(maxPositiveTokenRebase); + + await lido.connect(agent).setDepositsReserveTarget(DEPOSITS_RESERVE_TARGET); }); beforeEach(async () => (snapshot = await Snapshot.take())); diff --git a/test/suite/constants.ts b/test/suite/constants.ts index 8d621fd694..09faeb01b9 100644 --- a/test/suite/constants.ts +++ b/test/suite/constants.ts @@ -1,8 +1,11 @@ +import { MAX_EFFECTIVE_BALANCE_WC_TYPE_01 } from "lib"; + export const ONE_HOUR = 60n * 60n; export const ONE_DAY = 24n * 60n * 60n; export const MAX_BASIS_POINTS = 100_00n; export const MAX_DEPOSIT = 150n; +export const 
MAX_DEPOSIT_AMOUNT = MAX_DEPOSIT * MAX_EFFECTIVE_BALANCE_WC_TYPE_01; // 150 * 32 ETH export const CURATED_MODULE_ID = 1n; export const SIMPLE_DVT_MODULE_ID = 2n; @@ -12,3 +15,5 @@ export const ZERO_HASH = new Uint8Array(32).fill(0); export const ZERO_BYTES32 = "0x" + Buffer.from(ZERO_HASH).toString("hex"); export const VAULTS_MAX_RELATIVE_SHARE_LIMIT_BP = 10_00n; + +export const FUSAKA_TX_GAS_LIMIT = 2n ** 24n; // 16M = 16_777_216 diff --git a/test/upgrade/V3Template_Harness.sol b/test/upgrade/UpgradeTemplate_Harness.sol similarity index 52% rename from test/upgrade/V3Template_Harness.sol rename to test/upgrade/UpgradeTemplate_Harness.sol index 8fcc2a4531..d59637edc3 100644 --- a/test/upgrade/V3Template_Harness.sol +++ b/test/upgrade/UpgradeTemplate_Harness.sol @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity >=0.8.25; -import {V3Template} from "contracts/upgrade/V3Template.sol"; +import {UpgradeTemplate} from "contracts/upgrade/UpgradeTemplate.sol"; -contract V3Template__Harness { - V3Template public immutable TEMPLATE; +contract UpgradeTemplate_Harness { + UpgradeTemplate public immutable TEMPLATE; constructor(address _template) { - TEMPLATE = V3Template(_template); + TEMPLATE = UpgradeTemplate(_template); } function startUpgradeTwice() external { diff --git a/tsconfig.json b/tsconfig.json index fdac08afc2..73dd7bfc7e 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -16,5 +16,6 @@ } }, "include": ["./test", "./typechain-types", "./lib", "./scripts", "./tasks", "./deployed-*.json"], + "exclude": ["**/archive"], "files": ["./hardhat.config.ts", "./commitlint.config.ts", "./globals.d.ts"] }