From 235b347c33f7ac729b0ce50416bc2844bfa0f1e0 Mon Sep 17 00:00:00 2001
From: r4bbit <445106+0x-r4bbit@users.noreply.github.com>
Date: Fri, 1 Mar 2024 10:56:19 +0100
Subject: [PATCH] fix(StakeManager): don't allow migration initialization
 while migrating

This fixes a bug found by the Certora prover.

The rule `epochStaysSameOnMigration` failed because a previous
`StakeManager` could call `migrationInitialize` and change `currentEpoch`
on the next `StakeManager`, even though that next `StakeManager` might be
in migration itself (in which case its `currentEpoch` is not allowed to
change).

This commit fixes the issue by ensuring `migrationInitialize()` reverts
if the `StakeManager` already has an ongoing migration.
---
 certora/specs/StakeManagerStartMigration.spec |  8 +++---
 contracts/StakeManager.sol                    |  3 +++
 test/StakeManager.t.sol                       | 27 +++++++++++++++++++
 3 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/certora/specs/StakeManagerStartMigration.spec b/certora/specs/StakeManagerStartMigration.spec
index f4496a9..2e1a3b9 100644
--- a/certora/specs/StakeManagerStartMigration.spec
+++ b/certora/specs/StakeManagerStartMigration.spec
@@ -32,7 +32,8 @@ definition blockedWhenMigrating(method f) returns bool = (
     f.selector == sig:unstake(uint256).selector ||
     f.selector == sig:lock(uint256).selector ||
     f.selector == sig:executeEpoch().selector ||
-    f.selector == sig:startMigration(address).selector
+    f.selector == sig:startMigration(address).selector ||
+    f.selector == sig:migrationInitialize(uint256,uint256,uint256,uint256).selector
     );

 definition blockedWhenNotMigrating(method f) returns bool = (
@@ -116,8 +117,9 @@ rule migrationLockedIn(method f) filtered {
     assert currentContract.migration != 0;
 }

-rule epochStaysSameOnMigration {
-    method f;
+rule epochStaysSameOnMigration(method f) filtered {
+    f -> !blockedWhenMigrating(f) && f.contract == currentContract
+} {
     env e;
     calldataarg args;

diff --git a/contracts/StakeManager.sol b/contracts/StakeManager.sol
index 77c3d09..42d4abc 100644
--- a/contracts/StakeManager.sol
+++ b/contracts/StakeManager.sol
@@ -251,6 +251,9 @@ contract StakeManager is Ownable {
         external
         onlyOldManager
     {
+        if (address(migration) != address(0)) {
+            revert StakeManager__PendingMigration();
+        }
         currentEpoch = _currentEpoch;
         totalSupplyMP = _totalSupplyMP;
         totalSupplyBalance = _totalSupplyBalance;
diff --git a/test/StakeManager.t.sol b/test/StakeManager.t.sol
index 18ebe91..e487f61 100644
--- a/test/StakeManager.t.sol
+++ b/test/StakeManager.t.sol
@@ -273,6 +273,33 @@ contract MigrateTest is StakeManagerTest {
     }
 }

+contract MigrationInitializeTest is StakeManagerTest {
+    function setUp() public override {
+        StakeManagerTest.setUp();
+    }
+
+    function test_RevertWhen_MigrationPending() public {
+        // first, create 2nd and 3rd generation stake managers
+        vm.startPrank(deployer);
+        StakeManager secondStakeManager = new StakeManager(stakeToken, address(stakeManager));
+        StakeManager thirdStakeManager = new StakeManager(stakeToken, address(secondStakeManager));
+
+        // next, ensure `secondStakeManager` is in migration mode itself
+        secondStakeManager.startMigration(thirdStakeManager);
+        vm.stopPrank();
+
+        uint256 currentEpoch = stakeManager.currentEpoch();
+        uint256 totalMP = stakeManager.totalSupplyMP();
+        uint256 totalBalance = stakeManager.totalSupplyBalance();
+
+        // `stakeManager` calling `migrationInitialize` while the new stake manager is
+        // in migration itself, should revert
+        vm.prank(address(stakeManager));
+        vm.expectRevert(StakeManager.StakeManager__PendingMigration.selector);
+        secondStakeManager.migrationInitialize(currentEpoch, totalMP, totalBalance, 0);
+    }
+}
+
 contract ExecuteAccountTest is StakeManagerTest {
     StakeVault[] private userVaults;
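
For readers skimming the patch, the standalone sketch below condenses the new guard into a minimal contract. It is an illustrative approximation only, not the real StakeManager: the contract name MigrationGuardSketch, the single-parameter migrationInitialize, and the unrestricted startMigration are simplifying assumptions, and the MP/balance accounting handed over during migration is omitted.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;

// Illustrative sketch only (not the real StakeManager): shows the
// "reject migrationInitialize while already migrating" guard in isolation.
contract MigrationGuardSketch {
    error StakeManager__PendingMigration();

    address public immutable previousManager; // old manager allowed to hand over state
    address public migration;                 // next-generation manager, zero if none
    uint256 public currentEpoch;

    constructor(address _previousManager) {
        previousManager = _previousManager;
    }

    modifier onlyOldManager() {
        require(msg.sender == previousManager, "not old manager");
        _;
    }

    // Called by the previous manager to hand over its epoch. Reverts if this
    // manager has itself started migrating, so currentEpoch can no longer be
    // changed from the outside (mirrors the guard added in this patch).
    function migrationInitialize(uint256 _currentEpoch) external onlyOldManager {
        if (migration != address(0)) {
            revert StakeManager__PendingMigration();
        }
        currentEpoch = _currentEpoch;
    }

    // Marks this manager as migrating to a newer generation; the access
    // control used by the real contract is left out for brevity.
    function startMigration(address _migration) external {
        migration = _migration;
    }
}

The Foundry test added above exercises exactly this path: the second-generation manager starts its own migration, after which the old manager's call to migrationInitialize hits the StakeManager__PendingMigration revert instead of overwriting currentEpoch.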