sherlock-audit / 2024-06-allora-judging


defsec - Incomplete Zero-Height Genesis Preparation in Allora Network #43

Open sherlock-admin3 opened 4 months ago

sherlock-admin3 commented 4 months ago

defsec

Medium

Incomplete Zero-Height Genesis Preparation in Allora Network

Summary

The current implementation of prepForZeroHeightGenesis in the Allora network lacks several key operations present in the reference implementation, potentially leading to inconsistent or incomplete state resets when exporting genesis at zero height.

Vulnerability Detail

The prepForZeroHeightGenesis function in the Allora network's AlloraApp is missing several crucial steps that are present in more comprehensive reference implementations, such as the Juno export logic linked under Impact below.

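For context, the zero-height path is normally reached through the Cosmos SDK export command: running the chain binary's export subcommand with the --for-zero-height flag makes the server call the application's exporter with forZeroHeight set to true, which is what gates prepForZeroHeightGenesis. The sketch below shows the usual wiring; the constructor newAlloraApp, its argument list, and the exact import paths are illustrative placeholders and do not necessarily match the Allora codebase.

// Sketch of the standard Cosmos SDK export wiring (illustrative only: the
// constructor newAlloraApp and its arguments are placeholders, and import
// paths depend on the SDK version in use).
func appExport(
    logger log.Logger, db dbm.DB, traceStore io.Writer, height int64,
    forZeroHeight bool, jailAllowedAddrs []string,
    appOpts servertypes.AppOptions, modulesToExport []string,
) (servertypes.ExportedApp, error) {
    // Load the latest state when height == -1, otherwise load the requested height.
    exportApp := newAlloraApp(logger, db, traceStore, height == -1, appOpts) // placeholder constructor
    if height != -1 {
        if err := exportApp.LoadHeight(height); err != nil {
            return servertypes.ExportedApp{}, err
        }
    }

    // forZeroHeight comes from the --for-zero-height flag and triggers
    // prepForZeroHeightGenesis inside ExportAppStateAndValidators.
    return exportApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport)
}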
Impact

These omissions may lead to unexpected behavior or state inconsistencies when the network is restarted from a zero-height genesis export.

Example Juno export: https://github.com/CosmosContracts/juno/blob/main/app/export.go#L19

Code Snippet

export.go#L55

// ExportAppStateAndValidators exports the state of the application for a genesis
// file.
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailAllowedAddrs []string, modulesToExport []string) (servertypes.ExportedApp, error) {
    // as if they could withdraw from the start of the next block
    ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()})

    // We export at last height + 1, because that's the height at which
    // Tendermint will start InitChain.
    height := app.LastBlockHeight() + 1
    if forZeroHeight {
        height = 0
        app.prepForZeroHeightGenesis(ctx, jailAllowedAddrs)
    }

    genState := app.ModuleManager.ExportGenesisForModules(ctx, app.appCodec, modulesToExport)
    appState, err := json.MarshalIndent(genState, "", "  ")
    if err != nil {
        return servertypes.ExportedApp{}, err
    }

    validators, err := staking.WriteValidators(ctx, app.AppKeepers.StakingKeeper)
    return servertypes.ExportedApp{
        AppState:        appState,
        Validators:      validators,
        Height:          height,
        ConsensusParams: app.BaseApp.GetConsensusParams(ctx),
    }, err
}

// prepare for fresh start at zero height
// NOTE zero height genesis is a temporary feature which will be deprecated
//
//  in favour of export at a block height
func (app *App) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) {
    applyAllowedAddrs := false

    // check if there is an allowed address list
    if len(jailAllowedAddrs) > 0 {
        applyAllowedAddrs = true
    }

    allowedAddrsMap := make(map[string]bool)

    for _, addr := range jailAllowedAddrs {
        _, err := sdk.ValAddressFromBech32(addr)
        if err != nil {
            log.Fatal(err)
        }
        allowedAddrsMap[addr] = true
    }

    /* Just to be safe, assert the invariants on current state. */
    app.AppKeepers.CrisisKeeper.AssertInvariants(ctx)

    /* Handle fee distribution state. */

    // withdraw all validator commission
    app.AppKeepers.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) {
        _, _ = app.AppKeepers.DistrKeeper.WithdrawValidatorCommission(ctx, val.GetOperator())
        return false
    })

    // withdraw all delegator rewards
    dels := app.AppKeepers.StakingKeeper.GetAllDelegations(ctx)
    for _, delegation := range dels {
        valAddr, err := sdk.ValAddressFromBech32(delegation.ValidatorAddress)
        if err != nil {
            panic(err)
        }

        delAddr := sdk.MustAccAddressFromBech32(delegation.DelegatorAddress)

        _, _ = app.AppKeepers.DistrKeeper.WithdrawDelegationRewards(ctx, delAddr, valAddr)
    }

    // clear validator slash events
    app.AppKeepers.DistrKeeper.DeleteAllValidatorSlashEvents(ctx)

    // clear validator historical rewards
    app.AppKeepers.DistrKeeper.DeleteAllValidatorHistoricalRewards(ctx)

    // set context height to zero
    height := ctx.BlockHeight()
    ctx = ctx.WithBlockHeight(0)

    // reinitialize all validators
    app.AppKeepers.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) {
        // donate any unwithdrawn outstanding reward fraction tokens to the community pool
        scraps := app.AppKeepers.DistrKeeper.GetValidatorOutstandingRewardsCoins(ctx, val.GetOperator())
        feePool := app.AppKeepers.DistrKeeper.GetFeePool(ctx)
        feePool.CommunityPool = feePool.CommunityPool.Add(scraps...)
        app.AppKeepers.DistrKeeper.SetFeePool(ctx, feePool)

        if err := app.AppKeepers.DistrKeeper.Hooks().AfterValidatorCreated(ctx, val.GetOperator()); err != nil {
            panic(err)
        }
        return false
    })

    // reinitialize all delegations
    for _, del := range dels {
        valAddr, err := sdk.ValAddressFromBech32(del.ValidatorAddress)
        if err != nil {
            panic(err)
        }
        delAddr := sdk.MustAccAddressFromBech32(del.DelegatorAddress)

        if err := app.AppKeepers.DistrKeeper.Hooks().BeforeDelegationCreated(ctx, delAddr, valAddr); err != nil {
            // never called as BeforeDelegationCreated always returns nil
            panic(fmt.Errorf("error while incrementing period: %w", err))
        }

        if err := app.AppKeepers.DistrKeeper.Hooks().AfterDelegationModified(ctx, delAddr, valAddr); err != nil {
            // never called as AfterDelegationModified always returns nil
            panic(fmt.Errorf("error while creating a new delegation period record: %w", err))
        }
    }

    // reset context height
    ctx = ctx.WithBlockHeight(height)

    /* Handle staking state. */

    // iterate through redelegations, reset creation height
    app.AppKeepers.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) {
        for i := range red.Entries {
            red.Entries[i].CreationHeight = 0
        }
        app.AppKeepers.StakingKeeper.SetRedelegation(ctx, red)
        return false
    })

    // iterate through unbonding delegations, reset creation height
    app.AppKeepers.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) {
        for i := range ubd.Entries {
            ubd.Entries[i].CreationHeight = 0
        }
        app.AppKeepers.StakingKeeper.SetUnbondingDelegation(ctx, ubd)
        return false
    })

    // Iterate through validators by power descending, reset bond heights, and
    // update bond intra-tx counters.
    store := ctx.KVStore(app.AppKeepers.GetKey(stakingtypes.StoreKey))
    iter := sdk.KVStoreReversePrefixIterator(store, stakingtypes.ValidatorsKey)
    counter := int16(0)

    for ; iter.Valid(); iter.Next() {
        addr := sdk.ValAddress(stakingtypes.AddressFromValidatorsKey(iter.Key()))
        validator, found := app.AppKeepers.StakingKeeper.GetValidator(ctx, addr)
        if !found {
            panic("expected validator, not found")
        }

        validator.UnbondingHeight = 0
        if applyAllowedAddrs && !allowedAddrsMap[addr.String()] {
            validator.Jailed = true
        }

        app.AppKeepers.StakingKeeper.SetValidator(ctx, validator)
        counter++
    }

    if err := iter.Close(); err != nil {
        app.Logger().Error("error while closing the key-value store reverse prefix iterator: ", err)
        return
    }

    _, err := app.AppKeepers.StakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx)
    if err != nil {
        log.Fatal(err)
    }

    /* Handle slashing state. */

    // reset start height on signing infos
    app.AppKeepers.SlashingKeeper.IterateValidatorSigningInfos(
        ctx,
        func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) {
            info.StartHeight = 0
            app.AppKeepers.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info)
            return false
        },
    )
}

Tool used

Manual Review

Recommendation

Update the prepForZeroHeightGenesis function to perform the full set of state-reset operations carried out by the reference implementation, so that distribution, staking, and slashing state are completely reset before the zero-height export. A round-trip test along the lines of the sketch below can help catch incomplete resets.

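A possible regression check: export at zero height, boot a fresh application from the exported state, and assert the module invariants so that an incomplete reset surfaces as a test failure. The sketch below assumes hypothetical test helpers newTestApp and newAppFromGenesisState (they do not exist in the repository) and reuses the NewContext / AppKeepers API shown in the snippet above.

// Round-trip regression sketch (hypothetical helpers: newTestApp builds and
// initializes an App with at least one committed block; newAppFromGenesisState
// boots a second App from an exported genesis app state).
func TestZeroHeightExportRoundTrip(t *testing.T) {
    exportingApp := newTestApp(t)

    exported, err := exportingApp.ExportAppStateAndValidators(true, nil, nil)
    require.NoError(t, err)

    // A zero-height export should report height 0 and a non-empty app state.
    require.EqualValues(t, 0, exported.Height)
    require.NotEmpty(t, exported.AppState)

    // Boot a fresh app from the exported state and assert module invariants;
    // distribution or staking state that was not fully reset should trip one.
    freshApp := newAppFromGenesisState(t, exported.AppState)
    ctx := freshApp.NewContext(true, tmproto.Header{Height: 1})
    freshApp.AppKeepers.CrisisKeeper.AssertInvariants(ctx)
}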
sherlock-admin2 commented 4 months ago

The protocol team fixed this issue in the following PRs/commits: https://github.com/allora-network/allora-chain/pull/443