refactor: replace panic with error return #740

Merged · 1 commit · Oct 14, 2024
19 changes: 9 additions & 10 deletions x/crosschain/keeper/abci.go
@@ -38,10 +38,7 @@ func (k Keeper) isNeedOracleSetRequest(ctx sdk.Context) (*types.OracleSet, bool)
 	}
 	// 3. Power diff
 	powerDiff := fmt.Sprintf("%.8f", types.BridgeValidators(currentOracleSet.Members).PowerDiff(latestOracleSet.Members))
-	powerDiffDec, err := sdkmath.LegacyNewDecFromStr(powerDiff)
-	if err != nil {
-		panic(fmt.Errorf("covert power diff to dec err, powerDiff: %v, err: %w", powerDiff, err))
-	}
+	powerDiffDec := sdkmath.LegacyMustNewDecFromStr(powerDiff)

🛠️ Refactor suggestion

Consider using error handling instead of LegacyMustNewDecFromStr

While using sdkmath.LegacyMustNewDecFromStr simplifies the code, it still has the potential to panic if the input string is not a valid decimal. This doesn't fully align with the PR objective of replacing panics with error returns.

Consider using LegacyNewDecFromStr instead and propagating the error:

powerDiffDec, err := sdkmath.LegacyNewDecFromStr(powerDiff)
if err != nil {
    return currentOracleSet, false
}

This approach would handle potential errors gracefully without panicking, which is more in line with the PR's objectives.
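For illustration, here is a tiny standalone snippet showing the behavioural difference the comment is pointing at. It assumes the sdkmath alias resolves to the cosmossdk.io/math module, as in current Cosmos SDK code; it is not part of this PR.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"
)

func main() {
	// The error-returning variant lets the caller decide what to do with bad input.
	if _, err := sdkmath.LegacyNewDecFromStr("not-a-decimal"); err != nil {
		fmt.Println("handled gracefully:", err)
	}

	// The Must variant panics on the same malformed input.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from panic:", r)
		}
	}()
	_ = sdkmath.LegacyMustNewDecFromStr("not-a-decimal")
}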


 	oracleSetUpdatePowerChangePercent := k.GetOracleSetUpdatePowerChangePercent(ctx)
 	if oracleSetUpdatePowerChangePercent.GT(sdkmath.LegacyOneDec()) {
@@ -163,33 +160,35 @@ func (k Keeper) bridgeCallSlashing(ctx sdk.Context, oracles types.Oracles, signe
 // here is the Ethereum block height at the time of the last SendToExternal or SendToFx to be observed. It's very important we do not
 // project, if we do a slowdown on ethereum could cause a double spend. Instead timeouts will *only* occur after the timeout period
 // AND any deposit or withdraw has occurred to update the Ethereum block height.
-func (k Keeper) cleanupTimedOutBatches(ctx sdk.Context) {
+func (k Keeper) cleanupTimedOutBatches(ctx sdk.Context) (err error) {
 	externalBlockHeight := k.GetLastObservedBlockHeight(ctx).ExternalBlockHeight
 	k.IterateOutgoingTxBatches(ctx, func(batch *types.OutgoingTxBatch) bool {
 		if batch.BatchTimeout < externalBlockHeight {
-			if err := k.RefundOutgoingTxBatch(ctx, batch.TokenContract, batch.BatchNonce); err != nil {
-				panic(fmt.Sprintf("Failed cancel out batch %s %d while trying to execute failed: %s", batch.TokenContract, batch.BatchNonce, err))
+			if err = k.RefundOutgoingTxBatch(ctx, batch.TokenContract, batch.BatchNonce); err != nil {
+				return true
 			}
 		}
 		return false
 	})
+	return err
 }

-func (k Keeper) cleanupTimeOutBridgeCall(ctx sdk.Context) {
+func (k Keeper) cleanupTimeOutBridgeCall(ctx sdk.Context) (err error) {
 	externalBlockHeight := k.GetLastObservedBlockHeight(ctx).ExternalBlockHeight
 	k.IterateOutgoingBridgeCalls(ctx, func(data *types.OutgoingBridgeCall) bool {
 		if data.Timeout > externalBlockHeight {
 			return true
 		}
 		// 1. handler bridge call refund
-		if err := k.RefundOutgoingBridgeCall(ctx, data); err != nil {
-			panic(fmt.Sprintf("failed cancel out bridge call %d while trying to execute failed: %s", data.Nonce, err))
+		if err = k.RefundOutgoingBridgeCall(ctx, data); err != nil {
+			return true
		}

 		// 2. delete bridge call
 		k.DeleteOutgoingBridgeCallRecord(ctx, data.Nonce)
 		return false
 	})
+	return err
 }

 func (k Keeper) pruneOracleSet(ctx sdk.Context, signedOracleSetsWindow uint64) {
24 changes: 13 additions & 11 deletions x/crosschain/keeper/attestation.go
@@ -46,7 +46,9 @@ func (k Keeper) Attest(ctx sdk.Context, oracleAddr sdk.AccAddress, claim types.E
 	k.SetAttestation(ctx, claim.GetEventNonce(), claim.ClaimHash(), att)

 	if !att.Observed && claim.GetEventNonce() == k.GetLastObservedEventNonce(ctx)+1 {
-		k.TryAttestation(ctx, att, claim)
+		if err = k.TryAttestation(ctx, att, claim); err != nil {
+			return nil, err
+		}
 	}

 	ctx = ctx.WithGasMeter(gasMeter)
@@ -59,7 +61,7 @@ func (k Keeper) Attest(ctx sdk.Context, oracleAddr sdk.AccAddress, claim types.E
 // TryAttestation checks if an attestation has enough votes to be applied to the consensus state
 // and has not already been marked Observed, then calls processAttestation to actually apply it to the state,
 // and then marks it Observed and emits an event.
-func (k Keeper) TryAttestation(ctx sdk.Context, att *types.Attestation, claim types.ExternalClaim) {
+func (k Keeper) TryAttestation(ctx sdk.Context, att *types.Attestation, claim types.ExternalClaim) error {
 	// If the attestation has not yet been Observed, sum up the votes and see if it is ready to apply to the state.
 	// This conditional stops the attestation from accidentally being applied twice.
 	// Sum the current powers of all validators who have voted and see if it passes the current threshold
@@ -71,9 +73,7 @@ func (k Keeper) TryAttestation(ctx sdk.Context, att *types.Attestation, claim ty
 		oracleAddr := sdk.MustAccAddressFromBech32(oracleStr)
 		oracle, found := k.GetOracle(ctx, oracleAddr)
 		if !found {
-			k.Logger(ctx).Error("TryAttestation", "not found oracle", oracleAddr.String(), "claimEventNonce",
-				claim.GetEventNonce(), "claimType", claim.GetType(), "claimHeight", claim.GetBlockHeight())
-			continue
+			return types.ErrNoFoundOracle
 		}
 		oraclePower := oracle.GetPower()
 		// Add it to the attestation power's sum
@@ -104,12 +104,17 @@ func (k Keeper) TryAttestation(ctx sdk.Context, att *types.Attestation, claim ty
 		ctx.EventManager().EmitEvent(event)

 		// execute the timeout logic
-		k.cleanupTimedOutBatches(ctx)
-		k.cleanupTimeOutBridgeCall(ctx)
+		if err = k.cleanupTimedOutBatches(ctx); err != nil {
+			return err
+		}
+		if err = k.cleanupTimeOutBridgeCall(ctx); err != nil {
+			return err
+		}

 		k.pruneAttestations(ctx)
 		break
 	}
+	return nil
 }

 // processAttestation actually applies the attestation to the consensus state
@@ -168,10 +173,7 @@ func (k Keeper) IterateAttestationAndClaim(ctx sdk.Context, cb func(*types.Attes
 	for ; iter.Valid(); iter.Next() {
 		att := new(types.Attestation)
 		k.cdc.MustUnmarshal(iter.Value(), att)
-		claim, err := types.UnpackAttestationClaim(k.cdc, att)
-		if err != nil {
-			panic("couldn't cast to claim")
-		}
+		claim := types.MustUnpackAttestationClaim(k.cdc, att)

⚠️ Potential issue

Handle Potential Panic from MustUnpackAttestationClaim

Using types.MustUnpackAttestationClaim can lead to a panic if unpacking fails. To improve reliability, consider handling the error explicitly by using types.UnpackAttestationClaim and managing any potential errors.

Apply this change to handle the error gracefully:

-		claim := types.MustUnpackAttestationClaim(k.cdc, att)
+		claim, err := types.UnpackAttestationClaim(k.cdc, att)
+		if err != nil {
+			k.Logger(ctx).Error("Failed to unpack attestation claim", "error", err)
+			continue // Skip this attestation or handle the error appropriately
+		}

 		// cb returns true to stop early
 		if cb(att, claim) {
 			return
11 changes: 5 additions & 6 deletions x/crosschain/keeper/attestation_handler.go
@@ -11,10 +11,10 @@ import (
 func (k Keeper) AttestationHandler(ctx sdk.Context, externalClaim types.ExternalClaim) error {
 	switch claim := externalClaim.(type) {
 	case *types.MsgSendToFxClaim, *types.MsgBridgeCallClaim, *types.MsgBridgeCallResultClaim:
-		k.SavePendingExecuteClaim(ctx, externalClaim)
+		return k.SavePendingExecuteClaim(ctx, externalClaim)

 	case *types.MsgSendToExternalClaim:
-		k.OutgoingTxBatchExecuted(ctx, claim.TokenContract, claim.BatchNonce)
+		return k.OutgoingTxBatchExecuted(ctx, claim.TokenContract, claim.BatchNonce)

 	case *types.MsgBridgeTokenClaim:
 		return k.AddBridgeTokenExecuted(ctx, claim)
@@ -25,13 +25,12 @@ func (k Keeper) AttestationHandler(ctx sdk.Context, externalClaim types.External
 	default:
 		return types.ErrInvalid.Wrapf("event type: %s", claim.GetType())
 	}
-	return nil
 }

 func (k Keeper) ExecuteClaim(ctx sdk.Context, eventNonce uint64) error {
-	externalClaim, found := k.GetPendingExecuteClaim(ctx, eventNonce)
-	if !found {
-		return sdkerrors.ErrInvalidRequest.Wrap("claim not found")
+	externalClaim, err := k.GetPendingExecuteClaim(ctx, eventNonce)
+	if err != nil {
+		return err
 	}
 	k.DeletePendingExecuteClaim(ctx, eventNonce)
 	switch claim := externalClaim.(type) {
10 changes: 2 additions & 8 deletions x/crosschain/keeper/genesis.go
@@ -92,10 +92,7 @@ func InitGenesis(ctx sdk.Context, k Keeper, state *types.GenesisState) {
 	// reset attestations in state
 	for i := 0; i < len(state.Attestations); i++ {
 		att := state.Attestations[i]
-		claim, err := types.UnpackAttestationClaim(k.cdc, &att)
-		if err != nil {
-			panic("couldn't cast to claim")
-		}
+		claim := types.MustUnpackAttestationClaim(k.cdc, &att)

 		// 0x17
 		k.SetAttestation(ctx, claim.GetEventNonce(), claim.ClaimHash(), &att)
@@ -106,10 +103,7 @@ func InitGenesis(ctx sdk.Context, k Keeper, state *types.GenesisState) {
 	for i := 0; i < len(state.Attestations); i++ {

 		att := state.Attestations[i]
-		claim, err := types.UnpackAttestationClaim(k.cdc, &att)
-		if err != nil {
-			panic("couldn't cast to claim")
-		}
+		claim := types.MustUnpackAttestationClaim(k.cdc, &att)

⚠️ Potential issue

Consistent change, but the same caution applies to MustUnpackAttestationClaim

This change is consistent with the modification on line 95. However, the same concerns apply here regarding the potential for panics during genesis initialization.

Consider applying the same solution as suggested for line 95 to maintain consistency in error handling throughout the function.
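As a rough illustration only: the sketch below assumes the error-returning UnpackAttestationClaim is kept (as the earlier suggestion proposes) and that InitGenesis is changed to return an error which its caller propagates; neither assumption is part of this PR.

// Hypothetical sketch: propagate unpack failures instead of panicking at genesis.
for i := 0; i < len(state.Attestations); i++ {
	att := state.Attestations[i]
	claim, err := types.UnpackAttestationClaim(k.cdc, &att)
	if err != nil {
		return fmt.Errorf("attestation %d: invalid claim: %w", i, err)
	}
	k.SetAttestation(ctx, claim.GetEventNonce(), claim.ClaimHash(), &att)
}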

 		// reconstruct the latest event nonce for every validator
 		// if somehow this genesis state is saved when all attestations
 		// have been cleaned up GetLastEventNonceByOracle handles that case
2 changes: 1 addition & 1 deletion x/crosschain/keeper/oracle_set.go
@@ -27,7 +27,7 @@ func (k Keeper) UpdateOracleSetExecuted(ctx sdk.Context, claim *types.MsgOracleS
 		observedOracleSet.Height = trustedOracleSet.Height

 		if _, err := trustedOracleSet.Equal(observedOracleSet); err != nil {
-			panic(fmt.Sprintf("Potential bridge highjacking: observed oracleSet (%+v) does not match stored oracleSet (%+v)! %s", observedOracleSet, trustedOracleSet, err.Error()))
+			return err
 		}
 	}
 	k.SetLastObservedOracleSet(ctx, observedOracleSet)
12 changes: 8 additions & 4 deletions x/crosschain/keeper/outgoing_tx.go
@@ -71,22 +71,25 @@ func (k Keeper) BuildOutgoingTxBatch(ctx sdk.Context, sender sdk.AccAddress, rec
 	return batch.BatchNonce, nil
 }

-func (k Keeper) OutgoingTxBatchExecuted(ctx sdk.Context, tokenContract string, batchNonce uint64) {
+func (k Keeper) OutgoingTxBatchExecuted(ctx sdk.Context, tokenContract string, batchNonce uint64) (err error) {
 	batch := k.GetOutgoingTxBatch(ctx, tokenContract, batchNonce)
 	if batch == nil {
-		panic(fmt.Sprintf("unknown batch nonce for outgoing tx batch %s %d", tokenContract, batchNonce))
+		return fmt.Errorf("unknown batch nonce for outgoing tx batch %s %d", tokenContract, batchNonce)

🛠️ Refactor suggestion

Standardize error generation using types package

Instead of using fmt.Errorf, consider using the error types defined in the types package for consistency. For example:

-return fmt.Errorf("unknown batch nonce for outgoing tx batch %s %d", tokenContract, batchNonce)
+return types.ErrInvalid.Wrapf("unknown batch nonce for outgoing tx batch %s %d", tokenContract, batchNonce)

This aligns with existing error handling patterns in the codebase.
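For context, sentinel errors such as types.ErrInvalid are typically registered once with cosmossdk.io/errors, which is what gives Wrapf a stable codespace and ABCI error code; the codespace and code below are illustrative, not taken from this repository.

package types

import errorsmod "cosmossdk.io/errors"

// Illustrative registration only; the real values live in the module's error definitions.
var ErrInvalid = errorsmod.Register("crosschain", 3, "invalid")

A handler can then return types.ErrInvalid.Wrapf("unknown batch nonce for outgoing tx batch %s %d", tokenContract, batchNonce) and clients still receive a deterministic error code.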


 	}

 	// Iterate through remaining batches
 	k.IterateOutgoingTxBatches(ctx, func(iterBatch *types.OutgoingTxBatch) bool {
 		// If the iterated batches nonce is lower than the one that was just executed, cancel it
 		if iterBatch.BatchNonce < batch.BatchNonce && iterBatch.TokenContract == tokenContract {
-			if err := k.RefundOutgoingTxBatch(ctx, tokenContract, iterBatch.BatchNonce); err != nil {
-				panic(fmt.Sprintf("Failed cancel out batch %s %d while trying to execute failed: %s", batch.TokenContract, batch.BatchNonce, err))
+			if err = k.RefundOutgoingTxBatch(ctx, tokenContract, iterBatch.BatchNonce); err != nil {
+				return true
 			}
 		}
 		return false
 	})
+	if err != nil {
+		return err
+	}

 	// Delete batch since it is finished
 	k.DeleteBatch(ctx, batch)
@@ -97,6 +100,7 @@ func (k Keeper) OutgoingTxBatchExecuted(ctx sdk.Context, tokenContract string, b
 			k.erc20Keeper.DeleteOutgoingTransferRelation(ctx, k.moduleName, tx.Id)
 		}
 	}
+	return nil
 }

 // --- OUTGOING TX BATCH --- //
15 changes: 9 additions & 6 deletions x/crosschain/keeper/pending_execute_claim.go
@@ -1,31 +1,34 @@
 package keeper

 import (
+	"fmt"
+
 	sdk "github.com/cosmos/cosmos-sdk/types"

 	"github.com/functionx/fx-core/v8/x/crosschain/types"
 )

-func (k Keeper) SavePendingExecuteClaim(ctx sdk.Context, claim types.ExternalClaim) {
+func (k Keeper) SavePendingExecuteClaim(ctx sdk.Context, claim types.ExternalClaim) error {
 	store := ctx.KVStore(k.storeKey)
 	bz, err := k.cdc.MarshalInterface(claim)
 	if err != nil {
-		panic(err)
+		return err
 	}
 	store.Set(types.GetPendingExecuteClaimKey(claim.GetEventNonce()), bz)
+	return err
 }

-func (k Keeper) GetPendingExecuteClaim(ctx sdk.Context, eventNonce uint64) (types.ExternalClaim, bool) {
+func (k Keeper) GetPendingExecuteClaim(ctx sdk.Context, eventNonce uint64) (types.ExternalClaim, error) {
 	store := ctx.KVStore(k.storeKey)
 	bz := store.Get(types.GetPendingExecuteClaimKey(eventNonce))
 	if len(bz) == 0 {
-		return nil, false
+		return nil, fmt.Errorf("claim %d not found", eventNonce)
 	}
 	var claim types.ExternalClaim
 	if err := k.cdc.UnmarshalInterface(bz, &claim); err != nil {
-		panic(err)
+		return nil, err
 	}
-	return claim, true
+	return claim, nil
 }

 func (k Keeper) DeletePendingExecuteClaim(ctx sdk.Context, eventNonce uint64) {
7 changes: 4 additions & 3 deletions x/crosschain/keeper/pending_execute_claim_test.go
@@ -47,10 +47,11 @@ func (s *KeeperMockSuite) TestKeeper_SavePendingExecuteClaim() {
 	}
 	for _, tt := range tests {
 		s.Run(tt.name, func() {
-			s.crosschainKeeper.SavePendingExecuteClaim(s.ctx, tt.claim)
+			err := s.crosschainKeeper.SavePendingExecuteClaim(s.ctx, tt.claim)
+			s.Require().NoError(err)

-			claim, found := s.crosschainKeeper.GetPendingExecuteClaim(s.ctx, tt.claim.GetEventNonce())
-			s.Require().Equal(true, found)
+			claim, err := s.crosschainKeeper.GetPendingExecuteClaim(s.ctx, tt.claim.GetEventNonce())
+			s.Require().NoError(err)
 			s.Require().Equal(claim, tt.claim)
 		})
 	}
8 changes: 5 additions & 3 deletions x/crosschain/types/msgs.go
@@ -280,10 +280,12 @@ var (
 	_ ExternalClaim = &MsgBridgeCallResultClaim{}
 )

-func UnpackAttestationClaim(cdc codectypes.AnyUnpacker, att *Attestation) (ExternalClaim, error) {
+func MustUnpackAttestationClaim(cdc codectypes.AnyUnpacker, att *Attestation) ExternalClaim {
 	var msg ExternalClaim
-	err := cdc.UnpackAny(att.Claim, &msg)
-	return msg, err
+	if err := cdc.UnpackAny(att.Claim, &msg); err != nil {
+		panic(err)
+	}
+	return msg
Comment on lines +283 to +288

⚠️ Potential issue

Reconsider using panic for error handling in MustUnpackAttestationClaim

Changing the function to panic on error can lead to unintended crashes if the panic is not recovered elsewhere in the application. It's generally better practice to return an error and allow the caller to handle it gracefully.

Consider modifying the function to return an error instead of panicking.

Apply this diff to change the function to return an error:

-func MustUnpackAttestationClaim(cdc codectypes.AnyUnpacker, att *Attestation) ExternalClaim {
+func UnpackAttestationClaim(cdc codectypes.AnyUnpacker, att *Attestation) (ExternalClaim, error) {
    var msg ExternalClaim
    if err := cdc.UnpackAny(att.Claim, &msg); err != nil {
-       panic(err)
+       return nil, err
    }
-    return msg
+    return msg, nil
}


 }

 func (m *MsgClaim) ValidateBasic() (err error) {