Fix(node): altda failover to ethda should keep finalizing l2 chain #12845
base: develop
Changes from 2 commits
Commits: bb6be8d, 672f820, 1bf1f68, 34b2df9, ce0267b
@@ -117,8 +117,16 @@ func (d *DA) OnFinalizedHeadSignal(f HeadSignalFn) {
 func (d *DA) updateFinalizedHead(l1Finalized eth.L1BlockRef) {
 	d.l1FinalizedHead = l1Finalized
 	// Prune the state to the finalized head
-	d.state.Prune(l1Finalized.ID())
-	d.finalizedHead = d.state.lastPrunedCommitment
+	lastPrunedCommIncBlock := d.state.Prune(l1Finalized.ID())
+	d.log.Debug("updateFinalizedHead", "currFinalizedHead", d.finalizedHead.Number, "lastPrunedCommIncBlock", lastPrunedCommIncBlock.Number, "l1Finalized", l1Finalized.Number)
+	// If a commitment was pruned, set the finalized head to that commitment's inclusion block
+	// When no commitments are left to be pruned (one example is if we have failed over to ethda)
+	// then updateFinalizedFromL1 becomes the main driver of the finalized head.
+	// Note that updateFinalizedFromL1 is only called when len(d.state.commitments) == 0
+	var zero eth.L1BlockRef
+	if lastPrunedCommIncBlock != zero {
+		d.finalizedHead = lastPrunedCommIncBlock
+	}
 }

 // updateFinalizedFromL1 updates the finalized head based on the challenge window.

Review comment (on `if lastPrunedCommIncBlock != zero {`): nit: use zero value directly, to be consistent with other zero value cases in the repo
@@ -133,6 +141,7 @@ func (d *DA) updateFinalizedFromL1(ctx context.Context, l1 L1Fetcher) error {
 	if err != nil {
 		return err
 	}
+	d.log.Debug("updateFinalizedFromL1", "currFinalizedHead", d.finalizedHead.Number, "newFinalizedHead", ref.Number, "l1FinalizedHead", d.l1FinalizedHead.Number, "challengeWindow", d.cfg.ChallengeWindow)
 	d.finalizedHead = ref
 	return nil
 }
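For readers skimming the diff, the interplay of the two finalization drivers can be summarized with a minimal, self-contained sketch. This is an illustration only: `blockRef` and `finalizedFromL1` are stand-ins, not the actual op-alt-da types, and the exact guard conditions in `updateFinalizedFromL1` may differ. The key idea, stated in the code comments above and in the TODO of the failover test below, is that once no altda commitments remain to be pruned (for example after failing over to ethda), the finalized head is driven by the L1 finalized head minus the challenge window.

```go
// Hypothetical sketch only: blockRef stands in for eth.L1BlockRef, and
// finalizedFromL1 mirrors the idea behind damgr.go's updateFinalizedFromL1.
package main

import "fmt"

type blockRef struct{ Number uint64 }

// finalizedFromL1 returns the highest L1 block that altda can consider final:
// the L1 finalized head minus the challenge window. This path only matters
// when the pruning-driven path above has nothing left to prune (e.g. after
// failover to ethda).
func finalizedFromL1(l1Finalized blockRef, challengeWindow uint64) (blockRef, bool) {
	if l1Finalized.Number < challengeWindow {
		return blockRef{}, false // nothing is past the challenge window yet
	}
	return blockRef{Number: l1Finalized.Number - challengeWindow}, true
}

func main() {
	// With ChallengeWindow=16, as in the e2e harness below, an L1 finalized
	// head at block 40 lets altda finalize up to L1 block 24.
	ref, ok := finalizedFromL1(blockRef{Number: 40}, 16)
	fmt.Println(ok, ref.Number) // true 24
}
```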
@@ -1,6 +1,7 @@
 package altda

 import (
+	"log/slog"
 	"math/big"
 	"math/rand"
 	"testing"
@@ -49,6 +50,12 @@ type L2AltDA struct {

 type AltDAParam func(p *e2eutils.TestParams)

+func WithLogLevel(level slog.Level) AltDAParam {
+	return func(p *e2eutils.TestParams) {
+		p.LogLevel = level
+	}
+}
+
 func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA {
 	p := &e2eutils.TestParams{
 		MaxSequencerDrift: 40,
@@ -57,11 +64,12 @@ func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA {
 		L1BlockTime:       12,
 		UseAltDA:          true,
 		AllocType:         config.AllocTypeAltDA,
+		LogLevel:          log.LevelDebug,
 	}
 	for _, apply := range params {
 		apply(p)
 	}
-	log := testlog.Logger(t, log.LvlDebug)
+	log := testlog.Logger(t, p.LogLevel)

 	dp := e2eutils.MakeDeployParams(t, p)
 	sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc)
@@ -75,14 +83,13 @@ func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA {
 	engine := helpers.NewL2Engine(t, log, sd.L2Cfg, jwtPath)
 	engCl := engine.EngineClient(t, sd.RollupCfg)

-	storage := &altda.DAErrFaker{Client: altda.NewMockDAClient(log)}
-
 	l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
 	require.NoError(t, err)

 	altDACfg, err := sd.RollupCfg.GetOPAltDAConfig()
 	require.NoError(t, err)

+	storage := &altda.DAErrFaker{Client: altda.NewMockDAClient(log)}
 	daMgr := altda.NewAltDAWithStorage(log, altDACfg, storage, &altda.NoopMetrics{})

 	sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, 0, nil)
@@ -177,6 +184,34 @@ func (a *L2AltDA) ActNewL2Tx(t helpers.Testing) {
 	a.lastCommBn = a.miner.L1Chain().CurrentBlock().Number.Uint64()
 }

+// ActNewL2TxFinalized sends a new L2 transaction, submits a batch containing it to L1,
+// and finalizes the L1 and L2 chains (including advancing enough to clear the altda challenge window).
+//
+// TODO: understand why (notation is l1unsafe/l1safe/l1finalized-l2unsafe/l2safe/l2finalized):
+//   - the first call advances heads by 0/0/17-71/71/1,
+//   - the second call advances by 0/0/17-204/204/82,
+//   - but all subsequent calls advance status by exactly 0/0/17-204/204/204.
+//
+// 17 makes sense because challengeWindow=16 and we create 1 extra block before that,
+// and 204 L2 blocks = 17 L1 blocks * 12 L2 blocks/L1 block (L1 block time=12s, L2 block time=1s).
+func (a *L2AltDA) ActNewL2TxFinalized(t helpers.Testing) {
+	// Include a new l2 batcher transaction, submitting an input commitment to the l1.
+	a.ActNewL2Tx(t)
+	// Create ChallengeWindow empty blocks so the above batcher blocks can finalize (can't be challenged anymore)
+	a.ActL1Blocks(t, a.altDACfg.ChallengeWindow)
+	// Finalize the L1 chain and the L2 chain (by draining all events and running through derivation pipeline)
+	// TODO: understand why we need to drain the pipeline before AND after actL1Finalized
+	a.sequencer.ActL2PipelineFull(t)
+	a.ActL1Finalized(t)
+	a.sequencer.ActL2PipelineFull(t)
+
+	// Uncomment the below code to observe the behavior described in the TODO above
+	// syncStatus := a.sequencer.SyncStatus()
+	// a.log.Info("Sync status after ActNewL2TxFinalized",
+	// 	"unsafeL1", syncStatus.HeadL1.Number, "safeL1", syncStatus.SafeL1.Number, "finalizedL1", syncStatus.FinalizedL1.Number,
+	// 	"unsafeL2", syncStatus.UnsafeL2.Number, "safeL2", syncStatus.SafeL2.Number, "finalizedL2", syncStatus.FinalizedL2.Number)
+}
+
 func (a *L2AltDA) ActDeleteLastInput(t helpers.Testing) {
 	require.NoError(t, a.storage.Client.DeleteData(a.lastComm))
 }
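As an aside, the commented-out logging above can be condensed into a small helper that prints the sync status in the same l1unsafe/l1safe/l1finalized-l2unsafe/l2safe/l2finalized notation used throughout the test comments. This helper is not part of the PR; it is a sketch that assumes the `eth.SyncStatus` fields referenced by the commented-out snippet (HeadL1, SafeL1, FinalizedL1, UnsafeL2, SafeL2, FinalizedL2).

```go
// Hypothetical helper, not part of this PR: formats a sync status in the
// notation used by the comments in this test file.
package altda

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-service/eth"
)

func fmtSyncStatus(ss *eth.SyncStatus) string {
	return fmt.Sprintf("%d/%d/%d-%d/%d/%d",
		ss.HeadL1.Number, ss.SafeL1.Number, ss.FinalizedL1.Number,
		ss.UnsafeL2.Number, ss.SafeL2.Number, ss.FinalizedL2.Number)
}
```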
@@ -363,7 +398,7 @@ func TestAltDA_ChallengeResolved(gt *testing.T) {
 }

 // DA storage service goes offline while sequencer keeps making blocks. When storage comes back online, it should be able to catch up.
-func TestAltDA_StorageError(gt *testing.T) {
+func TestAltDA_StorageGetError(gt *testing.T) {
 	t := helpers.NewDefaultTesting(gt)
 	harness := NewL2AltDA(t)

@@ -528,19 +563,20 @@ func TestAltDA_Finalization(gt *testing.T) {
 	t := helpers.NewDefaultTesting(gt)
 	a := NewL2AltDA(t)

-	// build L1 block #1
+	// Notation everywhere below is l1unsafe/l1safe/l1finalized-l2unsafe/l2safe/l2finalized
+	// build L1 block #1: 0/0/0-0/0/0 -> 1/1/0-0/0/0
 	a.ActL1Blocks(t, 1)
 	a.miner.ActL1SafeNext(t)

-	// Fill with l2 blocks up to the L1 head
+	// Fill with l2 blocks up to the L1 head: 1/1/0:0/0/0 -> 1/1/0:1/1/0
 	a.sequencer.ActL1HeadSignal(t)
 	a.sequencer.ActBuildToL1Head(t)

 	a.sequencer.ActL2PipelineFull(t)
 	a.sequencer.ActL1SafeSignal(t)
 	require.Equal(t, uint64(1), a.sequencer.SyncStatus().SafeL1.Number)

-	// add L1 block #2
+	// add L1 block #2: 1/1/0:1/1/0 -> 2/2/1:2/1/0
 	a.ActL1Blocks(t, 1)
 	a.miner.ActL1SafeNext(t)
 	a.miner.ActL1FinalizeNext(t)
@@ -552,7 +588,7 @@ func TestAltDA_Finalization(gt *testing.T) {
 	a.sequencer.ActL1FinalizedSignal(t)
 	a.sequencer.ActL1SafeSignal(t)

-	// commit all the l2 blocks to L1
+	// commit all the l2 blocks to L1: 2/2/1:2/1/0 -> 3/2/1:2/1/0
 	a.batcher.ActSubmitAll(t)
 	a.miner.ActL1StartBlock(12)(t)
 	a.miner.ActL1IncludeTx(a.dp.Addresses.Batcher)(t)
@@ -561,31 +597,31 @@ func TestAltDA_Finalization(gt *testing.T) {
 	// verify
 	a.sequencer.ActL2PipelineFull(t)

-	// fill with more unsafe L2 blocks
+	// fill with more unsafe L2 blocks: 3/2/1:2/1/0 -> 3/2/1:3/1/0
 	a.sequencer.ActL1HeadSignal(t)
 	a.sequencer.ActBuildToL1Head(t)

-	// submit those blocks too, block #4
+	// submit those blocks too, block #4: 3/2/1:3/1/0 -> 4/2/1:3/1/0
 	a.batcher.ActSubmitAll(t)
 	a.miner.ActL1StartBlock(12)(t)
 	a.miner.ActL1IncludeTx(a.dp.Addresses.Batcher)(t)
 	a.miner.ActL1EndBlock(t)

-	// add some more L1 blocks #5, #6
+	// add some more L1 blocks #5, #6: 4/2/1:3/1/0 -> 6/2/1:3/1/0
 	a.miner.ActEmptyBlock(t)
 	a.miner.ActEmptyBlock(t)

-	// and more unsafe L2 blocks
+	// and more unsafe L2 blocks: 6/2/1:3/1/0 -> 6/2/1:6/1/0
 	a.sequencer.ActL1HeadSignal(t)
 	a.sequencer.ActBuildToL1Head(t)

-	// move safe/finalize markers: finalize the L1 chain block with the first batch, but not the second
+	// move safe/finalize markers: 6/2/1:6/1/0 -> 6/4/3:6/1/0
 	a.miner.ActL1SafeNext(t)     // #2 -> #3
 	a.miner.ActL1SafeNext(t)     // #3 -> #4
 	a.miner.ActL1FinalizeNext(t) // #1 -> #2
 	a.miner.ActL1FinalizeNext(t) // #2 -> #3

-	// L1 safe and finalized as expected
+	// L1 safe and finalized as expected:
 	a.sequencer.ActL2PipelineFull(t)
 	a.sequencer.ActL1FinalizedSignal(t)
 	a.sequencer.ActL1SafeSignal(t)
@@ -607,3 +643,42 @@ func TestAltDA_Finalization(gt *testing.T) {
 	// given 12s l1 time and 1s l2 time, l2 should be 12 * 3 = 36 blocks finalized
 	require.Equal(t, uint64(36), a.sequencer.SyncStatus().FinalizedL2.Number)
 }
+
+func TestAltDA_FinalizationAfterEthDAFailover(gt *testing.T) {
+	t := helpers.NewDefaultTesting(gt)
+	// we only print critical logs to be able to see the statusLogs
+	harness := NewL2AltDA(t, WithLogLevel(log.LevelDebug))
+
+	// We first call this twice because the first 2 times are irregular.
+	// See ActNewL2TxFinalized's TODO comment.
+	harness.ActNewL2TxFinalized(t)
+	harness.ActNewL2TxFinalized(t)
+
+	// ActNewL2TxFinalized advances L1 by (1+ChallengeWindow) L1 blocks, and there are 12 L2 blocks per L1 block.
+	diffL2Blocks := (1 + harness.altDACfg.ChallengeWindow) * 12
+
+	for i := 0; i < 5; i++ {
+		ssBefore := harness.sequencer.SyncStatus()
+		harness.ActNewL2TxFinalized(t)
+		ssAfter := harness.sequencer.SyncStatus()
+		// Finalized head should advance normally in altda mode
+		require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number)
+	}
+
+	// We swap out altda batcher for ethda batcher
+	harness.batcher.ActFailoverToEthDA(t)
+
+	for i := 0; i < 2; i++ {
+		ssBefore := harness.sequencer.SyncStatus()
+		harness.ActNewL2TxFinalized(t)
+		if i == 0 {
+			// TODO: figure out why this is needed
+			// I think it's because the L1 driven finalizedHead is set to L1FinalizedHead-ChallengeWindow (see damgr.go updateFinalizedFromL1),
+			// so it trails behind by an extra challenge_window when we switch over to ethDA.
+			harness.ActNewL2TxFinalized(t)
+		}
+
+		ssAfter := harness.sequencer.SyncStatus()
+		// Even after failover, the finalized head should continue advancing normally
+		require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number)
+	}
+}

Review comment (on `func TestAltDA_FinalizationAfterEthDAFailover`): Nice, this test case covers ethDA -> altDA. Can we also have a similar case ethDA -> altDA -> ethDA, simulating a temp altDA failure?
Reply: Done: ce0267b

Review comment (on the `if i == 0` block): TODO: this felt weird to me, but maybe it's expected and fine?
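The TODO in that loop can be sanity-checked with back-of-the-envelope numbers. This is only a hedged reading of the comment, not a verified explanation of the behavior: if the L1-driven finalized head trails by ChallengeWindow L1 blocks after failover, and each ActNewL2TxFinalized advances L1 finality by 1+ChallengeWindow blocks, a single extra call is enough to absorb the lag. With the harness values (ChallengeWindow=16, 12 L2 blocks per L1 block):

```go
// Back-of-the-envelope check of the TODO's hypothesis above; the numbers are
// taken from the e2e harness (ChallengeWindow=16, L1 block time=12s, L2 block time=1s).
package main

import "fmt"

func main() {
	const challengeWindow = 16 // L1 blocks
	const l2PerL1 = 12         // L2 blocks produced per L1 block

	// Extra distance the L1-driven finalized head trails by after failover,
	// per the TODO: L1FinalizedHead - ChallengeWindow.
	lagL2 := challengeWindow * l2PerL1 // 192 L2 blocks

	// L2 blocks finalized by one ActNewL2TxFinalized call in steady state:
	// it advances L1 (and L1 finality) by 1+ChallengeWindow blocks.
	perCallL2 := (1 + challengeWindow) * l2PerL1 // 204 L2 blocks

	fmt.Println(lagL2, perCallL2) // 192 204: one extra call more than covers the lag
}
```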
Review comment (on the `// Note that updateFinalizedFromL1 is only called when len(d.state.commitments) == 0` comment in damgr.go): nit: ... only called when `d.state.NoCommitments()` is true
Reply: 1bf1f68