Beat [5/4]: fix itests for blockbeat #9227

Merged

Changes from all commits (29 commits)
a12942a
contractcourt: make sure `launchResolvers` is called on new blockbeat
yyforyongyu Dec 18, 2024
28334af
multi: optimize loggings around changes from `blockbeat`
yyforyongyu Oct 17, 2024
da46e41
lntest+itest: fix `testSweepCPFPAnchorOutgoingTimeout`
yyforyongyu Oct 24, 2024
afdbbb1
itest: fix `testSweepCPFPAnchorIncomingTimeout`
yyforyongyu Oct 24, 2024
dad98b4
itest: fix `testSweepHTLCs`
yyforyongyu Oct 24, 2024
dcbaf4a
itest: fix `testSweepCommitOutputAndAnchor`
yyforyongyu Oct 24, 2024
7ad4c98
itest: fix `testBumpForceCloseFee`
yyforyongyu Oct 17, 2024
e054515
itest: fix `testPaymentSucceededHTLCRemoteSwept`
yyforyongyu Oct 24, 2024
1226d4f
lntest+itest: start flattening the multi-hop tests
yyforyongyu Oct 17, 2024
a1ad9ca
itest: simplify and flatten `testMultiHopReceiverChainClaim`
yyforyongyu Oct 18, 2024
73fb582
lntest+itest: flatten `testMultiHopLocalForceCloseOnChainHtlcTimeout`
yyforyongyu Oct 18, 2024
c8fdca3
lntest+itest: flatten `testMultiHopRemoteForceCloseOnChainHtlcTimeout`
yyforyongyu Oct 19, 2024
8d08283
itest: flatten `testMultiHopHtlcLocalChainClaim`
yyforyongyu Oct 21, 2024
0910db8
itest: flatten `testMultiHopHtlcRemoteChainClaim`
yyforyongyu Oct 22, 2024
eb1a923
itest: flatten `testMultiHopHtlcAggregation`
yyforyongyu Oct 23, 2024
c3a0bc9
itest: flatten `testHtlcTimeoutResolverExtractPreimageLocal`
yyforyongyu Oct 23, 2024
94dccfb
itest: flatten `testHtlcTimeoutResolverExtractPreimageRemote`
yyforyongyu Oct 23, 2024
0dac647
itest: rename file to reflect the tests
yyforyongyu Oct 23, 2024
dd4a644
itest: remove unnecessary force close
yyforyongyu Oct 23, 2024
d3d6c8d
itest: remove redundant block mining in `testFailingChannel`
yyforyongyu Oct 24, 2024
3c28a3b
itest: remove redundant block mining in `testChannelFundingWithUnstabl…
yyforyongyu Oct 24, 2024
37957f1
itest: remove redundant block in `testPsbtChanFundingWithUnstableUtxos`
yyforyongyu Oct 24, 2024
9eb4e74
itest: remove redundant blocks in channel backup tests
yyforyongyu Oct 24, 2024
13a41d7
itest+lntest: fix channel force close test
yyforyongyu Jun 29, 2024
29828f9
itest: flatten and fix `testWatchtower`
yyforyongyu Oct 25, 2024
142f673
itest: remove redundant block in multiple tests
yyforyongyu Oct 25, 2024
d8de303
itest: assert payment status after sending
yyforyongyu Oct 24, 2024
3250e94
lntest+itest: remove the usage of `ht.AssertActiveHtlcs`
yyforyongyu Nov 5, 2024
b4ab36c
lntest+itest: export `DeriveFundingShim`
yyforyongyu Dec 20, 2024
6 changes: 3 additions & 3 deletions chainio/consumer.go

@@ -79,7 +79,7 @@ func (b *BeatConsumer) ProcessBlock(beat Blockbeat) error {
 	// `beat.NotifyBlockProcessed` to send the error back here.
 	select {
 	case err := <-b.errChan:
-		beat.logger().Debugf("[%s] processed beat: err=%v", b.name, err)
+		beat.logger().Tracef("[%s] processed beat: err=%v", b.name, err)

 		return err

@@ -99,11 +99,11 @@ func (b *BeatConsumer) ProcessBlock(beat Blockbeat) error {
 // processing the block.
 func (b *BeatConsumer) NotifyBlockProcessed(beat Blockbeat, err error) {
 	// Update the current height.
-	beat.logger().Debugf("[%s]: notifying beat processed", b.name)
+	beat.logger().Tracef("[%s]: notifying beat processed", b.name)

 	select {
 	case b.errChan <- err:
-		beat.logger().Debugf("[%s]: notified beat processed, err=%v",
+		beat.logger().Tracef("[%s]: notified beat processed, err=%v",
 			b.name, err)

 	case <-b.quit:
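For context on what these Tracef lines record: ProcessBlock blocks the dispatcher until the consumer's goroutine hands a result back over errChan via NotifyBlockProcessed. A stripped-down sketch of that handshake (illustrative only; the real code also selects on a quit channel, as the diff shows):

package main

import "fmt"

// beatConsumer mirrors the handshake in the diff above: processBlock
// blocks until the consumer's work loop reports back over errChan.
type beatConsumer struct {
	errChan chan error
}

// processBlock hands a height to the consumer's goroutine and waits
// for the processing result to come back.
func (b *beatConsumer) processBlock(height int32,
	work func(int32) error) error {

	go func() {
		// The consumer finishes its work, then reports the result,
		// mirroring NotifyBlockProcessed.
		b.errChan <- work(height)
	}()

	// Mirror ProcessBlock: block until the result arrives.
	return <-b.errChan
}

func main() {
	c := &beatConsumer{errChan: make(chan error, 1)}

	err := c.processBlock(100, func(h int32) error {
		fmt.Println("processing block at height", h)
		return nil
	})
	fmt.Println("beat processed, err =", err)
}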
5 changes: 5 additions & 0 deletions chainio/dispatcher.go

@@ -9,6 +9,7 @@ import (

 	"github.com/btcsuite/btclog/v2"
 	"github.com/lightningnetwork/lnd/chainntnfs"
+	"github.com/lightningnetwork/lnd/lnutils"
 	"golang.org/x/sync/errgroup"
 )

@@ -136,6 +137,10 @@ func (b *BlockbeatDispatcher) dispatchBlocks(
 			return
 		}

+		// Log a separator so it's easier to identify when a
+		// new block arrives for subsystems.
+		clog.Debugf("%v", lnutils.NewSeparatorClosure())
+
 		clog.Infof("Received new block %v at height %d, "+
 			"notifying consumers...", blockEpoch.Hash,
 			blockEpoch.Height)
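lnutils.NewSeparatorClosure is introduced by this PR series but its body isn't shown in the diff. Judging from its use under Debugf("%v", ...), it follows the lazy log-closure pattern: the separator string is only built if debug logging is actually enabled. A sketch under that assumption (the real helper's width and character may differ):

package lnutils

import "strings"

// separatorClosure defers building its string until a logger actually
// formats the %v verb, so no work happens when debug logging is off.
type separatorClosure func() string

// String makes the closure satisfy fmt.Stringer, which Debugf("%v", x)
// evaluates lazily.
func (s separatorClosure) String() string { return s() }

// NewSeparatorClosure returns a lazily rendered separator line.
func NewSeparatorClosure() separatorClosure {
	return func() string {
		return strings.Repeat("=", 80)
	}
}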
24 changes: 15 additions & 9 deletions contractcourt/channel_arbitrator.go

@@ -479,7 +479,7 @@ func (c *ChannelArbitrator) Start(state *chanArbStartState,
 		}
 	}

-	log.Debugf("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
+	log.Tracef("Starting ChannelArbitrator(%v), htlc_set=%v, state=%v",
 		c.cfg.ChanPoint, lnutils.SpewLogClosure(c.activeHTLCs),
 		state.currentState)

@@ -2618,14 +2618,14 @@ func (c *ChannelArbitrator) replaceResolver(oldResolver,
 func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) {
 	defer c.wg.Done()

-	log.Debugf("ChannelArbitrator(%v): attempting to resolve %T",
+	log.Tracef("ChannelArbitrator(%v): attempting to resolve %T",
 		c.cfg.ChanPoint, currentContract)

 	// Until the contract is fully resolved, we'll continue to iteratively
 	// resolve the contract one step at a time.
 	for !currentContract.IsResolved() {
-		log.Debugf("ChannelArbitrator(%v): contract %T not yet resolved",
-			c.cfg.ChanPoint, currentContract)
+		log.Tracef("ChannelArbitrator(%v): contract %T not yet "+
+			"resolved", c.cfg.ChanPoint, currentContract)

 		select {

@@ -2994,12 +2994,18 @@ func (c *ChannelArbitrator) handleBlockbeat(beat chainio.Blockbeat) error {
 	// If the state is StateContractClosed, StateWaitingFullResolution, or
 	// StateFullyResolved, there's no need to read the close event channel
-	// and launch the resolvers since the arbitrator can only get to this
-	// state after processing a previous close event and launched all its
-	// resolvers.
+	// since the arbitrator can only get to this state after processing a
+	// previous close event and launched all its resolvers.
 	if c.state.IsContractClosed() {
-		log.Infof("ChannelArbitrator(%v): skipping launching "+
-			"resolvers in state=%v", c.cfg.ChanPoint, c.state)
+		log.Infof("ChannelArbitrator(%v): skipping reading close "+
+			"events in state=%v", c.cfg.ChanPoint, c.state)
+
+		// Launch all active resolvers when a new blockbeat is
+		// received. Even when the contract is closed, we still need
+		// this, as the resolvers may transform into new ones. For
+		// already launched resolvers this will be a NOOP, as they
+		// track their own `launched` states.
+		c.launchResolvers()

 		return nil
 	}

Collaborator:
Does this implementation account for the resolver receiving new blockbeat information? For example, htlcOutgoingContestResolver.Launch() first checks its launched state and exits immediately if it has already been launched. Then it queries h.ChainIO.GetBestBlock and proceeds based on the best block height.

Should the resolver consider the blockbeat information passed as an argument to handleBlockbeat? Specifically, could this argument influence its decision-making in the resolver?

Member Author:
Thanks for taking a more detailed look! Yes, the only blockbeat info the resolvers need is the exact block at which they should be launched; beyond that, there's no need to process more beats.

Contest resolvers are a bit different: we never set the launched state in Launch, but rely on their underlying timeout/success resolvers to set that state.
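The launched-state guard the author references is what makes calling launchResolvers on every beat safe. A stripped-down sketch of that idempotent-launch pattern (illustrative types only, not lnd's actual resolver interfaces):

package main

import (
	"fmt"
	"sync"
)

// resolver sketches the guard described above: only the first Launch
// call does any work; later blockbeats find launched=true and return.
type resolver struct {
	mu       sync.Mutex
	launched bool
}

func (r *resolver) Launch() error {
	r.mu.Lock()
	if r.launched {
		r.mu.Unlock()

		// Already launched on an earlier beat: NOOP.
		return nil
	}
	r.launched = true
	r.mu.Unlock()

	// First launch: query chain state and start resolving.
	fmt.Println("launching resolver")

	return nil
}

func main() {
	r := &resolver{}

	// Simulate launchResolvers() firing on three consecutive beats:
	// only the first call prints.
	for i := 0; i < 3; i++ {
		_ = r.Launch()
	}
}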
5 changes: 2 additions & 3 deletions contractcourt/htlc_outgoing_contest_resolver.go

@@ -165,9 +165,8 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) {
 		if newHeight >= expiry-1 {
 			h.log.Infof("HTLC about to expire "+
 				"(height=%v, expiry=%v), transforming "+
-				"into timeout resolver", h,
-				h.htlcResolution.ClaimOutpoint,
-				newHeight, h.htlcResolution.Expiry)
+				"into timeout resolver", newHeight,
+				h.htlcResolution.Expiry)

 			return h.htlcTimeoutResolver, nil
 		}
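For context, the bug fixed here is a printf argument mismatch: the format string has two %v verbs, but the old call passed four arguments (a stray h and ClaimOutpoint), so the verbs bound to the wrong values. A standalone reproduction of the failure mode, which go vet's printf check also flags:

package main

import "log"

func main() {
	height, expiry := 799, 800

	// Buggy: two verbs, four arguments. The verbs bind to the first
	// two values and the output gains a "%!(EXTRA ...)" suffix.
	log.Printf("height=%v, expiry=%v", struct{}{}, "outpoint",
		height, expiry)

	// Fixed: arguments match the verbs, as in the new code above.
	log.Printf("height=%v, expiry=%v", height, expiry)
}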
48 changes: 13 additions & 35 deletions itest/list_on_test.go

@@ -13,10 +13,6 @@ var allTestCases = []*lntest.TestCase{
 		Name:     "basic funding flow",
 		TestFunc: testBasicChannelFunding,
 	},
-	{
-		Name:     "multi hop receiver chain claim",
-		TestFunc: testMultiHopReceiverChainClaim,
-	},
 	{
 		Name:     "external channel funding",
 		TestFunc: testExternalFundingChanPoint,

@@ -153,18 +149,6 @@ var allTestCases = []*lntest.TestCase{
 		Name:     "addpeer config",
 		TestFunc: testAddPeerConfig,
 	},
-	{
-		Name:     "multi hop htlc local timeout",
-		TestFunc: testMultiHopHtlcLocalTimeout,
-	},
-	{
-		Name:     "multi hop local force close on-chain htlc timeout",
-		TestFunc: testMultiHopLocalForceCloseOnChainHtlcTimeout,
-	},
-	{
-		Name:     "multi hop remote force close on-chain htlc timeout",
-		TestFunc: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
-	},
 	{
 		Name:     "private channel update policy",
 		TestFunc: testUpdateChannelPolicyForPrivateChannel,

@@ -226,11 +210,15 @@ var allTestCases = []*lntest.TestCase{
 		TestFunc: testChannelUnsettledBalance,
 	},
 	{
-		Name:     "channel force closure",
-		TestFunc: testChannelForceClosure,
+		Name:     "channel force closure anchor",
+		TestFunc: testChannelForceClosureAnchor,
+	},
+	{
+		Name:     "channel force closure simple taproot",
+		TestFunc: testChannelForceClosureSimpleTaproot,
 	},
 	{
-		Name:     "failing link",
+		Name:     "failing channel",
 		TestFunc: testFailingChannel,
 	},

@@ -313,18 +301,6 @@ var allTestCases = []*lntest.TestCase{
 		Name:     "REST API",
 		TestFunc: testRestAPI,
 	},
-	{
-		Name:     "multi hop htlc local chain claim",
-		TestFunc: testMultiHopHtlcLocalChainClaim,
-	},
-	{
-		Name:     "multi hop htlc remote chain claim",
-		TestFunc: testMultiHopHtlcRemoteChainClaim,
-	},
-	{
-		Name:     "multi hop htlc aggregation",
-		TestFunc: testMultiHopHtlcAggregation,
-	},
 	{
 		Name:     "revoked uncooperative close retribution",
 		TestFunc: testRevokedCloseRetribution,

@@ -574,10 +550,6 @@ var allTestCases = []*lntest.TestCase{
 		Name:     "lookup htlc resolution",
 		TestFunc: testLookupHtlcResolution,
 	},
-	{
-		Name:     "watchtower",
-		TestFunc: testWatchtower,
-	},
 	{
 		Name:     "channel fundmax",
 		TestFunc: testChannelFundMax,

@@ -715,3 +687,9 @@ var allTestCases = []*lntest.TestCase{
 		TestFunc: testQuiescence,
 	},
 }
+
+func init() {
+	// Register subtests.
+	allTestCases = append(allTestCases, multiHopForceCloseTestCases...)
+	allTestCases = append(allTestCases, watchtowerTestCases...)
+}
Collaborator:
I propose avoiding the init() function:

var allTestCases = append([]*lntest.TestCase{
	{
		Name:     "update channel status",
		TestFunc: testUpdateChanStatus,
	},
	// all other tests
}, multiHopForceCloseTestCases...,
)

Member Author:
This is an intermediate version; the final version is (lnd/itest/list_on_test.go, lines 703 to 754 in 96756b4):

func init() {
	// Register subtests.
	allTestCases = appendPrefixed(
		"multihop", allTestCases, multiHopForceCloseTestCases,
	)
	allTestCases = appendPrefixed(
		"watchtower", allTestCases, watchtowerTestCases,
	)
	allTestCases = appendPrefixed(
		"psbt", allTestCases, psbtFundingTestCases,
	)
	allTestCases = appendPrefixed(
		"remote signer", allTestCases, remoteSignerTestCases,
	)
	allTestCases = appendPrefixed(
		"channel backup", allTestCases, channelRestoreTestCases,
	)
	allTestCases = appendPrefixed(
		"utxo selection", allTestCases, fundUtxoSelectionTestCases,
	)
	allTestCases = appendPrefixed(
		"zero conf", allTestCases, zeroConfPolicyTestCases,
	)
	allTestCases = appendPrefixed(
		"channel fee policy", allTestCases, channelFeePolicyTestCases,
	)
	allTestCases = appendPrefixed(
		"wallet import account", allTestCases,
		walletImportAccountTestCases,
	)
	allTestCases = appendPrefixed(
		"funding", allTestCases, basicFundingTestCases,
	)
	allTestCases = appendPrefixed(
		"send to route", allTestCases, sendToRouteTestCases,
	)

	// Prepare the test cases for windows to exclude some of the flaky
	// ones.
	//
	// NOTE: We need to run this before the isWindowsOS check to make sure
	// the excluded tests are found in allTestCases. Otherwise, if a
	// non-existing test is included in excludedTestsWindows, we won't be
	// able to find it until it's pushed to the CI, which creates a much
	// longer feedback loop.
	windowsTestCases := filterWindowsFlakyTests()

	// If this is Windows, we'll skip running some of the flaky tests.
	if isWindowsOS() {
		allTestCases = windowsTestCases
	}
}

Since it involves a function call, I think we have to do it here.

Other than that, I'm curious: why avoid the init function?
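appendPrefixed itself isn't shown in this thread; a plausible sketch of what such a helper does, given the lntest.TestCase fields visible elsewhere in this diff (illustrative only; assumes "fmt" is imported, and the actual helper in lnd may differ):

// appendPrefixed appends each subtest to the running list, prefixing
// its name (e.g. "watchtower" + "session rotation" becomes
// "watchtower session rotation") so related cases group together when
// filtering tests by name.
func appendPrefixed(prefix string, all,
	tests []*lntest.TestCase) []*lntest.TestCase {

	for _, tc := range tests {
		all = append(all, &lntest.TestCase{
			Name:     fmt.Sprintf("%s %s", prefix, tc.Name),
			TestFunc: tc.TestFunc,
		})
	}

	return all
}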

32 changes: 14 additions & 18 deletions itest/lnd_channel_backup_test.go

@@ -637,8 +637,8 @@ func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest,
 	minerHeight := ht.CurrentHeight()
 	thawHeight := minerHeight + thawHeightDelta

-	fundingShim, _ = deriveFundingShim(
-		ht, dave, carol, crs.params.Amt, thawHeight, true, ct,
+	fundingShim, _ = ht.DeriveFundingShim(
+		dave, carol, crs.params.Amt, thawHeight, true, ct,
 	)
 	crs.params.FundingShim = fundingShim
 }

@@ -1320,12 +1320,20 @@ func testDataLossProtection(ht *lntest.HarnessTest) {
 	// information Dave needs to sweep his funds.
 	require.NoError(ht, restartDave(), "unable to restart Eve")

+	// Mine a block to trigger Dave's chain watcher to process Carol's sweep
+	// tx.
+	//
+	// TODO(yy): remove this block once the blockbeat starts remembering
+	// its last processed block and can handle looking for spends in the
+	// past blocks.
+	ht.MineEmptyBlocks(1)
+
 	// Make sure Dave still has the pending force close channel.
 	ht.AssertNumPendingForceClose(dave, 1)

 	// Dave should have a pending sweep.
 	ht.AssertNumPendingSweeps(dave, 1)

 	// Mine a block to trigger the sweep.
 	ht.MineBlocks(1)

 	// Dave should sweep his funds.
 	ht.AssertNumTxsInMempool(1)

@@ -1482,7 +1490,6 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
 	expectedTxes := 1

 	// Mine a block to trigger the sweeps.
-	ht.MineBlocks(1)
 	ht.AssertNumTxsInMempool(expectedTxes)

 	// Carol should consider the channel pending force close (since she is

@@ -1512,7 +1519,7 @@ func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
 	// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
 	// we already mined one block after the commitment was published, and
 	// one block to trigger Carol's sweeps, so take that into account.
-	ht.MineEmptyBlocks(1)
+	ht.MineBlocks(2)
Collaborator:
Hmm, why do we need to mine more blocks even though blockbeat should take care of the additional block that triggers the sweep? I would expect to mine fewer blocks.

Member Author:
Yeah, the devil lies in the flow: because we removed several MineBlocks calls above, we now need to mine enough blocks here to hit the timelock.

 	ht.AssertNumPendingSweeps(dave, 2)

 	// Mine a block to trigger the sweeps.

@@ -1615,8 +1622,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
 	// output and the other for her anchor.
 	ht.AssertNumPendingSweeps(carol, 2)

-	// Mine a block to trigger the sweep.
-	ht.MineEmptyBlocks(1)
 	ht.MineBlocksAndAssertNumTxes(1, 1)

 	// Now the channel should be fully closed also from Carol's POV.

@@ -1635,8 +1640,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
 	// output and the other for his anchor.
 	ht.AssertNumPendingSweeps(dave, 2)

-	// Mine a block to trigger the sweep.
-	ht.MineEmptyBlocks(1)
 	ht.MineBlocksAndAssertNumTxes(1, 1)

 	// Now Dave should consider the channel fully closed.

@@ -1652,10 +1655,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
 		ht.AssertNumPendingSweeps(dave, 1)
 	}

-	// Mine one block to trigger the sweeper to sweep.
-	ht.MineEmptyBlocks(1)
-	blocksMined++
-
 	// Expect one tx - the commitment sweep from Dave. For anchor
 	// channels, we expect the two anchor sweeping txns to be
 	// failed due they are uneconomical.

@@ -1673,9 +1672,6 @@ func assertDLPExecuted(ht *lntest.HarnessTest,
 	// commitmment was published, so take that into account.
 	ht.MineEmptyBlocks(int(defaultCSV - blocksMined))

-	// Mine one block to trigger the sweeper to sweep.
-	ht.MineEmptyBlocks(1)
-
 	// Carol should have two pending sweeps:
 	// 1. her commit output.
 	// 2. her anchor output, if this is anchor channel.
2 changes: 1 addition & 1 deletion itest/lnd_channel_balance_test.go

@@ -156,7 +156,7 @@ func testChannelUnsettledBalance(ht *lntest.HarnessTest) {
 			TimeoutSeconds: 60,
 			FeeLimitMsat:   noFeeLimitMsat,
 		}
-		alice.RPC.SendPayment(req)
+		ht.SendPaymentAssertInflight(alice, req)
 	}()
 }
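ht.SendPaymentAssertInflight replaces the bare RPC call so the test gets a synchronization point before proceeding. A plausible shape for the helper, assuming the harness's existing stream-assertion utilities (illustrative sketch; the real lntest implementation may differ):

// SendPaymentAssertInflight sends the payment and waits until it is
// reported as IN_FLIGHT, giving the test a synchronization point that
// the bare alice.RPC.SendPayment call lacked.
func (h *HarnessTest) SendPaymentAssertInflight(hn *node.HarnessNode,
	req *routerrpc.SendPaymentRequest) *lnrpc.Payment {

	// Kick off the payment via the node's router RPC.
	stream := hn.RPC.SendPayment(req)

	// Block until the payment stream reports the IN_FLIGHT state.
	return h.AssertPaymentStatusFromStream(stream, lnrpc.Payment_IN_FLIGHT)
}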
