From f95e64f084326498a12f85e567d17c6471e6b3a6 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Wed, 23 Oct 2024 22:51:48 +0800 Subject: [PATCH] itest: flatten `testMultiHopHtlcAggregation` --- itest/list_on_test.go | 4 - itest/lnd_multi-hop_force_close_test.go | 423 ++++++++++++++++++++++ itest/lnd_multi-hop_test.go | 457 ------------------------ 3 files changed, 423 insertions(+), 461 deletions(-) diff --git a/itest/list_on_test.go b/itest/list_on_test.go index 1edc278c6..ac440ff53 100644 --- a/itest/list_on_test.go +++ b/itest/list_on_test.go @@ -297,10 +297,6 @@ var allTestCases = []*lntest.TestCase{ Name: "REST API", TestFunc: testRestAPI, }, - { - Name: "multi hop htlc aggregation", - TestFunc: testMultiHopHtlcAggregation, - }, { Name: "revoked uncooperative close retribution", TestFunc: testRevokedCloseRetribution, diff --git a/itest/lnd_multi-hop_force_close_test.go b/itest/lnd_multi-hop_force_close_test.go index 120ea35ff..46631bbda 100644 --- a/itest/lnd_multi-hop_force_close_test.go +++ b/itest/lnd_multi-hop_force_close_test.go @@ -10,6 +10,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/node" + "github.com/lightningnetwork/lnd/lntest/rpc" "github.com/lightningnetwork/lnd/lntypes" "github.com/stretchr/testify/require" ) @@ -101,6 +102,18 @@ var multiHopForceCloseTestCases = []*lntest.TestCase{ Name: "multihop local preimage claim leased", TestFunc: testLocalPreimageClaimLeased, }, + { + Name: "multihop htlc aggregation anchor", + TestFunc: testHtlcAggregaitonAnchor, + }, + { + Name: "multihop htlc aggregation simple taproot", + TestFunc: testHtlcAggregaitonSimpleTaproot, + }, + { + Name: "multihop htlc aggregation leased", + TestFunc: testHtlcAggregaitonLeased, + }, } // testLocalClaimOutgoingHTLCAnchor tests `runLocalClaimOutgoingHTLC` with @@ -2718,3 +2731,413 @@ func runLocalPreimageClaimLeased(ht *lntest.HarnessTest, ht.AssertNumPendingForceClose(alice, 0) 
ht.AssertNumPendingForceClose(bob, 0) } + +// testHtlcAggregaitonAnchor tests `runHtlcAggregation` with anchor channel. +func testHtlcAggregaitonAnchor(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{Amt: chanAmt} + + cfg := node.CfgAnchor + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: lnrpc.CommitmentType_ANCHORS, + } + + // Prepare Carol's node config to enable zero-conf and anchor. + cfg := node.CfgZeroConf + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) +} + +// testHtlcAggregaitonSimpleTaproot tests `runHtlcAggregation` with simple +// taproot channel. +func testHtlcAggregaitonSimpleTaproot(ht *lntest.HarnessTest) { + c := lnrpc.CommitmentType_SIMPLE_TAPROOT + + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // simple taproot channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: c, + Private: true, + } + + cfg := node.CfgSimpleTaproot + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf simple taproot channels. + // + // Prepare params. 
+ params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: c, + Private: true, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgSimpleTaproot + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) +} + +// testHtlcAggregaitonLeased tests `runHtlcAggregation` with script enforced +// lease channel. +func testHtlcAggregaitonLeased(ht *lntest.HarnessTest) { + success := ht.Run("no zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // leased channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + CommitmentType: leasedType, + } + + cfg := node.CfgLeased + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) + if !success { + return + } + + ht.Run("zero conf", func(t *testing.T) { + st := ht.Subtest(t) + + // Create a three hop network: Alice -> Bob -> Carol, using + // zero-conf anchor channels. + // + // Prepare params. + params := lntest.OpenChannelParams{ + Amt: chanAmt, + ZeroConf: true, + CommitmentType: leasedType, + } + + // Prepare Carol's node config to enable zero-conf and leased + // channel. + cfg := node.CfgLeased + cfg = append(cfg, node.CfgZeroConf...) + cfgs := [][]string{cfg, cfg, cfg} + + runHtlcAggregation(st, cfgs, params) + }) +} + +// runHtlcAggregation tests that in a multi-hop HTLC scenario, if we force +// close a channel with both incoming and outgoing HTLCs, we can properly +// resolve them using the second level timeout and success transactions. In +// case of anchor channels, the second-level spends can also be aggregated and +// properly feebumped, so we'll check that as well. 
+func runHtlcAggregation(ht *lntest.HarnessTest, + cfgs [][]string, params lntest.OpenChannelParams) { + + // Set the min relay feerate to be 10 sat/vbyte so the non-CPFP anchor + // is never swept. + // + // TODO(yy): delete this line once the normal anchor sweeping is + // removed. + ht.SetMinRelayFeerate(10_000) + + // Create a three hop network: Alice -> Bob -> Carol. + chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params) + alice, bob, carol := nodes[0], nodes[1], nodes[2] + _, bobChanPoint := chanPoints[0], chanPoints[1] + + // We need one additional UTXO to create the sweeping tx for the + // second-level success txes. + ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) + + // Bob should have enough wallet UTXOs here to sweep the HTLC in the + // end of this test. However, due to a known issue, Bob's wallet may + // report there's no UTXO available. For details, + // - https://github.com/lightningnetwork/lnd/issues/8786 + // + // TODO(yy): remove this step once the issue is resolved. + ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) + + // If this is a taproot channel, then we'll need to make some manual + // route hints so Alice+Carol can actually find a route. + var ( + carolRouteHints []*lnrpc.RouteHint + aliceRouteHints []*lnrpc.RouteHint + ) + + if params.CommitmentType == lnrpc.CommitmentType_SIMPLE_TAPROOT { + carolRouteHints = makeRouteHints(bob, carol, params.ZeroConf) + aliceRouteHints = makeRouteHints(bob, alice, params.ZeroConf) + } + + // To ensure we have capacity in both directions of the route, we'll + // make a fairly large payment Alice->Carol and settle it. + const reBalanceAmt = 500_000 + invoice := &lnrpc.Invoice{ + Value: reBalanceAmt, + RouteHints: carolRouteHints, + } + invResp := carol.RPC.AddInvoice(invoice) + ht.CompletePaymentRequests(alice, []string{invResp.PaymentRequest}) + + // Make sure Carol has settled the invoice. 
+	ht.AssertInvoiceSettled(carol, invResp.PaymentAddr)
+
+	// With the network active, we'll now add new hodl invoices at both
+	// Alice's and Carol's end. Make sure the cltv expiry delta is large
+	// enough, otherwise Bob won't send out the outgoing htlc.
+	const numInvoices = 5
+	const invoiceAmt = 50_000
+
+	var (
+		carolInvoices       []*invoicesrpc.AddHoldInvoiceResp
+		aliceInvoices       []*invoicesrpc.AddHoldInvoiceResp
+		alicePreimages      []lntypes.Preimage
+		payHashes           [][]byte
+		invoiceStreamsCarol []rpc.SingleInvoiceClient
+		invoiceStreamsAlice []rpc.SingleInvoiceClient
+	)
+
+	// Add Carol invoices.
+	for i := 0; i < numInvoices; i++ {
+		preimage := ht.RandomPreimage()
+		payHash := preimage.Hash()
+
+		invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+			Value:      invoiceAmt,
+			CltvExpiry: finalCltvDelta,
+			Hash:       payHash[:],
+			RouteHints: carolRouteHints,
+		}
+		carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
+
+		carolInvoices = append(carolInvoices, carolInvoice)
+		payHashes = append(payHashes, payHash[:])
+
+		// Subscribe the invoice.
+		stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
+		invoiceStreamsCarol = append(invoiceStreamsCarol, stream)
+	}
+
+	// We'll give Alice's invoices a longer CLTV expiry, to ensure the
+	// channel Bob<->Carol will be closed first.
+	for i := 0; i < numInvoices; i++ {
+		preimage := ht.RandomPreimage()
+		payHash := preimage.Hash()
+
+		invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
+			Value:      invoiceAmt,
+			CltvExpiry: thawHeightDelta - 4,
+			Hash:       payHash[:],
+			RouteHints: aliceRouteHints,
+		}
+		aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq)
+
+		aliceInvoices = append(aliceInvoices, aliceInvoice)
+		alicePreimages = append(alicePreimages, preimage)
+		payHashes = append(payHashes, payHash[:])
+
+		// Subscribe the invoice.
+		stream := alice.RPC.SubscribeSingleInvoice(payHash[:])
+		invoiceStreamsAlice = append(invoiceStreamsAlice, stream)
+	}
+
+	// Now that we've created the invoices, we'll pay them all from
+	// Alice<->Carol, going through Bob. We won't wait for the response
+	// however, as neither will immediately settle the payment.
+	//
+	// Alice will pay all of Carol's invoices.
+	for _, carolInvoice := range carolInvoices {
+		req := &routerrpc.SendPaymentRequest{
+			PaymentRequest: carolInvoice.PaymentRequest,
+			TimeoutSeconds: 60,
+			FeeLimitMsat:   noFeeLimitMsat,
+		}
+		alice.RPC.SendPayment(req)
+	}
+
+	// And Carol will pay Alice's.
+	for _, aliceInvoice := range aliceInvoices {
+		req := &routerrpc.SendPaymentRequest{
+			PaymentRequest: aliceInvoice.PaymentRequest,
+			TimeoutSeconds: 60,
+			FeeLimitMsat:   noFeeLimitMsat,
+		}
+		carol.RPC.SendPayment(req)
+	}
+
+	// At this point, all 3 nodes should now have the HTLCs active on
+	// their channels.
+	ht.AssertActiveHtlcs(alice, payHashes...)
+	ht.AssertActiveHtlcs(bob, payHashes...)
+	ht.AssertActiveHtlcs(carol, payHashes...)
+
+	// Wait for Alice and Carol to mark the invoices as accepted. There is
+	// a small gap to bridge between adding the htlc to the channel and
+	// executing the exit hop logic.
+	for _, stream := range invoiceStreamsCarol {
+		ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+	}
+
+	for _, stream := range invoiceStreamsAlice {
+		ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
+	}
+
+	// We want Carol's htlcs to expire off-chain to demonstrate Bob's force
+	// close. However, Carol will cancel her invoices to prevent force
+	// closes, so we shut her down for now.
+	restartCarol := ht.SuspendNode(carol)
+
+	// We'll now mine enough blocks to trigger Bob's broadcast of his
+	// commitment transaction due to the fact that Carol's HTLCs are
+	// about to timeout. With the default outgoing broadcast delta of zero,
+	// this will be the same height as the htlc expiry height.
+ numBlocks := padCLTV( + uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), + ) + ht.MineBlocks(int(numBlocks)) + + // Bob should have one anchor sweep request. + // + // For neutrino backend, there's no way to know the sweeping of the + // remote anchor is failed, so Bob still sees two pending sweeps. + if ht.IsNeutrinoBackend() { + ht.AssertNumPendingSweeps(bob, 2) + } else { + ht.AssertNumPendingSweeps(bob, 1) + } + + // Bob's force close tx and anchor sweeping tx should now be found in + // the mempool. + ht.AssertNumTxsInMempool(2) + + // Mine a block to confirm Bob's force close tx and anchor sweeping tx. + ht.MineBlocksAndAssertNumTxes(1, 2) + + // Bob should have `numInvoices` for HTLC timeout txns. + ht.AssertNumPendingSweeps(bob, numInvoices) + + // Once bob has force closed, we can restart carol. + require.NoError(ht, restartCarol()) + + // Carol should have commit and anchor outputs. + ht.AssertNumPendingSweeps(carol, 2) + + // Let Alice settle her invoices. When Bob now gets the preimages, he + // will broadcast his second-level txns to claim the htlcs. + for _, preimage := range alicePreimages { + alice.RPC.SettleInvoice(preimage[:]) + } + + // Bob should have `numInvoices` for both HTLC success and timeout + // txns. + ht.AssertNumPendingSweeps(bob, numInvoices*2) + + // Mine a block to trigger the sweep. This is needed because the + // preimage extraction logic from the link is not managed by the + // blockbeat, which means the preimage may be sent to the contest + // resolver after it's launched. + // + // TODO(yy): Expose blockbeat to the link layer. + ht.MineEmptyBlocks(1) + + // We expect to see three sweeping txns: + // 1. Bob's sweeping tx for all timeout HTLCs. + // 2. Bob's sweeping tx for all success HTLCs. + // 3. Carol's sweeping tx for her commit output. + // Mine a block to confirm them. + ht.MineBlocksAndAssertNumTxes(1, 3) + + // For this channel, we also check the number of HTLCs and the stage + // are correct. 
+ ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2) + + // For non-leased channels, we can now mine one block so Bob will sweep + // his to_local output. + if params.CommitmentType != leasedType { + // Mine one block so Bob's to_local becomes mature. + ht.MineBlocks(1) + + // Bob should offer the to_local output to his sweeper now. + ht.AssertNumPendingSweeps(bob, 1) + + // Mine a block to confirm Bob's sweeping of his to_local + // output. + ht.MineBlocksAndAssertNumTxes(1, 1) + } + + // Mine blocks till the CSV expires on Bob's HTLC output. + resp := ht.AssertNumPendingForceClose(bob, 1)[0] + require.Equal(ht, numInvoices*2, len(resp.PendingHtlcs)) + + ht.Logf("Bob's timelock to_local output=%v, timelock on second stage "+ + "htlc=%v", resp.BlocksTilMaturity, + resp.PendingHtlcs[0].BlocksTilMaturity) + + ht.MineBlocks(int(resp.PendingHtlcs[0].BlocksTilMaturity)) + + // With the above mined block, Bob's HTLCs should now all be offered to + // his sweeper since the CSV lock is now expired. + // + // For leased channel, due to the test setup, Bob's to_local output is + // now also mature and can be swept together with his HTLCs. + if params.CommitmentType == leasedType { + ht.AssertNumPendingSweeps(bob, numInvoices*2+1) + } else { + ht.AssertNumPendingSweeps(bob, numInvoices*2) + } + + // When we mine one additional block, that will confirm Bob's second + // level sweep. Now Bob should have no pending channels anymore, as + // this just resolved it by the confirmation of the sweep tx. + ht.MineBlocksAndAssertNumTxes(1, 1) + ht.AssertNumPendingForceClose(bob, 0) + + // Carol should have no channels left. 
+ ht.AssertNumPendingForceClose(carol, 0) +} diff --git a/itest/lnd_multi-hop_test.go b/itest/lnd_multi-hop_test.go index 33f91f383..124118af6 100644 --- a/itest/lnd_multi-hop_test.go +++ b/itest/lnd_multi-hop_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainreg" "github.com/lightningnetwork/lnd/lncfg" @@ -16,8 +15,6 @@ import ( "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/node" "github.com/lightningnetwork/lnd/lntest/rpc" - "github.com/lightningnetwork/lnd/lntest/wait" - "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/routing" "github.com/stretchr/testify/require" ) @@ -159,460 +156,6 @@ func runMultiHopHtlcClaimTest(ht *lntest.HarnessTest, tester caseRunner) { } } -// testMultiHopHtlcAggregation tests that in a multi-hop HTLC scenario, if we -// force close a channel with both incoming and outgoing HTLCs, we can properly -// resolve them using the second level timeout and success transactions. In -// case of anchor channels, the second-level spends can also be aggregated and -// properly feebumped, so we'll check that as well. -func testMultiHopHtlcAggregation(ht *lntest.HarnessTest) { - runMultiHopHtlcClaimTest(ht, runMultiHopHtlcAggregation) -} - -func runMultiHopHtlcAggregation(ht *lntest.HarnessTest, - alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) { - - // We need one additional UTXO to create the sweeping tx for the - // second-level success txes. - ht.FundCoins(btcutil.SatoshiPerBitcoin, bob) - - // First, we'll create a three hop network: Alice -> Bob -> Carol. - aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - ht, alice, bob, false, c, zeroConf, - ) - - // If this is a taproot channel, then we'll need to make some manual - // route hints so Alice+Carol can actually find a route. 
- var ( - carolRouteHints []*lnrpc.RouteHint - aliceRouteHints []*lnrpc.RouteHint - ) - if c == lnrpc.CommitmentType_SIMPLE_TAPROOT { - carolRouteHints = makeRouteHints(bob, carol, zeroConf) - aliceRouteHints = makeRouteHints(bob, alice, zeroConf) - } - - // To ensure we have capacity in both directions of the route, we'll - // make a fairly large payment Alice->Carol and settle it. - const reBalanceAmt = 500_000 - invoice := &lnrpc.Invoice{ - Value: reBalanceAmt, - RouteHints: carolRouteHints, - } - resp := carol.RPC.AddInvoice(invoice) - ht.CompletePaymentRequests(alice, []string{resp.PaymentRequest}) - - // Make sure Carol has settled the invoice. - ht.AssertInvoiceSettled(carol, resp.PaymentAddr) - - // With the network active, we'll now add a new hodl invoices at both - // Alice's and Carol's end. Make sure the cltv expiry delta is large - // enough, otherwise Bob won't send out the outgoing htlc. - const numInvoices = 5 - const invoiceAmt = 50_000 - - var ( - carolInvoices []*invoicesrpc.AddHoldInvoiceResp - aliceInvoices []*invoicesrpc.AddHoldInvoiceResp - alicePreimages []lntypes.Preimage - payHashes [][]byte - invoiceStreamsCarol []rpc.SingleInvoiceClient - invoiceStreamsAlice []rpc.SingleInvoiceClient - ) - - // Add Carol invoices. - for i := 0; i < numInvoices; i++ { - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: finalCltvDelta, - Hash: payHash[:], - RouteHints: carolRouteHints, - } - carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq) - - carolInvoices = append(carolInvoices, carolInvoice) - payHashes = append(payHashes, payHash[:]) - - // Subscribe the invoice. - stream := carol.RPC.SubscribeSingleInvoice(payHash[:]) - invoiceStreamsCarol = append(invoiceStreamsCarol, stream) - } - - // We'll give Alice's invoices a longer CLTV expiry, to ensure the - // channel Bob<->Carol will be closed first. 
- for i := 0; i < numInvoices; i++ { - var preimage lntypes.Preimage - copy(preimage[:], ht.Random32Bytes()) - payHash := preimage.Hash() - invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ - Value: invoiceAmt, - CltvExpiry: thawHeightDelta - 4, - Hash: payHash[:], - RouteHints: aliceRouteHints, - } - aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq) - - aliceInvoices = append(aliceInvoices, aliceInvoice) - alicePreimages = append(alicePreimages, preimage) - payHashes = append(payHashes, payHash[:]) - - // Subscribe the invoice. - stream := alice.RPC.SubscribeSingleInvoice(payHash[:]) - invoiceStreamsAlice = append(invoiceStreamsAlice, stream) - } - - // Now that we've created the invoices, we'll pay them all from - // Alice<->Carol, going through Bob. We won't wait for the response - // however, as neither will immediately settle the payment. - - // Alice will pay all of Carol's invoices. - for _, carolInvoice := range carolInvoices { - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - alice.RPC.SendPayment(req) - } - - // And Carol will pay Alice's. - for _, aliceInvoice := range aliceInvoices { - req := &routerrpc.SendPaymentRequest{ - PaymentRequest: aliceInvoice.PaymentRequest, - TimeoutSeconds: 60, - FeeLimitMsat: noFeeLimitMsat, - } - carol.RPC.SendPayment(req) - } - - // At this point, all 3 nodes should now the HTLCs active on their - // channels. - ht.AssertActiveHtlcs(alice, payHashes...) - ht.AssertActiveHtlcs(bob, payHashes...) - ht.AssertActiveHtlcs(carol, payHashes...) - - // Wait for Alice and Carol to mark the invoices as accepted. There is - // a small gap to bridge between adding the htlc to the channel and - // executing the exit hop logic. 
- for _, stream := range invoiceStreamsCarol { - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - } - - for _, stream := range invoiceStreamsAlice { - ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED) - } - - // Increase the fee estimate so that the following force close tx will - // be cpfp'ed. - ht.SetFeeEstimate(30000) - - // We want Carol's htlcs to expire off-chain to demonstrate bob's force - // close. However, Carol will cancel her invoices to prevent force - // closes, so we shut her down for now. - restartCarol := ht.SuspendNode(carol) - - // We'll now mine enough blocks to trigger Bob's broadcast of his - // commitment transaction due to the fact that the Carol's HTLCs are - // about to timeout. With the default outgoing broadcast delta of zero, - // this will be the same height as the htlc expiry height. - numBlocks := padCLTV( - uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta), - ) - ht.MineEmptyBlocks(int(numBlocks)) - - // Bob's force close transaction should now be found in the mempool. If - // there are anchors, we expect it to be offered to Bob's sweeper. - ht.AssertNumTxsInMempool(1) - - // Bob has two anchor sweep requests, one for remote (invalid) and the - // other for local. - ht.AssertNumPendingSweeps(bob, 2) - - closeTx := ht.AssertOutpointInMempool( - ht.OutPointFromChannelPoint(bobChanPoint), - ) - closeTxid := closeTx.TxHash() - - // Go through the closing transaction outputs, and make an index for - // the HTLC outputs. - successOuts := make(map[wire.OutPoint]struct{}) - timeoutOuts := make(map[wire.OutPoint]struct{}) - for i, txOut := range closeTx.TxOut { - op := wire.OutPoint{ - Hash: closeTxid, - Index: uint32(i), - } - - switch txOut.Value { - // If this HTLC goes towards Carol, Bob will claim it with a - // timeout Tx. In this case the value will be the invoice - // amount. 
- case invoiceAmt: - timeoutOuts[op] = struct{}{} - - // If the HTLC has direction towards Alice, Bob will claim it - // with the success TX when he learns the preimage. In this - // case one extra sat will be on the output, because of the - // routing fee. - case invoiceAmt + 1: - successOuts[op] = struct{}{} - } - } - - // Once bob has force closed, we can restart carol. - require.NoError(ht, restartCarol()) - - // Mine a block to confirm the closing transaction. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // The above mined block will trigger Bob to sweep his anchor output. - ht.AssertNumTxsInMempool(1) - - // Let Alice settle her invoices. When Bob now gets the preimages, he - // has no other option than to broadcast his second-level transactions - // to claim the money. - for _, preimage := range alicePreimages { - alice.RPC.SettleInvoice(preimage[:]) - } - - expectedTxes := 0 - switch c { - // In case of anchors, all success transactions will be aggregated into - // one, the same is the case for the timeout transactions. In this case - // Carol will also sweep her commitment and anchor output in a single - // tx. - case lnrpc.CommitmentType_ANCHORS, - lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE, - lnrpc.CommitmentType_SIMPLE_TAPROOT: - - // Bob should have `numInvoices` for both HTLC success and - // timeout txns, plus one anchor sweep. - ht.AssertNumPendingSweeps(bob, numInvoices*2+1) - - // Carol should have commit and anchor outputs. - ht.AssertNumPendingSweeps(carol, 2) - - // We expect to see three sweeping txns: - // 1. Bob's sweeping tx for all timeout HTLCs. - // 2. Bob's sweeping tx for all success HTLCs. - // 3. Carol's sweeping tx for her commit and anchor outputs. - expectedTxes = 3 - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Mine a block to confirm Bob's anchor sweeping, which will also - // trigger his sweeper to sweep HTLCs. - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Assert the sweeping txns are found in the mempool. 
- txes := ht.GetNumTxsFromMempool(expectedTxes) - - // Since Bob can aggregate the transactions, we expect a single - // transaction, that have multiple spends from the commitment. - var ( - timeoutTxs []*chainhash.Hash - successTxs []*chainhash.Hash - ) - for _, tx := range txes { - txid := tx.TxHash() - - for i := range tx.TxIn { - prevOp := tx.TxIn[i].PreviousOutPoint - if _, ok := successOuts[prevOp]; ok { - successTxs = append(successTxs, &txid) - - break - } - - if _, ok := timeoutOuts[prevOp]; ok { - timeoutTxs = append(timeoutTxs, &txid) - - break - } - } - } - - // In case of anchor we expect all the timeout and success second - // levels to be aggregated into one tx. For earlier channel types, they - // will be separate transactions. - if lntest.CommitTypeHasAnchors(c) { - require.Len(ht, timeoutTxs, 1) - require.Len(ht, successTxs, 1) - } else { - require.Len(ht, timeoutTxs, numInvoices) - require.Len(ht, successTxs, numInvoices) - } - - // All mempool transactions should be spending from the commitment - // transaction. - ht.AssertAllTxesSpendFrom(txes, closeTxid) - - // Mine a block to confirm the all the transactions, including Carol's - // commitment tx, anchor tx(optional), and Bob's second-level timeout - // and success txes. - ht.MineBlocksAndAssertNumTxes(1, expectedTxes) - - // At this point, Bob should have broadcast his second layer success - // transaction, and should have sent it to the nursery for incubation, - // or to the sweeper for sweeping. - forceCloseChan := ht.AssertNumPendingForceClose(bob, 1)[0] - ht.Logf("Bob's timelock on commit=%v, timelock on htlc=%v", - forceCloseChan.BlocksTilMaturity, - forceCloseChan.PendingHtlcs[0].BlocksTilMaturity) - - // For this channel, we also check the number of HTLCs and the stage - // are correct. 
- ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2) - - if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - // If we then mine additional blocks, Bob can sweep his - // commitment output. - ht.MineEmptyBlocks(1) - - // Assert the tx has been offered to the sweeper. - ht.AssertNumPendingSweeps(bob, 1) - - // Mine one block to trigger the sweep. - ht.MineEmptyBlocks(1) - - // Find the commitment sweep. - bobCommitSweep := ht.GetNumTxsFromMempool(1)[0] - ht.AssertTxSpendFrom(bobCommitSweep, closeTxid) - - // Also ensure it is not spending from any of the HTLC output. - for _, txin := range bobCommitSweep.TxIn { - for _, timeoutTx := range timeoutTxs { - require.NotEqual(ht, *timeoutTx, - txin.PreviousOutPoint.Hash, - "found unexpected spend of timeout tx") - } - - for _, successTx := range successTxs { - require.NotEqual(ht, *successTx, - txin.PreviousOutPoint.Hash, - "found unexpected spend of success tx") - } - } - } - - switch c { - // Mining one additional block, Bob's second level tx is mature, and he - // can sweep the output. Before the blocks are mined, we should expect - // to see Bob's commit sweep in the mempool. - case lnrpc.CommitmentType_ANCHORS, lnrpc.CommitmentType_SIMPLE_TAPROOT: - ht.MineBlocksAndAssertNumTxes(1, 1) - - // Since Bob is the initiator of the Bob-Carol script-enforced leased - // channel, he incurs an additional CLTV when sweeping outputs back to - // his wallet. We'll need to mine enough blocks for the timelock to - // expire to prompt his broadcast. - case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE: - resp := bob.RPC.PendingChannels() - require.Len(ht, resp.PendingForceClosingChannels, 1) - forceCloseChan := resp.PendingForceClosingChannels[0] - require.Positive(ht, forceCloseChan.BlocksTilMaturity) - numBlocks := uint32(forceCloseChan.BlocksTilMaturity) - - // Add debug log. 
- height := ht.CurrentHeight() - bob.AddToLogf("itest: now mine %d blocks at height %d", - numBlocks, height) - ht.MineEmptyBlocks(int(numBlocks) - 1) - - default: - ht.Fatalf("unhandled commitment type %v", c) - } - - // Make sure Bob's sweeper has received all the sweeping requests. - ht.AssertNumPendingSweeps(bob, numInvoices*2) - - // Mine one block to trigger the sweeps. - ht.MineEmptyBlocks(1) - - // For leased channels, Bob's commit output will mature after the above - // block. - if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE { - ht.AssertNumPendingSweeps(bob, numInvoices*2+1) - } - - // We now wait for 30 seconds to overcome the flake - there's a block - // race between contractcourt and sweeper, causing the sweep to be - // broadcast earlier. - // - // TODO(yy): remove this once `blockbeat` is in place. - numExpected := 1 - err := wait.NoError(func() error { - mem := ht.GetRawMempool() - if len(mem) == numExpected { - return nil - } - - if len(mem) > 0 { - numExpected = len(mem) - } - - return fmt.Errorf("want %d, got %v in mempool: %v", numExpected, - len(mem), mem) - }, wait.DefaultTimeout) - ht.Logf("Checking mempool got: %v", err) - - // Make sure it spends from the second level tx. - secondLevelSweep := ht.GetNumTxsFromMempool(numExpected)[0] - bobSweep := secondLevelSweep.TxHash() - - // It should be sweeping all the second-level outputs. - var secondLvlSpends int - for _, txin := range secondLevelSweep.TxIn { - for _, timeoutTx := range timeoutTxs { - if *timeoutTx == txin.PreviousOutPoint.Hash { - secondLvlSpends++ - } - } - - for _, successTx := range successTxs { - if *successTx == txin.PreviousOutPoint.Hash { - secondLvlSpends++ - } - } - } - - // TODO(yy): bring the following check back when `blockbeat` is in - // place - atm we may have two sweeping transactions in the mempool. - // require.Equal(ht, 2*numInvoices, secondLvlSpends) - - // When we mine one additional block, that will confirm Bob's second - // level sweep. 
Now Bob should have no pending channels anymore, as - // this just resolved it by the confirmation of the sweep transaction. - block := ht.MineBlocksAndAssertNumTxes(1, numExpected)[0] - ht.AssertTxInBlock(block, bobSweep) - - // For leased channels, we need to mine one more block to confirm Bob's - // commit output sweep. - // - // NOTE: we mine this block conditionally, as the commit output may - // have already been swept one block earlier due to the race in block - // consumption among subsystems. - pendingChanResp := bob.RPC.PendingChannels() - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - ht.MineBlocksAndAssertNumTxes(1, 1) - } - ht.AssertNumPendingForceClose(bob, 0) - - // THe channel with Alice is still open. - ht.AssertNodeNumChannels(bob, 1) - - // Carol should have no channels left (open nor pending). - ht.AssertNumPendingForceClose(carol, 0) - ht.AssertNodeNumChannels(carol, 0) - - // Coop close, no anchors. - ht.CloseChannel(alice, aliceChanPoint) -} - // createThreeHopNetwork creates a topology of `Alice -> Bob -> Carol`. func createThreeHopNetwork(ht *lntest.HarnessTest, alice, bob *node.HarnessNode, carolHodl bool, c lnrpc.CommitmentType,