lntemp+itest: refactor testHoldInvoiceForceClose

yyforyongyu
2022-08-05 17:48:35 +08:00
parent c1b3481354
commit 984d3ece12
4 changed files with 94 additions and 75 deletions

View File

@@ -567,3 +567,15 @@ func (h *HarnessRPC) VerifyChanBackup(

 	return resp
 }
+
+// LookupInvoice queries the node's invoices using the specified rHash.
+func (h *HarnessRPC) LookupInvoice(rHash []byte) *lnrpc.Invoice {
+	ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
+	defer cancel()
+
+	payHash := &lnrpc.PaymentHash{RHash: rHash}
+	resp, err := h.LN.LookupInvoice(ctxt, payHash)
+	h.NoError(err, "LookupInvoice")
+
+	return resp
+}
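The new LookupInvoice wrapper hides the context setup and error handling, so callers can poll invoice state directly. A short usage sketch, mirroring how the refactored test below checks for cancelation; the bob, payHash, ht, and defaultTimeout identifiers are assumed to come from the surrounding test harness:

	// Poll Bob until the hold invoice reaches the CANCELED state, failing
	// the test if it does not get there within the default timeout.
	err := wait.NoError(func() error {
		inv := bob.RPC.LookupInvoice(payHash[:])
		if inv.State != lnrpc.Invoice_CANCELED {
			return fmt.Errorf("expected canceled invoice, got: %v",
				inv.State)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "expected canceled invoice")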

View File

@@ -235,4 +235,8 @@ var allTestCasesTemp = []*lntemp.TestCase{
 		Name:     "etcd failover",
 		TestFunc: testEtcdFailover,
 	},
+	{
+		Name:     "hold invoice force close",
+		TestFunc: testHoldInvoiceForceClose,
+	},
 }

View File

@@ -1,14 +1,13 @@
 package itest

 import (
-	"context"
 	"fmt"

 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-	"github.com/lightningnetwork/lnd/lntest"
+	"github.com/lightningnetwork/lnd/lntemp"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lntypes"
 	"github.com/stretchr/testify/require"
@@ -16,17 +15,11 @@ import (
 // testHoldInvoiceForceClose tests cancellation of accepted hold invoices which
 // would otherwise trigger force closes when they expire.
-func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
-	ctxb, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
+func testHoldInvoiceForceClose(ht *lntemp.HarnessTest) {
 	// Open a channel between alice and bob.
-	chanReq := lntest.OpenChannelParams{
-		Amt: 300000,
-	}
-
-	chanPoint := openChannelAndAssert(
-		t, net, net.Alice, net.Bob, chanReq,
+	alice, bob := ht.Alice, ht.Bob
+	chanPoint := ht.OpenChannel(
+		alice, bob, lntemp.OpenChannelParams{Amt: 300000},
 	)

 	// Create a non-dust hold invoice for bob.
@@ -39,87 +32,76 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
 		CltvExpiry: 40,
 		Hash:       payHash[:],
 	}
+	bobInvoice := bob.RPC.AddHoldInvoice(invoiceReq)

-	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
-	defer cancel()
-	bobInvoice, err := net.Bob.AddHoldInvoice(ctxt, invoiceReq)
-	require.NoError(t.t, err)
+	// Subscribe the invoice.
+	stream := bob.RPC.SubscribeSingleInvoice(payHash[:])

 	// Pay this invoice from Alice -> Bob, we should achieve this with a
 	// single htlc.
-	_, err = net.Alice.RouterClient.SendPaymentV2(
-		ctxb, &routerrpc.SendPaymentRequest{
-			PaymentRequest: bobInvoice.PaymentRequest,
-			TimeoutSeconds: 60,
-			FeeLimitMsat:   noFeeLimitMsat,
-		},
-	)
-	require.NoError(t.t, err)
+	req := &routerrpc.SendPaymentRequest{
+		PaymentRequest: bobInvoice.PaymentRequest,
+		TimeoutSeconds: 60,
+		FeeLimitMsat:   noFeeLimitMsat,
+	}
+	alice.RPC.SendPayment(req)

-	waitForInvoiceAccepted(t, net.Bob, payHash)
+	ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)

 	// Once the HTLC has cleared, alice and bob should both have a single
 	// htlc locked in.
-	nodes := []*lntest.HarnessNode{net.Alice, net.Bob}
-	err = wait.NoError(func() error {
-		return assertActiveHtlcs(nodes, payHash[:])
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertActiveHtlcs(alice, payHash[:])
+	ht.AssertActiveHtlcs(bob, payHash[:])

 	// Get our htlc expiry height and current block height so that we
 	// can mine the exact number of blocks required to expire the htlc.
-	chans, err := net.Alice.ListChannels(ctxb, &lnrpc.ListChannelsRequest{})
-	require.NoError(t.t, err)
-	require.Len(t.t, chans.Channels, 1)
-	require.Len(t.t, chans.Channels[0].PendingHtlcs, 1)
-	activeHtlc := chans.Channels[0].PendingHtlcs[0]
+	channel := ht.QueryChannelByChanPoint(alice, chanPoint)
+	require.Len(ht, channel.PendingHtlcs, 1)
+	activeHtlc := channel.PendingHtlcs[0]

-	require.NoError(t.t, net.Alice.WaitForBlockchainSync())
-	require.NoError(t.t, net.Bob.WaitForBlockchainSync())
-
-	info, err := net.Alice.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
-	require.NoError(t.t, err)
+	_, currentHeight := ht.Miner.GetBestBlock()

 	// Now we will mine blocks until the htlc expires, and wait for each
 	// node to sync to our latest height. Sanity check that we won't
 	// underflow.
-	require.Greater(
-		t.t, activeHtlc.ExpirationHeight, info.BlockHeight,
-		"expected expiry after current height",
-	)
-	blocksTillExpiry := activeHtlc.ExpirationHeight - info.BlockHeight
+	require.Greater(ht, activeHtlc.ExpirationHeight, uint32(currentHeight),
+		"expected expiry after current height")
+	blocksTillExpiry := activeHtlc.ExpirationHeight - uint32(currentHeight)

 	// Alice will go to chain with some delta, sanity check that we won't
 	// underflow and subtract this from our mined blocks.
-	require.Greater(
-		t.t, blocksTillExpiry,
-		uint32(lncfg.DefaultOutgoingBroadcastDelta),
-	)
-	blocksTillForce := blocksTillExpiry - lncfg.DefaultOutgoingBroadcastDelta
+	require.Greater(ht, blocksTillExpiry,
+		uint32(lncfg.DefaultOutgoingBroadcastDelta))

-	mineBlocksSlow(t, net, blocksTillForce, 0)
+	// blocksTillForce is the number of blocks should be mined to
+	// trigger a force close from Alice iff the invoice cancelation
+	// failed. This value is 48 in current test setup.
+	blocksTillForce := blocksTillExpiry -
+		lncfg.DefaultOutgoingBroadcastDelta

-	require.NoError(t.t, net.Alice.WaitForBlockchainSync())
-	require.NoError(t.t, net.Bob.WaitForBlockchainSync())
+	// blocksTillCancel is the number of blocks should be mined to trigger
+	// an invoice cancelation from Bob. This value is 30 in current test
+	// setup.
+	blocksTillCancel := blocksTillExpiry -
+		lncfg.DefaultHoldInvoiceExpiryDelta

-	// Our channel should not have been force closed, instead we expect our
-	// channel to still be open and our invoice to have been canceled before
-	// expiry.
-	chanInfo, err := getChanInfo(net.Alice)
-	require.NoError(t.t, err)
+	// When using ht.MineBlocks, for bitcoind backend, the block height
+	// synced differ significantly among subsystems. From observation, the
+	// LNWL syncs much faster than other subsystems, with more than 10
+	// blocks ahead. For this test case, CRTR may be lagging behind for
+	// more than 20 blocks. Thus we use slow mining instead.
+	// TODO(yy): fix block height asymmetry among all the subsystems.
+	//
+	// We first mine enough blocks to trigger an invoice cancelation.
+	ht.MineBlocks(blocksTillCancel)

-	fundingTxID, err := lnrpc.GetChanPointFundingTxid(chanPoint)
-	require.NoError(t.t, err)
-	chanStr := fmt.Sprintf("%v:%v", fundingTxID, chanPoint.OutputIndex)
-	require.Equal(t.t, chanStr, chanInfo.ChannelPoint)
+	// Wait for the nodes to be synced.
+	ht.WaitForBlockchainSync(alice)
+	ht.WaitForBlockchainSync(bob)

-	err = wait.NoError(func() error {
-		inv, err := net.Bob.LookupInvoice(ctxt, &lnrpc.PaymentHash{
-			RHash: payHash[:],
-		})
-		if err != nil {
-			return err
-		}
-
+	// Check that the invoice is canceled by Bob.
+	err := wait.NoError(func() error {
+		inv := bob.RPC.LookupInvoice(payHash[:])
+
 		if inv.State != lnrpc.Invoice_CANCELED {
 			return fmt.Errorf("expected canceled invoice, got: %v",
@@ -135,8 +117,33 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
 		return nil
 	}, defaultTimeout)
-	require.NoError(t.t, err, "expected canceled invoice")
+	require.NoError(ht, err, "expected canceled invoice")
+
+	// We now continue to mine more blocks to the point where it could have
+	// triggered a force close if the invoice cancelation was failed.
+	//
+	// NOTE: we need to mine blocks in two sections because of a following
+	// case has happened frequently with bitcoind backend,
+	// - when mining all the blocks together, subsystems were syncing
+	//   blocks under very different speed.
+	// - Bob would cancel the invoice in INVC, and send an UpdateFailHTLC
+	//   in PEER.
+	// - Alice, however, would need to receive the message before her
+	//   subsystem CNCT being synced to the force close height. This didn't
+	//   happen in bitcoind backend, as Alice's CNCT was syncing way faster
+	//   than Bob's INVC, causing the channel being force closed before the
+	//   invoice cancelation message was received by Alice.
+	ht.MineBlocks(blocksTillForce - blocksTillCancel)
+
+	// Wait for the nodes to be synced.
+	ht.WaitForBlockchainSync(alice)
+	ht.WaitForBlockchainSync(bob)
+
+	// Check that Alice has not closed the channel because there are no
+	// outgoing HTLCs in her channel as the only HTLC has already been
+	// canceled.
+	ht.AssertNumPendingForceClose(alice, 0)

 	// Clean up the channel.
-	closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
+	ht.CloseChannel(alice, chanPoint)
 }
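For reference, the two mining stages relate to each other as sketched below. This is not part of the commit; it only restates the arithmetic from the comments above, using the values (48 and 30) those comments quote for the current test setup:

	package main

	import "fmt"

	func main() {
		// Values quoted in the refactored test's comments; the real test
		// derives them from the HTLC expiry height and the lncfg default
		// deltas.
		blocksTillForce := uint32(48)  // delta at which Alice would force close
		blocksTillCancel := uint32(30) // delta at which Bob cancels the invoice

		// Bob's cancelation height comes first, so the test mines in two
		// stages: first up to the cancel height, then the remainder up to
		// the would-be force close height, asserting no force close
		// happened in between.
		fmt.Println("stage 1 blocks:", blocksTillCancel)                 // 30
		fmt.Println("stage 2 blocks:", blocksTillForce-blocksTillCancel) // 18
	}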

View File

@@ -114,10 +114,6 @@ var allTestCases = []*testCase{
 		name: "hold invoice sender persistence",
 		test: testHoldInvoicePersistence,
 	},
-	{
-		name: "hold invoice force close",
-		test: testHoldInvoiceForceClose,
-	},
 	{
 		name: "cpfp",
 		test: testCPFP,